code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def get_route_edge_attributes(G, route, attribute=None, minimize_key='length', retrieve_default=None):
"""
Get a list of attribute values for each edge in a path.
Parameters
----------
G : networkx multidigraph
route : list
list of nodes in the path
attribute : string
the name of the attribute to get the value of for each edge.
If not specified, the complete data dict is returned for each edge.
minimize_key : string
if there are parallel edges between two nodes, select the one with the
lowest value of minimize_key
retrieve_default : Callable[Tuple[Any, Any], Any]
Function called with the edge nodes as parameters to retrieve a default value, if the edge does not
contain the given attribute. Per default, a `KeyError` is raised
Returns
-------
attribute_values : list
list of edge attribute values
"""
attribute_values = []
for u, v in zip(route[:-1], route[1:]):
# if there are parallel edges between two nodes, select the one with the
# lowest value of minimize_key
data = min(G.get_edge_data(u, v).values(), key=lambda x: x[minimize_key])
if attribute is None:
attribute_value = data
elif retrieve_default is not None:
attribute_value = data.get(attribute, retrieve_default(u, v))
else:
attribute_value = data[attribute]
attribute_values.append(attribute_value)
return attribute_values | Get a list of attribute values for each edge in a path.
Parameters
----------
G : networkx multidigraph
route : list
list of nodes in the path
attribute : string
the name of the attribute to get the value of for each edge.
If not specified, the complete data dict is returned for each edge.
minimize_key : string
if there are parallel edges between two nodes, select the one with the
lowest value of minimize_key
retrieve_default : Callable[Tuple[Any, Any], Any]
Function called with the edge nodes as parameters to retrieve a default value, if the edge does not
contain the given attribute. Per default, a `KeyError` is raised
Returns
-------
attribute_values : list
    list of edge attribute values | Below is the instruction that describes the task:
### Input:
Get a list of attribute values for each edge in a path.
Parameters
----------
G : networkx multidigraph
route : list
list of nodes in the path
attribute : string
the name of the attribute to get the value of for each edge.
If not specified, the complete data dict is returned for each edge.
minimize_key : string
if there are parallel edges between two nodes, select the one with the
lowest value of minimize_key
retrieve_default : Callable[Tuple[Any, Any], Any]
Function called with the edge nodes as parameters to retrieve a default value, if the edge does not
contain the given attribute. Per default, a `KeyError` is raised
Returns
-------
attribute_values : list
list of edge attribute values
### Response:
def get_route_edge_attributes(G, route, attribute=None, minimize_key='length', retrieve_default=None):
    """
    Get a list of attribute values for each edge in a path.

    Parameters
    ----------
    G : networkx multidigraph
    route : list
        list of nodes in the path
    attribute : string
        the name of the attribute to get the value of for each edge.
        If not specified, the complete data dict is returned for each edge.
    minimize_key : string
        if there are parallel edges between two nodes, select the one with the
        lowest value of minimize_key
    retrieve_default : Callable[Tuple[Any, Any], Any]
        Function called with the edge nodes as parameters to retrieve a default
        value, if the edge does not contain the given attribute. Per default, a
        `KeyError` is raised

    Returns
    -------
    attribute_values : list
        list of edge attribute values
    """
    attribute_values = []
    for u, v in zip(route[:-1], route[1:]):
        # if there are parallel edges between two nodes, select the one with
        # the lowest value of minimize_key
        data = min(G.get_edge_data(u, v).values(), key=lambda x: x[minimize_key])
        if attribute is None:
            attribute_value = data
        elif attribute not in data and retrieve_default is not None:
            # only invoke the fallback when the attribute is actually missing,
            # so retrieve_default's cost/side effects don't fire on every edge
            # (the old dict.get(attribute, retrieve_default(u, v)) evaluated
            # the default eagerly for each edge)
            attribute_value = retrieve_default(u, v)
        else:
            # raises KeyError if the attribute is missing and no default
            # retriever was supplied, matching the documented contract
            attribute_value = data[attribute]
        attribute_values.append(attribute_value)
    return attribute_values
def curl_remote_name(cls, file_url):
"""Download file_url, and save as a file name of the URL.
It behaves like "curl -O or --remote-name".
It raises HTTPError if the file_url not found.
"""
tar_gz_file_name = file_url.split('/')[-1]
if sys.version_info >= (3, 2):
from urllib.request import urlopen
from urllib.error import HTTPError
else:
from urllib2 import urlopen
from urllib2 import HTTPError
response = None
try:
response = urlopen(file_url)
except HTTPError as e:
message = 'Download failed: URL: {0}, reason: {1}'.format(
file_url, e)
if 'HTTP Error 404' in str(e):
raise RemoteFileNotFoundError(message)
else:
raise InstallError(message)
tar_gz_file_obj = io.BytesIO(response.read())
with open(tar_gz_file_name, 'wb') as f_out:
f_out.write(tar_gz_file_obj.read())
return tar_gz_file_name | Download file_url, and save as a file name of the URL.
It behaves like "curl -O or --remote-name".
It raises HTTPError if the file_url not found. | Below is the instruction that describes the task:
### Input:
Download file_url, and save as a file name of the URL.
It behaves like "curl -O or --remote-name".
It raises HTTPError if the file_url not found.
### Response:
def curl_remote_name(cls, file_url):
    """Download file_url, and save as a file name of the URL.

    It behaves like "curl -O or --remote-name".

    :param str file_url: URL to download.
    :return: the local file name (the last path component of the URL).
    :raises RemoteFileNotFoundError: if the server responds with HTTP 404.
    :raises InstallError: for any other HTTP error.
    """
    tar_gz_file_name = file_url.split('/')[-1]
    # urllib was reorganized in Python 3; pick the right module at runtime
    # to keep Python 2 compatibility.
    if sys.version_info >= (3, 2):
        from urllib.request import urlopen
        from urllib.error import HTTPError
    else:
        from urllib2 import urlopen
        from urllib2 import HTTPError
    try:
        response = urlopen(file_url)
    except HTTPError as e:
        message = 'Download failed: URL: {0}, reason: {1}'.format(
            file_url, e)
        if 'HTTP Error 404' in str(e):
            raise RemoteFileNotFoundError(message)
        else:
            raise InstallError(message)
    try:
        # Write the payload straight to disk instead of buffering the whole
        # archive through an intermediate io.BytesIO copy.
        with open(tar_gz_file_name, 'wb') as f_out:
            f_out.write(response.read())
    finally:
        # urlopen handles are not context-managed here; close explicitly so
        # the underlying socket is not leaked.
        response.close()
    return tar_gz_file_name
def get_language_and_region(self):
"""
Returns the combined language+region string or \x00\x00 for the default locale
:return:
"""
if self.locale != 0:
_language = self._unpack_language_or_region([self.locale & 0xff, (self.locale & 0xff00) >> 8, ], ord('a'))
_region = self._unpack_language_or_region([(self.locale & 0xff0000) >> 16, (self.locale & 0xff000000) >> 24, ], ord('0'))
return (_language + "-r" + _region) if _region else _language
return "\x00\x00" | Returns the combined language+region string or \x00\x00 for the default locale
:return: | Below is the instruction that describes the task:
### Input:
Returns the combined language+region string or \x00\x00 for the default locale
:return:
### Response:
def get_language_and_region(self):
    """
    Return the combined language+region string, or "\x00\x00" for the
    default locale.

    :return: e.g. "en-rUS", a bare language such as "de" when no region
        is set, or "\x00\x00" when the locale is the default (0)
    """
    if self.locale == 0:
        return "\x00\x00"
    lang_bytes = [self.locale & 0xff, (self.locale & 0xff00) >> 8, ]
    _language = self._unpack_language_or_region(lang_bytes, ord('a'))
    region_bytes = [(self.locale & 0xff0000) >> 16, (self.locale & 0xff000000) >> 24, ]
    _region = self._unpack_language_or_region(region_bytes, ord('0'))
    if _region:
        return _language + "-r" + _region
    return _language
def load_favorites(self):
"""Fetches the MAL character favorites page and sets the current character's favorites attributes.
:rtype: :class:`.Character`
:return: Current character object.
"""
character = self.session.session.get(u'http://myanimelist.net/character/' + str(self.id) + u'/' + utilities.urlencode(self.name) + u'/favorites').text
self.set(self.parse_favorites(utilities.get_clean_dom(character)))
return self | Fetches the MAL character favorites page and sets the current character's favorites attributes.
:rtype: :class:`.Character`
:return: Current character object. | Below is the instruction that describes the task:
### Input:
Fetches the MAL character favorites page and sets the current character's favorites attributes.
:rtype: :class:`.Character`
:return: Current character object.
### Response:
def load_favorites(self):
    """Fetch the MAL character favorites page and set the current
    character's favorites attributes.

    :rtype: :class:`.Character`
    :return: Current character object.
    """
    url = (u'http://myanimelist.net/character/' + str(self.id) + u'/'
           + utilities.urlencode(self.name) + u'/favorites')
    page_html = self.session.session.get(url).text
    self.set(self.parse_favorites(utilities.get_clean_dom(page_html)))
    return self
def check_attr(node, n):
""" Check if ATTR has to be normalized
after this instruction has been translated
to intermediate code.
"""
if len(node.children) > n:
return node.children[n] | Check if ATTR has to be normalized
after this instruction has been translated
to intermediate code. | Below is the the instruction that describes the task:
### Input:
Check if ATTR has to be normalized
after this instruction has been translated
to intermediate code.
### Response:
def check_attr(node, n):
    """Return child ``n`` of ``node`` when it exists.

    Used to check whether ATTR has to be normalized after this
    instruction has been translated to intermediate code.  Returns
    None (implicitly, as in the original contract) when the node has
    no child at index ``n``.
    """
    has_child = len(node.children) > n
    return node.children[n] if has_child else None
def get_context(self, name, value, attrs):
"""Missing method of django.forms.widgets.Widget class."""
context = {}
context['widget'] = {
'name': name,
'type': 'text',
'is_hidden': self.is_hidden,
'required': self.is_required,
'value': self.format_value(value),
'attrs': {**self.attrs, **(attrs or {})},
'template_name': self.template_name,
}
return context | Missing method of django.forms.widgets.Widget class. | Below is the the instruction that describes the task:
### Input:
Missing method of django.forms.widgets.Widget class.
### Response:
def get_context(self, name, value, attrs):
    """Missing method of django.forms.widgets.Widget class."""
    # merge the widget's own attrs with the per-render attrs; the
    # per-render ones win on key collisions
    merged_attrs = dict(self.attrs)
    merged_attrs.update(attrs or {})
    widget = {
        'name': name,
        'type': 'text',
        'is_hidden': self.is_hidden,
        'required': self.is_required,
        'value': self.format_value(value),
        'attrs': merged_attrs,
        'template_name': self.template_name,
    }
    return {'widget': widget}
def get_content_macro_by_hash(self, content_id, version, macro_hash, callback=None):
"""
Returns the body of a macro (in storage format) with the given hash.
This resource is primarily used by connect applications that require the body of macro to perform their work.
The hash is generated by connect during render time of the local macro holder and
is usually only relevant during the scope of one request. For optimisation purposes, this hash will usually
live for multiple requests.
Collecting a macro by its hash should now be considered deprecated and will be replaced,
transparently with macroIds. This resource is currently only called from connect addons
which will eventually all use the
{@link #getContentById(com.atlassian.confluence.api.model.content.id.ContentId,
java.util.List, Integer, String)} resource.
To make the migration as seamless as possible, this resource will match macros against a generated hash or a
stored macroId. This will allow add ons to work during the migration period.
:param content_id (string): A string containing the id of the content.
:param version (int): The version of the content which the hash belongs.
:param macro_hash (string): The macroId to find the correct macro
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the endpoint, or the results of the callback.
Will raise requests.HTTPError on bad input, potentially.
"""
return self._service_get_request("rest/api/content/{id}/history/{version}/macro/hash/{hash}"
"".format(id=content_id, version=version, hash=macro_hash), callback=callback) | Returns the body of a macro (in storage format) with the given hash.
This resource is primarily used by connect applications that require the body of macro to perform their work.
The hash is generated by connect during render time of the local macro holder and
is usually only relevant during the scope of one request. For optimisation purposes, this hash will usually
live for multiple requests.
Collecting a macro by its hash should now be considered deprecated and will be replaced,
transparently with macroIds. This resource is currently only called from connect addons
which will eventually all use the
{@link #getContentById(com.atlassian.confluence.api.model.content.id.ContentId,
java.util.List, Integer, String)} resource.
To make the migration as seamless as possible, this resource will match macros against a generated hash or a
stored macroId. This will allow add ons to work during the migration period.
:param content_id (string): A string containing the id of the content.
:param version (int): The version of the content which the hash belongs.
:param macro_hash (string): The macroId to find the correct macro
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the endpoint, or the results of the callback.
Will raise requests.HTTPError on bad input, potentially. | Below is the the instruction that describes the task:
### Input:
Returns the body of a macro (in storage format) with the given hash.
This resource is primarily used by connect applications that require the body of macro to perform their work.
The hash is generated by connect during render time of the local macro holder and
is usually only relevant during the scope of one request. For optimisation purposes, this hash will usually
live for multiple requests.
Collecting a macro by its hash should now be considered deprecated and will be replaced,
transparently with macroIds. This resource is currently only called from connect addons
which will eventually all use the
{@link #getContentById(com.atlassian.confluence.api.model.content.id.ContentId,
java.util.List, Integer, String)} resource.
To make the migration as seamless as possible, this resource will match macros against a generated hash or a
stored macroId. This will allow add ons to work during the migration period.
:param content_id (string): A string containing the id of the content.
:param version (int): The version of the content which the hash belongs.
:param macro_hash (string): The macroId to find the correct macro
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the endpoint, or the results of the callback.
Will raise requests.HTTPError on bad input, potentially.
### Response:
def get_content_macro_by_hash(self, content_id, version, macro_hash, callback=None):
    """
    Return the body of a macro (in storage format) with the given hash.

    This resource is primarily used by connect applications that require the
    body of a macro to perform their work.  The hash is generated by connect
    during render time of the local macro holder and is usually only relevant
    during the scope of one request; for optimisation purposes it will usually
    live for multiple requests.

    Collecting a macro by its hash should now be considered deprecated and
    will be replaced, transparently, with macroIds.  To make the migration as
    seamless as possible, this resource matches macros against a generated
    hash or a stored macroId, which allows add-ons to work during the
    migration period.

    :param content_id (string): A string containing the id of the content.
    :param version (int): The version of the content which the hash belongs.
    :param macro_hash (string): The macroId to find the correct macro
    :param callback: OPTIONAL: The callback to execute on the resulting data,
        before the method returns.  Default: None (no callback, raw data
        returned).
    :return: The JSON data returned from the endpoint, or the results of the
        callback.  Will raise requests.HTTPError on bad input, potentially.
    """
    endpoint = "rest/api/content/{id}/history/{version}/macro/hash/{hash}".format(
        id=content_id, version=version, hash=macro_hash)
    return self._service_get_request(endpoint, callback=callback)
def add_to_tor(self, protocol):
'''
Returns a Deferred which fires with 'self' after at least one
descriptor has been uploaded. Errback if no descriptor upload
succeeds.
'''
upload_d = _await_descriptor_upload(protocol, self, progress=None, await_all_uploads=False)
# _add_ephemeral_service takes a TorConfig but we don't have
# that here .. and also we're just keeping this for
# backwards-compatability anyway so instead of trying to
# re-use that helper I'm leaving this original code here. So
# this is what it supports and that's that:
ports = ' '.join(map(lambda x: 'Port=' + x.strip(), self._ports))
cmd = 'ADD_ONION %s %s' % (self._key_blob, ports)
ans = yield protocol.queue_command(cmd)
ans = find_keywords(ans.split('\n'))
self.hostname = ans['ServiceID'] + '.onion'
if self._key_blob.startswith('NEW:'):
self.private_key = ans['PrivateKey']
else:
self.private_key = self._key_blob
log.msg('Created hidden-service at', self.hostname)
log.msg("Created '{}', waiting for descriptor uploads.".format(self.hostname))
yield upload_d | Returns a Deferred which fires with 'self' after at least one
descriptor has been uploaded. Errback if no descriptor upload
succeeds. | Below is the the instruction that describes the task:
### Input:
Returns a Deferred which fires with 'self' after at least one
descriptor has been uploaded. Errback if no descriptor upload
succeeds.
### Response:
def add_to_tor(self, protocol):
    '''
    Returns a Deferred which fires with 'self' after at least one
    descriptor has been uploaded. Errback if no descriptor upload
    succeeds.

    NOTE(review): this is a generator presumably driven by Twisted's
    inlineCallbacks (the decorator is outside this view) — confirm.
    `protocol` looks like a TorControlProtocol, inferred from the
    queue_command usage; verify against callers.
    '''
    # Start watching for descriptor uploads *before* creating the service,
    # so no upload event can be missed.
    upload_d = _await_descriptor_upload(protocol, self, progress=None, await_all_uploads=False)
    # _add_ephemeral_service takes a TorConfig but we don't have
    # that here .. and also we're just keeping this for
    # backwards-compatability anyway so instead of trying to
    # re-use that helper I'm leaving this original code here. So
    # this is what it supports and that's that:
    ports = ' '.join(map(lambda x: 'Port=' + x.strip(), self._ports))
    cmd = 'ADD_ONION %s %s' % (self._key_blob, ports)
    ans = yield protocol.queue_command(cmd)
    ans = find_keywords(ans.split('\n'))
    self.hostname = ans['ServiceID'] + '.onion'
    if self._key_blob.startswith('NEW:'):
        # Tor generated a fresh key for us; remember it so the service can
        # be re-created later.
        self.private_key = ans['PrivateKey']
    else:
        # Caller supplied the key material; keep their blob unchanged.
        self.private_key = self._key_blob
    log.msg('Created hidden-service at', self.hostname)
    log.msg("Created '{}', waiting for descriptor uploads.".format(self.hostname))
    yield upload_d |
def delete_entry(sender, instance, **kwargs):
"""
Deletes Entry instance corresponding to specified instance.
:param sender: the sending class.
:param instance: the instance being deleted.
"""
from ..models import Entry
Entry.objects.get_for_model(instance)[0].delete() | Deletes Entry instance corresponding to specified instance.
:param sender: the sending class.
:param instance: the instance being deleted. | Below is the the instruction that describes the task:
### Input:
Deletes Entry instance corresponding to specified instance.
:param sender: the sending class.
:param instance: the instance being deleted.
### Response:
def delete_entry(sender, instance, **kwargs):
    """
    Delete the Entry instance corresponding to the specified instance.

    :param sender: the sending class.
    :param instance: the instance being deleted.
    """
    # imported here to avoid an import cycle at module load time
    from ..models import Entry
    entry = Entry.objects.get_for_model(instance)[0]
    entry.delete()
def write_acceptance_criteria_to_file(self):
"""
Writes current GUI acceptance criteria to criteria.txt or
pmag_criteria.txt depending on data model
"""
crit_list = list(self.acceptance_criteria.keys())
crit_list.sort()
rec = {}
rec['pmag_criteria_code'] = "ACCEPT"
# rec['criteria_definition']=""
rec['criteria_definition'] = "acceptance criteria for study"
rec['er_citation_names'] = "This study"
for crit in crit_list:
if type(self.acceptance_criteria[crit]['value']) == str:
if self.acceptance_criteria[crit]['value'] != "-999" and self.acceptance_criteria[crit]['value'] != "":
rec[crit] = self.acceptance_criteria[crit]['value']
elif type(self.acceptance_criteria[crit]['value']) == int:
if self.acceptance_criteria[crit]['value'] != -999:
rec[crit] = "%.i" % (
self.acceptance_criteria[crit]['value'])
elif type(self.acceptance_criteria[crit]['value']) == float:
if float(self.acceptance_criteria[crit]['value']) == -999:
continue
decimal_points = self.acceptance_criteria[crit]['decimal_points']
if decimal_points != -999:
command = "rec[crit]='%%.%sf'%%(self.acceptance_criteria[crit]['value'])" % (
decimal_points)
exec(command)
else:
rec[crit] = "%e" % (
self.acceptance_criteria[crit]['value'])
pmag.magic_write(os.path.join(self.WD, "pmag_criteria.txt"), [
rec], "pmag_criteria") | Writes current GUI acceptance criteria to criteria.txt or
pmag_criteria.txt depending on data model | Below is the the instruction that describes the task:
### Input:
Writes current GUI acceptance criteria to criteria.txt or
pmag_criteria.txt depending on data model
### Response:
def write_acceptance_criteria_to_file(self):
    """
    Writes current GUI acceptance criteria to criteria.txt or
    pmag_criteria.txt depending on data model

    Builds a single "ACCEPT" criteria record from
    self.acceptance_criteria, skipping unset values (-999 or the empty
    string), and writes it with pmag.magic_write to pmag_criteria.txt
    in the working directory self.WD.
    """
    crit_list = sorted(self.acceptance_criteria.keys())
    rec = {}
    rec['pmag_criteria_code'] = "ACCEPT"
    # rec['criteria_definition']=""
    rec['criteria_definition'] = "acceptance criteria for study"
    rec['er_citation_names'] = "This study"
    for crit in crit_list:
        value = self.acceptance_criteria[crit]['value']
        # a value of -999 (or "" for strings) means "criterion not set"
        if type(value) == str:
            if value != "-999" and value != "":
                rec[crit] = value
        elif type(value) == int:
            if value != -999:
                rec[crit] = "%.i" % value
        elif type(value) == float:
            if float(value) == -999:
                continue
            decimal_points = self.acceptance_criteria[crit]['decimal_points']
            if decimal_points != -999:
                # Format with the requested precision directly, instead of
                # string-building an assignment statement and exec()ing it
                # as the old code did.
                rec[crit] = "%.{}f".format(decimal_points) % value
            else:
                rec[crit] = "%e" % value
    pmag.magic_write(os.path.join(self.WD, "pmag_criteria.txt"), [rec],
                     "pmag_criteria")
def action_rename(self):
"""
Rename a shortcut
"""
# get old and new name from args
old = self.args['<old>']
new = self.args['<new>']
# select the old shortcut
self.db_query('''
SELECT id FROM shortcuts WHERE name=?
''', (old,))
r = self.db_fetch_one()
# error if old doesn't exist
if r == None:
print_err('Shortcut "%s" does not exist!' % old)
return
# error if new exists
if self.shortcut_exists(new):
print_err('Shortcut "%s" already exists!' % new)
return
id = r[0]
# rename in DB
self.db_exec('''
UPDATE shortcuts SET name=? WHERE id=?
''', (new, id))
# show OK message
print_msg('Shortcut "%s" renamed to "%s".' % (old, new)) | Rename a shortcut | Below is the the instruction that describes the task:
### Input:
Rename a shortcut
### Response:
def action_rename(self):
    """
    Rename a shortcut.

    Reads the old and new names from self.args ('<old>' and '<new>'),
    verifies that the old shortcut exists and the new name is free,
    then updates the row in the shortcuts table and prints a
    confirmation message.
    """
    # get old and new name from args
    old = self.args['<old>']
    new = self.args['<new>']
    # select the old shortcut
    self.db_query('''
        SELECT id FROM shortcuts WHERE name=?
        ''', (old,))
    r = self.db_fetch_one()
    # error if old doesn't exist (use "is None": a falsy row must not
    # be confused with "no row", and identity is the idiomatic check)
    if r is None:
        print_err('Shortcut "%s" does not exist!' % old)
        return
    # error if new exists
    if self.shortcut_exists(new):
        print_err('Shortcut "%s" already exists!' % new)
        return
    shortcut_id = r[0]  # renamed from "id", which shadowed the builtin
    # rename in DB
    self.db_exec('''
        UPDATE shortcuts SET name=? WHERE id=?
        ''', (new, shortcut_id))
    # show OK message
    print_msg('Shortcut "%s" renamed to "%s".' % (old, new))
def _get_pika_properties(properties_in):
"""Return a :class:`pika.spec.BasicProperties` object for a
:class:`rejected.data.Properties` object.
:param dict properties_in: Properties to convert
:rtype: :class:`pika.spec.BasicProperties`
"""
properties = pika.BasicProperties()
for key in properties_in or {}:
if properties_in.get(key) is not None:
setattr(properties, key, properties_in.get(key))
return properties | Return a :class:`pika.spec.BasicProperties` object for a
:class:`rejected.data.Properties` object.
:param dict properties_in: Properties to convert
:rtype: :class:`pika.spec.BasicProperties` | Below is the the instruction that describes the task:
### Input:
Return a :class:`pika.spec.BasicProperties` object for a
:class:`rejected.data.Properties` object.
:param dict properties_in: Properties to convert
:rtype: :class:`pika.spec.BasicProperties`
### Response:
def _get_pika_properties(properties_in):
    """Return a :class:`pika.spec.BasicProperties` object for a
    :class:`rejected.data.Properties` object.

    :param dict properties_in: Properties to convert
    :rtype: :class:`pika.spec.BasicProperties`
    """
    properties = pika.BasicProperties()
    if properties_in:
        # copy over only the attributes that are actually set
        for key, value in properties_in.items():
            if value is not None:
                setattr(properties, key, value)
    return properties
def timeout(seconds):
"""
Raises a TimeoutError if a function does not terminate within
specified seconds.
"""
def _timeout_error(signal, frame):
raise TimeoutError("Operation did not finish within \
{} seconds".format(seconds))
def timeout_decorator(func):
@wraps(func)
def timeout_wrapper(*args, **kwargs):
signal.signal(signal.SIGALRM, _timeout_error)
signal.alarm(seconds)
try:
return func(*args, **kwargs)
finally:
signal.alarm(0)
return timeout_wrapper
return timeout_decorator | Raises a TimeoutError if a function does not terminate within
specified seconds. | Below is the the instruction that describes the task:
### Input:
Raises a TimeoutError if a function does not terminate within
specified seconds.
### Response:
def timeout(seconds):
    """
    Decorator factory: raise TimeoutError if the decorated function does
    not terminate within the specified number of seconds.

    Relies on SIGALRM, so it only works on Unix and only in the main
    thread.

    :param int seconds: maximum run time allowed for the wrapped call
    """
    def _timeout_error(signum, frame):
        # param renamed from "signal", which shadowed the signal module;
        # the old message also embedded a line-continuation backslash and
        # indentation, putting a run of spaces inside the text
        raise TimeoutError(
            "Operation did not finish within {} seconds".format(seconds))
    def timeout_decorator(func):
        @wraps(func)
        def timeout_wrapper(*args, **kwargs):
            signal.signal(signal.SIGALRM, _timeout_error)
            signal.alarm(seconds)
            try:
                return func(*args, **kwargs)
            finally:
                # always cancel the pending alarm, even when func raises
                signal.alarm(0)
        return timeout_wrapper
    return timeout_decorator
def define_from_values(cls, xdtu, ydtu, zdtu, xdtu_0, ydtu_0, zdtu_0):
"""Define class object from from provided values.
Parameters
----------
xdtu : float
XDTU fits keyword value.
ydtu : float
YDTU fits keyword value.
zdtu : float
ZDTU fits keyword value.
xdtu_0 : float
XDTU_0 fits keyword value.
ydtu_0 : float
YDTU_0 fits keyword value.
zdtu_0 : float
ZDTU_0 fits keyword value.
"""
self = DtuConfiguration()
# define DTU variables
self.xdtu = xdtu
self.ydtu = ydtu
self.zdtu = zdtu
self.xdtu_0 = xdtu_0
self.ydtu_0 = ydtu_0
self.zdtu_0 = zdtu_0
return self | Define class object from from provided values.
Parameters
----------
xdtu : float
XDTU fits keyword value.
ydtu : float
YDTU fits keyword value.
zdtu : float
ZDTU fits keyword value.
xdtu_0 : float
XDTU_0 fits keyword value.
ydtu_0 : float
YDTU_0 fits keyword value.
zdtu_0 : float
ZDTU_0 fits keyword value. | Below is the the instruction that describes the task:
### Input:
Define class object from from provided values.
Parameters
----------
xdtu : float
XDTU fits keyword value.
ydtu : float
YDTU fits keyword value.
zdtu : float
ZDTU fits keyword value.
xdtu_0 : float
XDTU_0 fits keyword value.
ydtu_0 : float
YDTU_0 fits keyword value.
zdtu_0 : float
ZDTU_0 fits keyword value.
### Response:
def define_from_values(cls, xdtu, ydtu, zdtu, xdtu_0, ydtu_0, zdtu_0):
    """Define class object from provided values.

    Parameters
    ----------
    xdtu : float
        XDTU fits keyword value.
    ydtu : float
        YDTU fits keyword value.
    zdtu : float
        ZDTU fits keyword value.
    xdtu_0 : float
        XDTU_0 fits keyword value.
    ydtu_0 : float
        YDTU_0 fits keyword value.
    zdtu_0 : float
        ZDTU_0 fits keyword value.

    Returns
    -------
    instance of cls with the DTU attributes set.
    """
    # instantiate through cls rather than hard-coding DtuConfiguration(),
    # so subclasses get an instance of their own type from this alternate
    # constructor
    self = cls()
    # define DTU variables
    self.xdtu = xdtu
    self.ydtu = ydtu
    self.zdtu = zdtu
    self.xdtu_0 = xdtu_0
    self.ydtu_0 = ydtu_0
    self.zdtu_0 = zdtu_0
    return self
def __pathToTuple(self, path):
"""
Convert directory or file path to its tuple identifier.
Parameters
----------
path : str
Path to convert. It can look like /, /directory, /directory/ or /directory/filename.
Returns
-------
tup_id : tuple
Two element tuple identifier of directory/file of (`directory`, `filename`) format. If path leads to main
directory, then both fields of tuple will be ``None``. If path leads to a directory, then field `filename`
will be ``None``.
Raises
------
YTFS.PathConvertError
When invalid path is given.
"""
if not path or path.count('/') > 2:
raise YTFS.PathConvertError("Bad path given") # empty or too deep path
try:
split = path.split('/')
except (AttributeError, TypeError):
raise TypeError("Path has to be string") #path is not a string
if split[0]:
raise YTFS.PathConvertError("Path needs to start with '/'") # path doesn't start with '/'.
del split[0]
try:
if not split[-1]: split.pop() # given path ended with '/'.
except IndexError:
raise YTFS.PathConvertError("Bad path given") # at least one element in split should exist at the moment
if len(split) > 2:
raise YTFS.PathConvertError("Path is too deep. Max allowed level is 2") # should happen due to first check, but ...
try:
d = split[0]
except IndexError:
d = None
try:
f = split[1]
except IndexError:
f = None
if not d and f:
raise YTFS.PathConvertError("Bad path given") # filename is present, but directory is not #sheeeeeeiiit
return (d, f) | Convert directory or file path to its tuple identifier.
Parameters
----------
path : str
Path to convert. It can look like /, /directory, /directory/ or /directory/filename.
Returns
-------
tup_id : tuple
Two element tuple identifier of directory/file of (`directory`, `filename`) format. If path leads to main
directory, then both fields of tuple will be ``None``. If path leads to a directory, then field `filename`
will be ``None``.
Raises
------
YTFS.PathConvertError
When invalid path is given. | Below is the the instruction that describes the task:
### Input:
Convert directory or file path to its tuple identifier.
Parameters
----------
path : str
Path to convert. It can look like /, /directory, /directory/ or /directory/filename.
Returns
-------
tup_id : tuple
Two element tuple identifier of directory/file of (`directory`, `filename`) format. If path leads to main
directory, then both fields of tuple will be ``None``. If path leads to a directory, then field `filename`
will be ``None``.
Raises
------
YTFS.PathConvertError
When invalid path is given.
### Response:
def __pathToTuple(self, path):
        """
        Convert directory or file path to its tuple identifier.

        Parameters
        ----------
        path : str
            Path to convert. It can look like /, /directory, /directory/ or /directory/filename.

        Returns
        -------
        tup_id : tuple
            Two element tuple identifier of directory/file of (`directory`, `filename`) format. If path leads to main
            directory, then both fields of tuple will be ``None``. If path leads to a directory, then field `filename`
            will be ``None``.

        Raises
        ------
        YTFS.PathConvertError
            When invalid path is given.
        """
        if not path or path.count('/') > 2:
            raise YTFS.PathConvertError("Bad path given") # empty or too deep path
        try:
            split = path.split('/')
        except (AttributeError, TypeError):
            raise TypeError("Path has to be string") #path is not a string
        if split[0]:
            raise YTFS.PathConvertError("Path needs to start with '/'") # path doesn't start with '/'.
        # drop the empty leading element produced by the initial '/'
        del split[0]
        try:
            if not split[-1]: split.pop() # given path ended with '/'.
        except IndexError:
            raise YTFS.PathConvertError("Bad path given") # at least one element in split should exist at the moment
        if len(split) > 2:
            raise YTFS.PathConvertError("Path is too deep. Max allowed level is 2") # should happen due to first check, but ...
        # after normalisation: [] -> main dir, [d] -> directory, [d, f] -> file
        try:
            d = split[0]
        except IndexError:
            d = None
        try:
            f = split[1]
        except IndexError:
            f = None
        # a path like "//file" yields an empty directory with a filename set,
        # which is not addressable
        if not d and f:
            raise YTFS.PathConvertError("Bad path given") # filename is present, but directory is not #sheeeeeeiiit
        return (d, f) |
def hacking_import_rules(logical_line, filename, noqa):
r"""Check for imports.
OpenStack HACKING guide recommends one import per line:
Do not import more than one module per line
Examples:
Okay: from nova.compute import api
H301: from nova.compute import api, utils
Do not use wildcard import
Do not make relative imports
Examples:
Okay: from os import path
Okay: from os import path as p
Okay: from os import (path as p)
Okay: import os.path
Okay: from nova.compute import rpcapi
Okay: from six.moves.urllib import parse
H303: from os.path import *
H304: from .compute import rpcapi
"""
# TODO(jogo): make the following doctests pass:
# H301: import os, sys
# TODO(mordred: We need to split this into different checks so that they
# can be disabled by command line switches properly
if noqa:
return
split_line = logical_line.split()
split_line_len = len(split_line)
if (split_line_len > 1 and split_line[0] in ('import', 'from') and
not core.is_import_exception(split_line[1])):
pos = logical_line.find(',')
if pos != -1:
if split_line[0] == 'from':
yield pos, "H301: one import per line"
pos = logical_line.find('*')
if pos != -1:
yield pos, "H303: No wildcard (*) import."
return
if split_line_len in (2, 4, 6) and split_line[1] != "__future__":
if 'from' == split_line[0] and split_line_len > 3:
mod = '.'.join((split_line[1], split_line[3]))
if core.is_import_exception(mod):
return
if RE_RELATIVE_IMPORT.search(logical_line):
yield logical_line.find('.'), (
"H304: No relative imports. '%s' is a relative import"
% logical_line)
return | r"""Check for imports.
OpenStack HACKING guide recommends one import per line:
Do not import more than one module per line
Examples:
Okay: from nova.compute import api
H301: from nova.compute import api, utils
Do not use wildcard import
Do not make relative imports
Examples:
Okay: from os import path
Okay: from os import path as p
Okay: from os import (path as p)
Okay: import os.path
Okay: from nova.compute import rpcapi
Okay: from six.moves.urllib import parse
H303: from os.path import *
H304: from .compute import rpcapi | Below is the instruction that describes the task:
### Input:
r"""Check for imports.
OpenStack HACKING guide recommends one import per line:
Do not import more than one module per line
Examples:
Okay: from nova.compute import api
H301: from nova.compute import api, utils
Do not use wildcard import
Do not make relative imports
Examples:
Okay: from os import path
Okay: from os import path as p
Okay: from os import (path as p)
Okay: import os.path
Okay: from nova.compute import rpcapi
Okay: from six.moves.urllib import parse
H303: from os.path import *
H304: from .compute import rpcapi
### Response:
def hacking_import_rules(logical_line, filename, noqa):
r"""Check for imports.
OpenStack HACKING guide recommends one import per line:
Do not import more than one module per line
Examples:
Okay: from nova.compute import api
H301: from nova.compute import api, utils
Do not use wildcard import
Do not make relative imports
Examples:
Okay: from os import path
Okay: from os import path as p
Okay: from os import (path as p)
Okay: import os.path
Okay: from nova.compute import rpcapi
Okay: from six.moves.urllib import parse
H303: from os.path import *
H304: from .compute import rpcapi
"""
# TODO(jogo): make the following doctests pass:
# H301: import os, sys
# TODO(mordred: We need to split this into different checks so that they
# can be disabled by command line switches properly
if noqa:
return
split_line = logical_line.split()
split_line_len = len(split_line)
if (split_line_len > 1 and split_line[0] in ('import', 'from') and
not core.is_import_exception(split_line[1])):
pos = logical_line.find(',')
if pos != -1:
if split_line[0] == 'from':
yield pos, "H301: one import per line"
pos = logical_line.find('*')
if pos != -1:
yield pos, "H303: No wildcard (*) import."
return
if split_line_len in (2, 4, 6) and split_line[1] != "__future__":
if 'from' == split_line[0] and split_line_len > 3:
mod = '.'.join((split_line[1], split_line[3]))
if core.is_import_exception(mod):
return
if RE_RELATIVE_IMPORT.search(logical_line):
yield logical_line.find('.'), (
"H304: No relative imports. '%s' is a relative import"
% logical_line)
return |
def findConfigFile(cls, filename):
""" Search the configuration path (specified via the NTA_CONF_PATH
environment variable) for the given filename. If found, return the complete
path to the file.
:param filename: (string) name of file to locate
"""
paths = cls.getConfigPaths()
for p in paths:
testPath = os.path.join(p, filename)
if os.path.isfile(testPath):
return os.path.join(p, filename) | Search the configuration path (specified via the NTA_CONF_PATH
environment variable) for the given filename. If found, return the complete
path to the file.
:param filename: (string) name of file to locate | Below is the instruction that describes the task:
### Input:
Search the configuration path (specified via the NTA_CONF_PATH
environment variable) for the given filename. If found, return the complete
path to the file.
:param filename: (string) name of file to locate
### Response:
def findConfigFile(cls, filename):
""" Search the configuration path (specified via the NTA_CONF_PATH
environment variable) for the given filename. If found, return the complete
path to the file.
:param filename: (string) name of file to locate
"""
paths = cls.getConfigPaths()
for p in paths:
testPath = os.path.join(p, filename)
if os.path.isfile(testPath):
return os.path.join(p, filename) |
def _stage_from_version(version):
"""return "prd", "stg", or "dev" for the given version string. A value is always returned"""
if version:
m = re.match(r"^(?P<xyz>\d+\.\d+\.\d+)(?P<extra>.*)", version)
if m:
return "stg" if m.group("extra") else "prd"
return "dev" | return "prd", "stg", or "dev" for the given version string. A value is always returned | Below is the instruction that describes the task:
### Input:
return "prd", "stg", or "dev" for the given version string. A value is always returned
### Response:
def _stage_from_version(version):
"""return "prd", "stg", or "dev" for the given version string. A value is always returned"""
if version:
m = re.match(r"^(?P<xyz>\d+\.\d+\.\d+)(?P<extra>.*)", version)
if m:
return "stg" if m.group("extra") else "prd"
return "dev" |
def _get_session_cookies(session, access_token):
"""Use the access token to get session cookies.
Raises GoogleAuthError if session cookies could not be loaded.
Returns dict of cookies.
"""
headers = {'Authorization': 'Bearer {}'.format(access_token)}
try:
r = session.get(('https://accounts.google.com/accounts/OAuthLogin'
'?source=hangups&issueuberauth=1'), headers=headers)
r.raise_for_status()
except requests.RequestException as e:
raise GoogleAuthError('OAuthLogin request failed: {}'.format(e))
uberauth = r.text
try:
r = session.get(('https://accounts.google.com/MergeSession?'
'service=mail&'
'continue=http://www.google.com&uberauth={}')
.format(uberauth), headers=headers)
r.raise_for_status()
except requests.RequestException as e:
raise GoogleAuthError('MergeSession request failed: {}'.format(e))
cookies = session.cookies.get_dict(domain='.google.com')
if cookies == {}:
raise GoogleAuthError('Failed to find session cookies')
return cookies | Use the access token to get session cookies.
Raises GoogleAuthError if session cookies could not be loaded.
Returns dict of cookies. | Below is the instruction that describes the task:
### Input:
Use the access token to get session cookies.
Raises GoogleAuthError if session cookies could not be loaded.
Returns dict of cookies.
### Response:
def _get_session_cookies(session, access_token):
"""Use the access token to get session cookies.
Raises GoogleAuthError if session cookies could not be loaded.
Returns dict of cookies.
"""
headers = {'Authorization': 'Bearer {}'.format(access_token)}
try:
r = session.get(('https://accounts.google.com/accounts/OAuthLogin'
'?source=hangups&issueuberauth=1'), headers=headers)
r.raise_for_status()
except requests.RequestException as e:
raise GoogleAuthError('OAuthLogin request failed: {}'.format(e))
uberauth = r.text
try:
r = session.get(('https://accounts.google.com/MergeSession?'
'service=mail&'
'continue=http://www.google.com&uberauth={}')
.format(uberauth), headers=headers)
r.raise_for_status()
except requests.RequestException as e:
raise GoogleAuthError('MergeSession request failed: {}'.format(e))
cookies = session.cookies.get_dict(domain='.google.com')
if cookies == {}:
raise GoogleAuthError('Failed to find session cookies')
return cookies |
def save(self):
"""
:return: save this environment on Ariane server (create or update)
"""
LOGGER.debug("Environment.save")
post_payload = {}
consolidated_osi_id = []
if self.id is not None:
post_payload['environmentID'] = self.id
if self.name is not None:
post_payload['environmentName'] = self.name
if self.description is not None:
post_payload['environmentDescription'] = self.description
if self.color_code is not None:
post_payload['environmentColorCode'] = self.color_code
if self.osi_ids is not None:
consolidated_osi_id = copy.deepcopy(self.osi_ids)
if self.osi_2_rm is not None:
for osi_2_rm in self.osi_2_rm:
if osi_2_rm.id is None:
osi_2_rm.sync()
consolidated_osi_id.remove(osi_2_rm.id)
if self.osi_2_add is not None:
for osi_id_2_add in self.osi_2_add:
if osi_id_2_add.id is None:
osi_id_2_add.save()
consolidated_osi_id.append(osi_id_2_add.id)
post_payload['environmentOSInstancesID'] = consolidated_osi_id
args = {'http_operation': 'POST', 'operation_path': '', 'parameters': {'payload': json.dumps(post_payload)}}
response = EnvironmentService.requester.call(args)
if response.rc != 0:
LOGGER.warning(
'Environment.save - Problem while saving environment ' + self.name +
'. Reason: ' + str(response.response_content) + '-' + str(response.error_message) +
" (" + str(response.rc) + ")"
)
else:
self.id = response.response_content['environmentID']
if self.osi_2_add is not None:
for osi_2_add in self.osi_2_add:
osi_2_add.sync()
if self.osi_2_rm is not None:
for osi_2_rm in self.osi_2_rm:
osi_2_rm.sync()
self.osi_2_add.clear()
self.osi_2_rm.clear()
self.sync()
return self | :return: save this environment on Ariane server (create or update) | Below is the instruction that describes the task:
### Input:
:return: save this environment on Ariane server (create or update)
### Response:
def save(self):
"""
:return: save this environment on Ariane server (create or update)
"""
LOGGER.debug("Environment.save")
post_payload = {}
consolidated_osi_id = []
if self.id is not None:
post_payload['environmentID'] = self.id
if self.name is not None:
post_payload['environmentName'] = self.name
if self.description is not None:
post_payload['environmentDescription'] = self.description
if self.color_code is not None:
post_payload['environmentColorCode'] = self.color_code
if self.osi_ids is not None:
consolidated_osi_id = copy.deepcopy(self.osi_ids)
if self.osi_2_rm is not None:
for osi_2_rm in self.osi_2_rm:
if osi_2_rm.id is None:
osi_2_rm.sync()
consolidated_osi_id.remove(osi_2_rm.id)
if self.osi_2_add is not None:
for osi_id_2_add in self.osi_2_add:
if osi_id_2_add.id is None:
osi_id_2_add.save()
consolidated_osi_id.append(osi_id_2_add.id)
post_payload['environmentOSInstancesID'] = consolidated_osi_id
args = {'http_operation': 'POST', 'operation_path': '', 'parameters': {'payload': json.dumps(post_payload)}}
response = EnvironmentService.requester.call(args)
if response.rc != 0:
LOGGER.warning(
'Environment.save - Problem while saving environment ' + self.name +
'. Reason: ' + str(response.response_content) + '-' + str(response.error_message) +
" (" + str(response.rc) + ")"
)
else:
self.id = response.response_content['environmentID']
if self.osi_2_add is not None:
for osi_2_add in self.osi_2_add:
osi_2_add.sync()
if self.osi_2_rm is not None:
for osi_2_rm in self.osi_2_rm:
osi_2_rm.sync()
self.osi_2_add.clear()
self.osi_2_rm.clear()
self.sync()
return self |
def _apply_rate(self, max_rate, aggressive=False):
"""
Try to adjust the rate (characters/second)
of the fragments of the list,
so that it does not exceed the given ``max_rate``.
This is done by testing whether some slack
can be borrowed from the fragment before
the faster current one.
If ``aggressive`` is ``True``,
the slack might be retrieved from the fragment after
the faster current one,
if the previous fragment could not contribute enough slack.
"""
self.log(u"Called _apply_rate")
self.log([u" Aggressive: %s", aggressive])
self.log([u" Max rate: %.3f", max_rate])
regular_fragments = list(self.smflist.regular_fragments)
if len(regular_fragments) <= 1:
self.log(u" The list contains at most one regular fragment, returning")
return
faster_fragments = [(i, f) for i, f in regular_fragments if (f.rate is not None) and (f.rate >= max_rate + Decimal("0.001"))]
if len(faster_fragments) == 0:
self.log(u" No regular fragment faster than max rate, returning")
return
self.log_warn(u" Some fragments have rate faster than max rate:")
self.log([u" %s", [i for i, f in faster_fragments]])
self.log(u"Fixing rate for faster fragments...")
for frag_index, fragment in faster_fragments:
self.smflist.fix_fragment_rate(frag_index, max_rate, aggressive=aggressive)
self.log(u"Fixing rate for faster fragments... done")
faster_fragments = [(i, f) for i, f in regular_fragments if (f.rate is not None) and (f.rate >= max_rate + Decimal("0.001"))]
if len(faster_fragments) > 0:
self.log_warn(u" Some fragments still have rate faster than max rate:")
self.log([u" %s", [i for i, f in faster_fragments]]) | Try to adjust the rate (characters/second)
of the fragments of the list,
so that it does not exceed the given ``max_rate``.
This is done by testing whether some slack
can be borrowed from the fragment before
the faster current one.
If ``aggressive`` is ``True``,
the slack might be retrieved from the fragment after
the faster current one,
if the previous fragment could not contribute enough slack. | Below is the instruction that describes the task:
### Input:
Try to adjust the rate (characters/second)
of the fragments of the list,
so that it does not exceed the given ``max_rate``.
This is done by testing whether some slack
can be borrowed from the fragment before
the faster current one.
If ``aggressive`` is ``True``,
the slack might be retrieved from the fragment after
the faster current one,
if the previous fragment could not contribute enough slack.
### Response:
def _apply_rate(self, max_rate, aggressive=False):
"""
Try to adjust the rate (characters/second)
of the fragments of the list,
so that it does not exceed the given ``max_rate``.
This is done by testing whether some slack
can be borrowed from the fragment before
the faster current one.
If ``aggressive`` is ``True``,
the slack might be retrieved from the fragment after
the faster current one,
if the previous fragment could not contribute enough slack.
"""
self.log(u"Called _apply_rate")
self.log([u" Aggressive: %s", aggressive])
self.log([u" Max rate: %.3f", max_rate])
regular_fragments = list(self.smflist.regular_fragments)
if len(regular_fragments) <= 1:
self.log(u" The list contains at most one regular fragment, returning")
return
faster_fragments = [(i, f) for i, f in regular_fragments if (f.rate is not None) and (f.rate >= max_rate + Decimal("0.001"))]
if len(faster_fragments) == 0:
self.log(u" No regular fragment faster than max rate, returning")
return
self.log_warn(u" Some fragments have rate faster than max rate:")
self.log([u" %s", [i for i, f in faster_fragments]])
self.log(u"Fixing rate for faster fragments...")
for frag_index, fragment in faster_fragments:
self.smflist.fix_fragment_rate(frag_index, max_rate, aggressive=aggressive)
self.log(u"Fixing rate for faster fragments... done")
faster_fragments = [(i, f) for i, f in regular_fragments if (f.rate is not None) and (f.rate >= max_rate + Decimal("0.001"))]
if len(faster_fragments) > 0:
self.log_warn(u" Some fragments still have rate faster than max rate:")
self.log([u" %s", [i for i, f in faster_fragments]]) |
def add_child(self, child):
"""Add a child FSEntry to this FSEntry.
Only FSEntrys with a type of 'directory' can have children.
This does not detect cyclic parent/child relationships, but that will
cause problems.
:param metsrw.fsentry.FSEntry child: FSEntry to add as a child
:return: The newly added child
:raises ValueError: If this FSEntry cannot have children.
:raises ValueError: If the child and the parent are the same
"""
if self.type.lower() != "directory":
raise ValueError("Only directory objects can have children")
if child is self:
raise ValueError("Cannot be a child of itself!")
if child not in self._children:
self._children.append(child)
child.parent = self
return child | Add a child FSEntry to this FSEntry.
Only FSEntrys with a type of 'directory' can have children.
This does not detect cyclic parent/child relationships, but that will
cause problems.
:param metsrw.fsentry.FSEntry child: FSEntry to add as a child
:return: The newly added child
:raises ValueError: If this FSEntry cannot have children.
:raises ValueError: If the child and the parent are the same | Below is the instruction that describes the task:
### Input:
Add a child FSEntry to this FSEntry.
Only FSEntrys with a type of 'directory' can have children.
This does not detect cyclic parent/child relationships, but that will
cause problems.
:param metsrw.fsentry.FSEntry child: FSEntry to add as a child
:return: The newly added child
:raises ValueError: If this FSEntry cannot have children.
:raises ValueError: If the child and the parent are the same
### Response:
def add_child(self, child):
"""Add a child FSEntry to this FSEntry.
Only FSEntrys with a type of 'directory' can have children.
This does not detect cyclic parent/child relationships, but that will
cause problems.
:param metsrw.fsentry.FSEntry child: FSEntry to add as a child
:return: The newly added child
:raises ValueError: If this FSEntry cannot have children.
:raises ValueError: If the child and the parent are the same
"""
if self.type.lower() != "directory":
raise ValueError("Only directory objects can have children")
if child is self:
raise ValueError("Cannot be a child of itself!")
if child not in self._children:
self._children.append(child)
child.parent = self
return child |
def apply_status_code(self, status_code):
"""
When a trace entity is generated under the http context,
the status code will affect this entity's fault/error/throttle flags.
Flip these flags based on status code.
"""
self._check_ended()
if not status_code:
return
if status_code >= 500:
self.add_fault_flag()
elif status_code == 429:
self.add_throttle_flag()
self.add_error_flag()
elif status_code >= 400:
self.add_error_flag() | When a trace entity is generated under the http context,
the status code will affect this entity's fault/error/throttle flags.
Flip these flags based on status code. | Below is the instruction that describes the task:
### Input:
When a trace entity is generated under the http context,
the status code will affect this entity's fault/error/throttle flags.
Flip these flags based on status code.
### Response:
def apply_status_code(self, status_code):
"""
When a trace entity is generated under the http context,
the status code will affect this entity's fault/error/throttle flags.
Flip these flags based on status code.
"""
self._check_ended()
if not status_code:
return
if status_code >= 500:
self.add_fault_flag()
elif status_code == 429:
self.add_throttle_flag()
self.add_error_flag()
elif status_code >= 400:
self.add_error_flag() |
def scan(self):
"""Trigger the wifi interface to scan."""
self._logger.info("iface '%s' scans", self.name())
self._wifi_ctrl.scan(self._raw_obj) | Trigger the wifi interface to scan. | Below is the instruction that describes the task:
### Input:
Trigger the wifi interface to scan.
### Response:
def scan(self):
"""Trigger the wifi interface to scan."""
self._logger.info("iface '%s' scans", self.name())
self._wifi_ctrl.scan(self._raw_obj) |
def register_rpc(name=None):
"""Decorator. Allows registering a function for RPC.
* http://uwsgi.readthedocs.io/en/latest/RPC.html
Example:
.. code-block:: python
@register_rpc()
def expose_me():
do()
:param str|unicode name: RPC function name to associate
with decorated function.
:rtype: callable
"""
def wrapper(func):
func_name = func.__name__
rpc_name = name or func_name
uwsgi.register_rpc(rpc_name, func)
_LOG.debug("Registering '%s' for RPC under '%s' alias ...", func_name, rpc_name)
return func
return wrapper | Decorator. Allows registering a function for RPC.
* http://uwsgi.readthedocs.io/en/latest/RPC.html
Example:
.. code-block:: python
@register_rpc()
def expose_me():
do()
:param str|unicode name: RPC function name to associate
with decorated function.
:rtype: callable | Below is the instruction that describes the task:
### Input:
Decorator. Allows registering a function for RPC.
* http://uwsgi.readthedocs.io/en/latest/RPC.html
Example:
.. code-block:: python
@register_rpc()
def expose_me():
do()
:param str|unicode name: RPC function name to associate
with decorated function.
:rtype: callable
### Response:
def register_rpc(name=None):
"""Decorator. Allows registering a function for RPC.
* http://uwsgi.readthedocs.io/en/latest/RPC.html
Example:
.. code-block:: python
@register_rpc()
def expose_me():
do()
:param str|unicode name: RPC function name to associate
with decorated function.
:rtype: callable
"""
def wrapper(func):
func_name = func.__name__
rpc_name = name or func_name
uwsgi.register_rpc(rpc_name, func)
_LOG.debug("Registering '%s' for RPC under '%s' alias ...", func_name, rpc_name)
return func
return wrapper |
def escape_vals(vals, escape_numerics=True):
"""
Escapes a list of values to a string, converting to
unicode for safety.
"""
# Ints formatted as floats to disambiguate with counter mode
ints, floats = "%.1f", "%.10f"
escaped = []
for v in vals:
if isinstance(v, np.timedelta64):
v = "'"+str(v)+"'"
elif isinstance(v, np.datetime64):
v = "'"+str(v.astype('datetime64[ns]'))+"'"
elif not isnumeric(v):
v = "'"+unicode(bytes_to_unicode(v))+"'"
else:
if v % 1 == 0:
v = ints % v
else:
v = (floats % v)[:-1]
if escape_numerics:
v = "'"+v+"'"
escaped.append(v)
return escaped | Escapes a list of values to a string, converting to
unicode for safety. | Below is the instruction that describes the task:
### Input:
Escapes a list of values to a string, converting to
unicode for safety.
### Response:
def escape_vals(vals, escape_numerics=True):
"""
Escapes a list of values to a string, converting to
unicode for safety.
"""
# Ints formatted as floats to disambiguate with counter mode
ints, floats = "%.1f", "%.10f"
escaped = []
for v in vals:
if isinstance(v, np.timedelta64):
v = "'"+str(v)+"'"
elif isinstance(v, np.datetime64):
v = "'"+str(v.astype('datetime64[ns]'))+"'"
elif not isnumeric(v):
v = "'"+unicode(bytes_to_unicode(v))+"'"
else:
if v % 1 == 0:
v = ints % v
else:
v = (floats % v)[:-1]
if escape_numerics:
v = "'"+v+"'"
escaped.append(v)
return escaped |
def add_mapping(agent, prefix, ip):
"""Adds a mapping with a contract.
It has high latency but gives some kind of guarantee."""
return _broadcast(agent, AddMappingManager, RecordType.record_A,
prefix, ip) | Adds a mapping with a contract.
It has high latency but gives some kind of guarantee. | Below is the instruction that describes the task:
### Input:
Adds a mapping with a contract.
It has high latency but gives some kind of guarantee.
### Response:
def add_mapping(agent, prefix, ip):
"""Adds a mapping with a contract.
It has high latency but gives some kind of guarantee."""
return _broadcast(agent, AddMappingManager, RecordType.record_A,
prefix, ip) |
def _set_flow_rate(pipette, params) -> None:
"""
Set flow rate in uL/mm, to value obtained from command's params.
"""
flow_rate_param = params['flowRate']
if not (flow_rate_param > 0):
raise RuntimeError('Positive flowRate param required')
pipette.flow_rate = {
'aspirate': flow_rate_param,
'dispense': flow_rate_param
} | Set flow rate in uL/mm, to value obtained from command's params. | Below is the instruction that describes the task:
### Input:
Set flow rate in uL/mm, to value obtained from command's params.
### Response:
def _set_flow_rate(pipette, params) -> None:
"""
Set flow rate in uL/mm, to value obtained from command's params.
"""
flow_rate_param = params['flowRate']
if not (flow_rate_param > 0):
raise RuntimeError('Positive flowRate param required')
pipette.flow_rate = {
'aspirate': flow_rate_param,
'dispense': flow_rate_param
} |
def get_filtered_dfs(lib, expr):
"""
Main: Get all data frames that match the given expression
:return dict: Filenames and data frames (filtered)
"""
logger_dataframes.info("enter get_filtered_dfs")
dfs = {}
tt = None
# Process all lipds files or one lipds file?
specific_files = _check_expr_filename(expr)
# Determine the table type wanted
if "chron" in expr:
tt = "chron"
elif "paleo" in expr:
tt = "paleo"
# Get all filenames of target type.
if tt:
if specific_files:
# The user has specified a single LiPD file to get data frames from.
for file in specific_files:
if file in lib:
lo_meta = lib[file].get_metadata()
lo_dfs = lib[file].get_dfs()
# Only start a search if this lipds file has data frames available. Otherwise, pointless.
if lo_dfs:
# Get list of all matching filenames
filenames = _match_dfs_expr(lo_meta, expr, tt)
# Update our output data frames dictionary
dfs.update(_match_filenames_w_dfs(filenames, lo_dfs))
else:
print("Unable to find LiPD file in Library: {}".format(file))
# Process all LiPD files in the library. A file has not been specified in the expression.
else:
# Loop once on each lipds object in the library
for ln, lo in lib.items():
# Get the
lo_meta = lo.get_metadata()
lo_dfs = lo.get_dfs()
# Only start a search if this lipds file has data frames available. Otherwise, pointless.
if lo_dfs:
# Get list of all matching filenames
filenames = _match_dfs_expr(lo_meta, expr, tt)
# Update our output data frames dictionary
dfs.update(_match_filenames_w_dfs(filenames, lo_dfs))
logger_dataframes.info("exit get_filtered_dfs")
return dfs | Main: Get all data frames that match the given expression
:return dict: Filenames and data frames (filtered) | Below is the instruction that describes the task:
### Input:
Main: Get all data frames that match the given expression
:return dict: Filenames and data frames (filtered)
### Response:
def get_filtered_dfs(lib, expr):
"""
Main: Get all data frames that match the given expression
:return dict: Filenames and data frames (filtered)
"""
logger_dataframes.info("enter get_filtered_dfs")
dfs = {}
tt = None
# Process all lipds files or one lipds file?
specific_files = _check_expr_filename(expr)
# Determine the table type wanted
if "chron" in expr:
tt = "chron"
elif "paleo" in expr:
tt = "paleo"
# Get all filenames of target type.
if tt:
if specific_files:
# The user has specified a single LiPD file to get data frames from.
for file in specific_files:
if file in lib:
lo_meta = lib[file].get_metadata()
lo_dfs = lib[file].get_dfs()
# Only start a search if this lipds file has data frames available. Otherwise, pointless.
if lo_dfs:
# Get list of all matching filenames
filenames = _match_dfs_expr(lo_meta, expr, tt)
# Update our output data frames dictionary
dfs.update(_match_filenames_w_dfs(filenames, lo_dfs))
else:
print("Unable to find LiPD file in Library: {}".format(file))
# Process all LiPD files in the library. A file has not been specified in the expression.
else:
# Loop once on each lipds object in the library
for ln, lo in lib.items():
# Get the
lo_meta = lo.get_metadata()
lo_dfs = lo.get_dfs()
# Only start a search if this lipds file has data frames available. Otherwise, pointless.
if lo_dfs:
# Get list of all matching filenames
filenames = _match_dfs_expr(lo_meta, expr, tt)
# Update our output data frames dictionary
dfs.update(_match_filenames_w_dfs(filenames, lo_dfs))
logger_dataframes.info("exit get_filtered_dfs")
return dfs |
def _get_type(self, s):
"""
Converts a string from Scratch to its proper type in Python. Expects a
string with its delimiting quotes in place. Returns either a string,
int or float.
"""
# TODO: what if the number is bigger than an int or float?
if s.startswith('"') and s.endswith('"'):
return s[1:-1]
elif s.find('.') != -1:
return float(s)
else:
return int(s) | Converts a string from Scratch to its proper type in Python. Expects a
string with its delimiting quotes in place. Returns either a string,
int or float. | Below is the instruction that describes the task:
### Input:
Converts a string from Scratch to its proper type in Python. Expects a
string with its delimiting quotes in place. Returns either a string,
int or float.
### Response:
def _get_type(self, s):
"""
Converts a string from Scratch to its proper type in Python. Expects a
string with its delimiting quotes in place. Returns either a string,
int or float.
"""
# TODO: what if the number is bigger than an int or float?
if s.startswith('"') and s.endswith('"'):
return s[1:-1]
elif s.find('.') != -1:
return float(s)
else:
return int(s) |
def check_completeness_table(completeness_table, catalogue):
'''
Check to ensure completeness table is in the correct format
`completeness_table = np.array([[year_, mag_i]]) for i in number of bins`
:param np.ndarray completeness_table:
Completeness table in format [[year, mag]]
:param catalogue:
Instance of openquake.hmtk.seismicity.catalogue.Catalogue class
:returns:
Correct completeness table
'''
if isinstance(completeness_table, np.ndarray):
assert np.shape(completeness_table)[1] == 2
return completeness_table
elif isinstance(completeness_table, list):
# Assuming list has only two elements
assert len(completeness_table) == 2
return np.array([[completeness_table[0], completeness_table[1]]])
else:
# Accepts the minimum magnitude and earliest year of the catalogue
return np.array([[np.min(catalogue.data['year']),
np.min(catalogue.data['magnitude'])]]) | Check to ensure completeness table is in the correct format
`completeness_table = np.array([[year_, mag_i]]) for i in number of bins`
:param np.ndarray completeness_table:
Completeness table in format [[year, mag]]
:param catalogue:
Instance of openquake.hmtk.seismicity.catalogue.Catalogue class
:returns:
Correct completeness table | Below is the instruction that describes the task:
### Input:
Check to ensure completeness table is in the correct format
`completeness_table = np.array([[year_, mag_i]]) for i in number of bins`
:param np.ndarray completeness_table:
Completeness table in format [[year, mag]]
:param catalogue:
Instance of openquake.hmtk.seismicity.catalogue.Catalogue class
:returns:
Correct completeness table
### Response:
def check_completeness_table(completeness_table, catalogue):
'''
Check to ensure completeness table is in the correct format
`completeness_table = np.array([[year_, mag_i]]) for i in number of bins`
:param np.ndarray completeness_table:
Completeness table in format [[year, mag]]
:param catalogue:
Instance of openquake.hmtk.seismicity.catalogue.Catalogue class
:returns:
Correct completeness table
'''
if isinstance(completeness_table, np.ndarray):
assert np.shape(completeness_table)[1] == 2
return completeness_table
elif isinstance(completeness_table, list):
# Assuming list has only two elements
assert len(completeness_table) == 2
return np.array([[completeness_table[0], completeness_table[1]]])
else:
# Accepts the minimum magnitude and earliest year of the catalogue
return np.array([[np.min(catalogue.data['year']),
np.min(catalogue.data['magnitude'])]]) |
def remove_role_from_user(user, role):
"""
Remove a role from a user.
"""
user = _query_to_user(user)
role = _query_to_role(role)
if click.confirm(f'Are you sure you want to remove {role!r} from {user!r}?'):
user.roles.remove(role)
user_manager.save(user, commit=True)
click.echo(f'Successfully removed {role!r} from {user!r}')
else:
click.echo('Cancelled.') | Remove a role from a user. | Below is the instruction that describes the task:
### Input:
Remove a role from a user.
### Response:
def remove_role_from_user(user, role):
"""
Remove a role from a user.
"""
user = _query_to_user(user)
role = _query_to_role(role)
if click.confirm(f'Are you sure you want to remove {role!r} from {user!r}?'):
user.roles.remove(role)
user_manager.save(user, commit=True)
click.echo(f'Successfully removed {role!r} from {user!r}')
else:
click.echo('Cancelled.') |
def _extract_table_root(d, current, pc):
    """
    Extract data from the root level of a paleoData table.
    :param dict d: paleoData table
    :param dict current: Current root data
    :param str pc: paleoData or chronData
    :return dict current: Current root data
    """
    logger_ts.info("enter extract_table_root")
    try:
        # Copy only the scalar string metadata, prefixing each key with the
        # section name (e.g. "paleoData_<key>"). Nested structures are skipped.
        current.update({pc + '_' + key: val
                        for key, val in d.items()
                        if isinstance(val, str)})
    except Exception as e:
        logger_ts.error("extract_table_root: {}".format(e))
    return current
:param dict d: paleoData table
:param dict current: Current root data
:param str pc: paleoData or chronData
    :return dict current: Current root data | Below is the instruction that describes the task:
### Input:
Extract data from the root level of a paleoData table.
:param dict d: paleoData table
:param dict current: Current root data
:param str pc: paleoData or chronData
:return dict current: Current root data
### Response:
def _extract_table_root(d, current, pc):
"""
Extract data from the root level of a paleoData table.
:param dict d: paleoData table
:param dict current: Current root data
:param str pc: paleoData or chronData
:return dict current: Current root data
"""
logger_ts.info("enter extract_table_root")
try:
for k, v in d.items():
if isinstance(v, str):
current[pc + '_' + k] = v
except Exception as e:
logger_ts.error("extract_table_root: {}".format(e))
return current |
def set_urn(self, urn):
    """
    Change the CTS URN of the author or adds a new one (if no URN is assigned).

    :param urn: the CTS URN value to assign.
    :return: True on success.
    :raises Exception: re-raises any error coming from the underlying store.
    """
    Type = self.session.get_class(surf.ns.ECRM['E55_Type'])
    Identifier = self.session.get_class(surf.ns.ECRM['E42_Identifier'])
    id_uri = "%s/cts_urn" % str(self.subject)
    try:
        # `identifier` avoids shadowing the builtin `id`.
        identifier = Identifier(id_uri)
        identifier.rdfs_label = Literal(urn)
        identifier.ecrm_P2_has_type = Type(BASE_URI_TYPES % "CTS_URN")
        identifier.save()
        return True
    except Exception:
        # Fixed Python 2-only `except Exception, e:` syntax (a SyntaxError on
        # Python 3); a bare `raise` also preserves the original traceback.
        raise
### Input:
Change the CTS URN of the author or adds a new one (if no URN is assigned).
### Response:
def set_urn(self,urn):
"""
Change the CTS URN of the author or adds a new one (if no URN is assigned).
"""
Type = self.session.get_class(surf.ns.ECRM['E55_Type'])
Identifier = self.session.get_class(surf.ns.ECRM['E42_Identifier'])
id_uri = "%s/cts_urn"%str(self.subject)
try:
id = Identifier(id_uri)
id.rdfs_label = Literal(urn)
id.ecrm_P2_has_type = Type(BASE_URI_TYPES % "CTS_URN")
id.save()
return True
except Exception, e:
raise e |
def _create_scsi_devices(scsi_devices):
    '''
    Returns a list of vim.vm.device.VirtualDeviceSpec objects representing
    SCSI controllers
    scsi_devices:
        List of SCSI device properties
    '''
    if not scsi_devices:
        return []
    log.trace('Creating SCSI devices %s',
              [scsi['adapter'] for scsi in scsi_devices])
    # unitNumber for disk attachment, 0:0 1st 0 is the controller busNumber,
    # 2nd is the unitNumber
    # Placeholder device keys are assigned sequentially from -1000 downward.
    return [
        _apply_scsi_controller(controller['adapter'],
                               controller['type'],
                               controller['bus_sharing'],
                               key,
                               controller['bus_number'],
                               'add')
        for key, controller in zip(range(-1000, -1050, -1), scsi_devices)
    ]
SCSI controllers
scsi_devices:
        List of SCSI device properties | Below is the instruction that describes the task:
### Input:
Returns a list of vim.vm.device.VirtualDeviceSpec objects representing
SCSI controllers
scsi_devices:
List of SCSI device properties
### Response:
def _create_scsi_devices(scsi_devices):
'''
Returns a list of vim.vm.device.VirtualDeviceSpec objects representing
SCSI controllers
scsi_devices:
List of SCSI device properties
'''
keys = range(-1000, -1050, -1)
scsi_specs = []
if scsi_devices:
devs = [scsi['adapter'] for scsi in scsi_devices]
log.trace('Creating SCSI devices %s', devs)
# unitNumber for disk attachment, 0:0 1st 0 is the controller busNumber,
# 2nd is the unitNumber
for (key, scsi_controller) in zip(keys, scsi_devices):
# create the SCSI controller
scsi_spec = _apply_scsi_controller(scsi_controller['adapter'],
scsi_controller['type'],
scsi_controller['bus_sharing'],
key,
scsi_controller['bus_number'],
'add')
scsi_specs.append(scsi_spec)
return scsi_specs |
def last_modified(self) -> Optional[datetime.datetime]:
    """The value of Last-Modified HTTP header, or None.
    This header is represented as a `datetime` object.
    """
    httpdate = self._headers.get(hdrs.LAST_MODIFIED)
    if httpdate is None:
        return None
    timetuple = parsedate(httpdate)
    if timetuple is None:
        # Header present but not a parsable HTTP date.
        return None
    # First six fields are (year, month, day, hour, minute, second);
    # HTTP dates are always UTC.
    return datetime.datetime(*timetuple[:6], tzinfo=datetime.timezone.utc)
This header is represented as a `datetime` object. | Below is the the instruction that describes the task:
### Input:
The value of Last-Modified HTTP header, or None.
This header is represented as a `datetime` object.
### Response:
def last_modified(self) -> Optional[datetime.datetime]:
"""The value of Last-Modified HTTP header, or None.
This header is represented as a `datetime` object.
"""
httpdate = self._headers.get(hdrs.LAST_MODIFIED)
if httpdate is not None:
timetuple = parsedate(httpdate)
if timetuple is not None:
return datetime.datetime(*timetuple[:6],
tzinfo=datetime.timezone.utc)
return None |
def send(self, topic, value=None, timeout=60, key=None, partition=None, timestamp_ms=None):
    """Publish a message to a topic.
    - ``topic`` (str): topic where the message will be published
    - ``value``: message value. Must be type bytes, or be serializable to bytes via configured value_serializer.
    If value is None, key is required and message acts as a `delete`.
    - ``timeout``
    - ``key``: a key to associate with the message. Can be used to determine which partition
    to send the message to. If partition is None (and producer's partitioner config is left as default),
    then messages with the same key will be delivered to the same partition (but if key is None,
    partition is chosen randomly). Must be type bytes, or be serializable to bytes via configured key_serializer.
    - ``partition`` (int): optionally specify a partition.
    If not set, the partition will be selected using the configured `partitioner`.
    - ``timestamp_ms`` (int): epoch milliseconds (from Jan 1 1970 UTC) to use as the message timestamp.
    Defaults to current time.
    """
    # Block until the broker acknowledges the message or `timeout` expires.
    self.producer.send(
        topic,
        value=value,
        key=key,
        partition=partition,
        timestamp_ms=timestamp_ms,
    ).get(timeout=timeout)
- ``topic`` (str): topic where the message will be published
- ``value``: message value. Must be type bytes, or be serializable to bytes via configured value_serializer.
If value is None, key is required and message acts as a `delete`.
- ``timeout``
- ``key``: a key to associate with the message. Can be used to determine which partition
to send the message to. If partition is None (and producer's partitioner config is left as default),
then messages with the same key will be delivered to the same partition (but if key is None,
partition is chosen randomly). Must be type bytes, or be serializable to bytes via configured key_serializer.
- ``partition`` (int): optionally specify a partition.
If not set, the partition will be selected using the configured `partitioner`.
- ``timestamp_ms`` (int): epoch milliseconds (from Jan 1 1970 UTC) to use as the message timestamp.
Defaults to current time. | Below is the the instruction that describes the task:
### Input:
Publish a message to a topic.
- ``topic`` (str): topic where the message will be published
- ``value``: message value. Must be type bytes, or be serializable to bytes via configured value_serializer.
If value is None, key is required and message acts as a `delete`.
- ``timeout``
- ``key``: a key to associate with the message. Can be used to determine which partition
to send the message to. If partition is None (and producer's partitioner config is left as default),
then messages with the same key will be delivered to the same partition (but if key is None,
partition is chosen randomly). Must be type bytes, or be serializable to bytes via configured key_serializer.
- ``partition`` (int): optionally specify a partition.
If not set, the partition will be selected using the configured `partitioner`.
- ``timestamp_ms`` (int): epoch milliseconds (from Jan 1 1970 UTC) to use as the message timestamp.
Defaults to current time.
### Response:
def send(self, topic, value=None, timeout=60, key=None, partition=None, timestamp_ms=None):
"""Publish a message to a topic.
- ``topic`` (str): topic where the message will be published
- ``value``: message value. Must be type bytes, or be serializable to bytes via configured value_serializer.
If value is None, key is required and message acts as a `delete`.
- ``timeout``
- ``key``: a key to associate with the message. Can be used to determine which partition
to send the message to. If partition is None (and producer's partitioner config is left as default),
then messages with the same key will be delivered to the same partition (but if key is None,
partition is chosen randomly). Must be type bytes, or be serializable to bytes via configured key_serializer.
- ``partition`` (int): optionally specify a partition.
If not set, the partition will be selected using the configured `partitioner`.
- ``timestamp_ms`` (int): epoch milliseconds (from Jan 1 1970 UTC) to use as the message timestamp.
Defaults to current time.
"""
future = self.producer.send(topic, value=value, key=key, partition=partition, timestamp_ms=timestamp_ms)
future.get(timeout=timeout) |
def build_cpp(build_context, target, compiler_config, workspace_dir):
    """Compile and link a C++ binary for `target`.

    :param build_context: Build context used to resolve sources, run commands
        inside the build environment, and register artifacts.
    :param target: Target being built; its name determines the binary path.
    :param compiler_config: Compiler/linker configuration (linker binary,
        link flags, etc.).
    :param workspace_dir: Host-side scratch directory; wiped before building.
    """
    # Start from a clean workspace so stale objects never leak into the link.
    rmtree(workspace_dir)
    # Target name components become the output binary's relative path.
    binary = join(*split(target.name))
    objects = link_cpp_artifacts(build_context, target, workspace_dir, True)
    # Translate the host workspace path into its in-buildenv equivalent.
    buildenv_workspace = build_context.conf.host_to_buildenv_path(
        workspace_dir)
    objects.extend(compile_cc(
        build_context, compiler_config, target.props.in_buildenv,
        get_source_files(target, build_context), workspace_dir,
        buildenv_workspace, target.props.cmd_env))
    bin_file = join(buildenv_workspace, binary)
    link_cmd = (
        [compiler_config.linker, '-o', bin_file] +
        objects + compiler_config.link_flags)
    # Link inside the target's configured build environment.
    build_context.run_in_buildenv(
        target.props.in_buildenv, link_cmd, target.props.cmd_env)
    # Register the produced binary, addressed relative to the project root.
    target.artifacts.add(AT.binary, relpath(join(workspace_dir, binary),
                                            build_context.conf.project_root), binary)
### Input:
Compile and link a C++ binary for `target`.
### Response:
def build_cpp(build_context, target, compiler_config, workspace_dir):
"""Compile and link a C++ binary for `target`."""
rmtree(workspace_dir)
binary = join(*split(target.name))
objects = link_cpp_artifacts(build_context, target, workspace_dir, True)
buildenv_workspace = build_context.conf.host_to_buildenv_path(
workspace_dir)
objects.extend(compile_cc(
build_context, compiler_config, target.props.in_buildenv,
get_source_files(target, build_context), workspace_dir,
buildenv_workspace, target.props.cmd_env))
bin_file = join(buildenv_workspace, binary)
link_cmd = (
[compiler_config.linker, '-o', bin_file] +
objects + compiler_config.link_flags)
build_context.run_in_buildenv(
target.props.in_buildenv, link_cmd, target.props.cmd_env)
target.artifacts.add(AT.binary, relpath(join(workspace_dir, binary),
build_context.conf.project_root), binary) |
def get_full_path(request):
    """Return the current relative path including the query string.
    Eg: “/foo/bar/?page=1”
    """
    query_string = request.environ.get('QUERY_STRING')
    if query_string:
        return request.fullpath + '?' + to_native(query_string)
    # No (or empty) query string: the path alone.
    return request.fullpath
Eg: “/foo/bar/?page=1” | Below is the the instruction that describes the task:
### Input:
Return the current relative path including the query string.
Eg: “/foo/bar/?page=1”
### Response:
def get_full_path(request):
"""Return the current relative path including the query string.
Eg: “/foo/bar/?page=1”
"""
path = request.fullpath
query_string = request.environ.get('QUERY_STRING')
if query_string:
path += '?' + to_native(query_string)
return path |
def load_tile_lowres(self, tile):
    '''load a lower resolution tile from cache to fill in a
    map while waiting for a higher resolution tile

    Returns a TILES_HEIGHT x TILES_WIDTH image scaled up from a coarser
    zoom level, or None if no usable lower-resolution tile is available.
    '''
    if tile.zoom == self.min_zoom:
        # Already at the coarsest zoom: nothing lower-res exists.
        return None
    # find the equivalent lower res tile
    (lat,lon) = tile.coord()
    width2 = TILES_WIDTH
    height2 = TILES_HEIGHT
    for zoom2 in range(tile.zoom-1, self.min_zoom-1, -1):
        # Each step out halves the pixel footprint of the wanted area.
        width2 //= 2
        height2 //= 2
        if width2 == 0 or height2 == 0:
            break
        tile_info = self.coord_to_tile(lat, lon, zoom2)
        # see if its in the tile cache
        key = tile_info.key()
        if key in self._tile_cache:
            img = self._tile_cache[key]
            if np.array_equal(img, np.array(self._unavailable)):
                # cached placeholder for an unavailable tile; zoom out further
                continue
        else:
            path = self.tile_to_path(tile_info)
            img = cv2.imread(path)
            if img is None:
                continue
            # add it to the tile cache
            self._tile_cache[key] = img
            # NOTE(review): popitem(0) assumes an OrderedDict-like cache that
            # evicts its oldest entry -- confirm the cache type.
            while len(self._tile_cache) > self.cache_size:
                self._tile_cache.popitem(0)
        # copy out the quadrant we want
        availx = min(TILES_WIDTH - tile_info.offsetx, width2)
        availy = min(TILES_HEIGHT - tile_info.offsety, height2)
        if availx != width2 or availy != height2:
            # wanted region straddles a tile boundary; try an even lower res
            continue
        roi = img[tile_info.offsety:tile_info.offsety+height2, tile_info.offsetx:tile_info.offsetx+width2]
        # and scale it
        scaled = cv2.resize(roi, (TILES_HEIGHT,TILES_WIDTH))
        #cv.Rectangle(scaled, (0,0), (255,255), (0,255,0), 1)
        return scaled
    return None
map while waiting for a higher resolution tile | Below is the the instruction that describes the task:
### Input:
load a lower resolution tile from cache to fill in a
map while waiting for a higher resolution tile
### Response:
def load_tile_lowres(self, tile):
'''load a lower resolution tile from cache to fill in a
map while waiting for a higher resolution tile'''
if tile.zoom == self.min_zoom:
return None
# find the equivalent lower res tile
(lat,lon) = tile.coord()
width2 = TILES_WIDTH
height2 = TILES_HEIGHT
for zoom2 in range(tile.zoom-1, self.min_zoom-1, -1):
width2 //= 2
height2 //= 2
if width2 == 0 or height2 == 0:
break
tile_info = self.coord_to_tile(lat, lon, zoom2)
# see if its in the tile cache
key = tile_info.key()
if key in self._tile_cache:
img = self._tile_cache[key]
if np.array_equal(img, np.array(self._unavailable)):
continue
else:
path = self.tile_to_path(tile_info)
img = cv2.imread(path)
if img is None:
continue
# add it to the tile cache
self._tile_cache[key] = img
while len(self._tile_cache) > self.cache_size:
self._tile_cache.popitem(0)
# copy out the quadrant we want
availx = min(TILES_WIDTH - tile_info.offsetx, width2)
availy = min(TILES_HEIGHT - tile_info.offsety, height2)
if availx != width2 or availy != height2:
continue
roi = img[tile_info.offsety:tile_info.offsety+height2, tile_info.offsetx:tile_info.offsetx+width2]
# and scale it
scaled = cv2.resize(roi, (TILES_HEIGHT,TILES_WIDTH))
#cv.Rectangle(scaled, (0,0), (255,255), (0,255,0), 1)
return scaled
return None |
def self_if_parameters(func):
    """
    If any parameter is given, the method's binded object is returned after
    executing the function. Else the function's result is returned.
    """
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        result = func(self, *args, **kwargs)
        # Fluent-interface convention: extra arguments mean "setter" usage,
        # so return the bound object for chaining; no arguments means
        # "getter" usage, so return the computed result.
        return self if (args or kwargs) else result
    return wrapper
executing the function. Else the function's result is returned. | Below is the the instruction that describes the task:
### Input:
If any parameter is given, the method's binded object is returned after
executing the function. Else the function's result is returned.
### Response:
def self_if_parameters(func):
"""
If any parameter is given, the method's binded object is returned after
executing the function. Else the function's result is returned.
"""
@wraps(func)
def wrapper(self, *args, **kwargs):
result = func(self, *args, **kwargs)
if args or kwargs:
return self
else:
return result
return wrapper |
def _parse_info(line):
"""
The output can be:
- [LaCrosseITPlusReader.10.1s (RFM12B f:0 r:17241)]
- [LaCrosseITPlusReader.10.1s (RFM12B f:0 t:10~3)]
"""
re_info = re.compile(
r'\[(?P<name>\w+).(?P<ver>.*) ' +
r'\((?P<rfm1name>\w+) (\w+):(?P<rfm1freq>\d+) ' +
r'(?P<rfm1mode>.*)\)\]')
info = {
'name': None,
'version': None,
'rfm1name': None,
'rfm1frequency': None,
'rfm1datarate': None,
'rfm1toggleinterval': None,
'rfm1togglemask': None,
}
match = re_info.match(line)
if match:
info['name'] = match.group('name')
info['version'] = match.group('ver')
info['rfm1name'] = match.group('rfm1name')
info['rfm1frequency'] = match.group('rfm1freq')
values = match.group('rfm1mode').split(':')
if values[0] == 'r':
info['rfm1datarate'] = values[1]
elif values[0] == 't':
toggle = values[1].split('~')
info['rfm1toggleinterval'] = toggle[0]
info['rfm1togglemask'] = toggle[1]
return info | The output can be:
- [LaCrosseITPlusReader.10.1s (RFM12B f:0 r:17241)]
- [LaCrosseITPlusReader.10.1s (RFM12B f:0 t:10~3)] | Below is the the instruction that describes the task:
### Input:
The output can be:
- [LaCrosseITPlusReader.10.1s (RFM12B f:0 r:17241)]
- [LaCrosseITPlusReader.10.1s (RFM12B f:0 t:10~3)]
### Response:
def _parse_info(line):
"""
The output can be:
- [LaCrosseITPlusReader.10.1s (RFM12B f:0 r:17241)]
- [LaCrosseITPlusReader.10.1s (RFM12B f:0 t:10~3)]
"""
re_info = re.compile(
r'\[(?P<name>\w+).(?P<ver>.*) ' +
r'\((?P<rfm1name>\w+) (\w+):(?P<rfm1freq>\d+) ' +
r'(?P<rfm1mode>.*)\)\]')
info = {
'name': None,
'version': None,
'rfm1name': None,
'rfm1frequency': None,
'rfm1datarate': None,
'rfm1toggleinterval': None,
'rfm1togglemask': None,
}
match = re_info.match(line)
if match:
info['name'] = match.group('name')
info['version'] = match.group('ver')
info['rfm1name'] = match.group('rfm1name')
info['rfm1frequency'] = match.group('rfm1freq')
values = match.group('rfm1mode').split(':')
if values[0] == 'r':
info['rfm1datarate'] = values[1]
elif values[0] == 't':
toggle = values[1].split('~')
info['rfm1toggleinterval'] = toggle[0]
info['rfm1togglemask'] = toggle[1]
return info |
def _on_change(self):
    """Callback if any of the values are changed."""
    # Re-render the preview label with the newly selected font options.
    self._example_label.configure(font=self.__generate_font_tuple())
### Input:
Callback if any of the values are changed.
### Response:
def _on_change(self):
"""Callback if any of the values are changed."""
font = self.__generate_font_tuple()
self._example_label.configure(font=font) |
def _get_data(self):
    """Process the IGRA2 text file for observations at site_id matching time.
    Return:
    -------
    :class: `pandas.DataFrame` containing the body data.
    :class: `pandas.DataFrame` containing the header data.
    """
    # Fetch the raw fixed-width text plus per-record dates (dates_long pairs
    # with body rows, dates with header rows, given the assignments below).
    # NOTE(review): the original comment here about splitting begin/end
    # dates described _get_data_raw's work, not this function's.
    body, header, dates_long, dates = self._get_data_raw()
    params = self._get_fwf_params()
    # Parse the fixed-width text sections into DataFrames.
    df_body = pd.read_fwf(StringIO(body), **params['body'])
    df_header = pd.read_fwf(StringIO(header), **params['header'])
    df_body['date'] = dates_long
    df_body = self._clean_body_df(df_body)
    df_header = self._clean_header_df(df_header)
    df_header['date'] = dates
    return df_body, df_header
Return:
-------
:class: `pandas.DataFrame` containing the body data.
:class: `pandas.DataFrame` containing the header data. | Below is the the instruction that describes the task:
### Input:
Process the IGRA2 text file for observations at site_id matching time.
Return:
-------
:class: `pandas.DataFrame` containing the body data.
:class: `pandas.DataFrame` containing the header data.
### Response:
def _get_data(self):
"""Process the IGRA2 text file for observations at site_id matching time.
Return:
-------
:class: `pandas.DataFrame` containing the body data.
:class: `pandas.DataFrame` containing the header data.
"""
# Split the list of times into begin and end dates. If only
# one date is supplied, set both begin and end dates equal to that date.
body, header, dates_long, dates = self._get_data_raw()
params = self._get_fwf_params()
df_body = pd.read_fwf(StringIO(body), **params['body'])
df_header = pd.read_fwf(StringIO(header), **params['header'])
df_body['date'] = dates_long
df_body = self._clean_body_df(df_body)
df_header = self._clean_header_df(df_header)
df_header['date'] = dates
return df_body, df_header |
def StopService(service_name, service_binary_name=None):
    """Stop a Windows service with the given name.

    Args:
      service_name: string The name of the service to be stopped.
      service_binary_name: string If given, also kill this binary as a best
        effort fallback solution.
    """
    # QueryServiceStatus returns: scvType, svcState, svcControls, err,
    # svcErr, svcCP, svcWH
    try:
        status = win32serviceutil.QueryServiceStatus(service_name)[1]
    except pywintypes.error as e:
        if getattr(e, "winerror", None) == winerror.ERROR_SERVICE_DOES_NOT_EXIST:
            logging.debug("Tried to stop '%s', but the service is not installed.",
                          service_name)
        else:
            logging.exception("Unable to query status of service '%s':", service_name)
        return
    # Poll for up to ~20 seconds, re-issuing the stop request unless a stop
    # is already pending.
    for _ in range(20):
        if status == win32service.SERVICE_STOPPED:
            break
        elif status != win32service.SERVICE_STOP_PENDING:
            try:
                win32serviceutil.StopService(service_name)
            except pywintypes.error:
                logging.exception("Unable to stop service '%s':", service_name)
        time.sleep(1)
        status = win32serviceutil.QueryServiceStatus(service_name)[1]
    if status == win32service.SERVICE_STOPPED:
        logging.info("Service '%s' stopped.", service_name)
        return
    elif not service_binary_name:
        # No fallback binary given; give up silently.
        return
    # Taskkill will fail on systems predating Windows XP, this is a best
    # effort fallback solution.
    output = subprocess.check_output(
        ["taskkill", "/im", "%s*" % service_binary_name, "/f"],
        shell=True,
        stdin=subprocess.PIPE,
        stderr=subprocess.PIPE)
    logging.debug("%s", output)
    # Sleep a bit to ensure that process really quits.
    time.sleep(2)
Args:
service_name: string The name of the service to be stopped.
service_binary_name: string If given, also kill this binary as a best effort
fallback solution. | Below is the the instruction that describes the task:
### Input:
Stop a Windows service with the given name.
Args:
service_name: string The name of the service to be stopped.
service_binary_name: string If given, also kill this binary as a best effort
fallback solution.
### Response:
def StopService(service_name, service_binary_name=None):
"""Stop a Windows service with the given name.
Args:
service_name: string The name of the service to be stopped.
service_binary_name: string If given, also kill this binary as a best effort
fallback solution.
"""
# QueryServiceStatus returns: scvType, svcState, svcControls, err,
# svcErr, svcCP, svcWH
try:
status = win32serviceutil.QueryServiceStatus(service_name)[1]
except pywintypes.error as e:
if getattr(e, "winerror", None) == winerror.ERROR_SERVICE_DOES_NOT_EXIST:
logging.debug("Tried to stop '%s', but the service is not installed.",
service_name)
else:
logging.exception("Unable to query status of service '%s':", service_name)
return
for _ in range(20):
if status == win32service.SERVICE_STOPPED:
break
elif status != win32service.SERVICE_STOP_PENDING:
try:
win32serviceutil.StopService(service_name)
except pywintypes.error:
logging.exception("Unable to stop service '%s':", service_name)
time.sleep(1)
status = win32serviceutil.QueryServiceStatus(service_name)[1]
if status == win32service.SERVICE_STOPPED:
logging.info("Service '%s' stopped.", service_name)
return
elif not service_binary_name:
return
# Taskkill will fail on systems predating Windows XP, this is a best
# effort fallback solution.
output = subprocess.check_output(
["taskkill", "/im", "%s*" % service_binary_name, "/f"],
shell=True,
stdin=subprocess.PIPE,
stderr=subprocess.PIPE)
logging.debug("%s", output)
# Sleep a bit to ensure that process really quits.
time.sleep(2) |
def path_is_inside(path, dirname):
    """Return True if path is under dirname (or equal to it)."""
    current = os.path.abspath(path)
    dirname = os.path.abspath(dirname)
    # Walk up the directory tree until we hit dirname or the root.
    while True:
        if current == dirname:
            return True
        parent = os.path.dirname(current)
        if parent == current:
            # Reached the filesystem root without finding dirname.
            return False
        current = parent
### Input:
Return True if path is under dirname.
### Response:
def path_is_inside(path, dirname):
"""Return True if path is under dirname."""
path = os.path.abspath(path)
dirname = os.path.abspath(dirname)
while len(path) >= len(dirname):
if path == dirname:
return True
newpath = os.path.dirname(path)
if newpath == path:
return False
path = newpath
return False |
def get_defaults_file(*a, **kw):
    """Get a file object with YAML data of configuration defaults.
    Arguments are passed through to :func:`get_defaults_str`.
    """
    # StringIO(initial_value) starts positioned at 0, equivalent to the
    # write-then-seek(0) idiom.
    return StringIO(get_defaults_str(*a, **kw))
Arguments are passed through to :func:`get_defaults_str`. | Below is the the instruction that describes the task:
### Input:
Get a file object with YAML data of configuration defaults.
Arguments are passed through to :func:`get_defaults_str`.
### Response:
def get_defaults_file(*a, **kw):
"""Get a file object with YAML data of configuration defaults.
Arguments are passed through to :func:`get_defaults_str`.
"""
fd = StringIO()
fd.write(get_defaults_str(*a, **kw))
fd.seek(0)
return fd |
def areas_of_code(git_enrich, in_conn, out_conn, block_size=100):
    """Build and index for areas of code from a given Perceval RAW index.
    :param block_size: size of items block.
    :param git_enrich: GitEnrich object to deal with SortingHat affiliations.
    :param in_conn: ESPandasConnector to read from.
    :param out_conn: ESPandasConnector to write to.
    :return: number of documents written in ElasticSearch enriched index.
    """
    analyzer = AreasOfCode(in_connector=in_conn,
                           out_connector=out_conn,
                           block_size=block_size,
                           git_enrich=git_enrich)
    return analyzer.analyze()
:param block_size: size of items block.
:param git_enrich: GitEnrich object to deal with SortingHat affiliations.
:param in_conn: ESPandasConnector to read from.
:param out_conn: ESPandasConnector to write to.
:return: number of documents written in ElasticSearch enriched index. | Below is the the instruction that describes the task:
### Input:
Build and index for areas of code from a given Perceval RAW index.
:param block_size: size of items block.
:param git_enrich: GitEnrich object to deal with SortingHat affiliations.
:param in_conn: ESPandasConnector to read from.
:param out_conn: ESPandasConnector to write to.
:return: number of documents written in ElasticSearch enriched index.
### Response:
def areas_of_code(git_enrich, in_conn, out_conn, block_size=100):
"""Build and index for areas of code from a given Perceval RAW index.
:param block_size: size of items block.
:param git_enrich: GitEnrich object to deal with SortingHat affiliations.
:param in_conn: ESPandasConnector to read from.
:param out_conn: ESPandasConnector to write to.
:return: number of documents written in ElasticSearch enriched index.
"""
aoc = AreasOfCode(in_connector=in_conn, out_connector=out_conn, block_size=block_size,
git_enrich=git_enrich)
ndocs = aoc.analyze()
return ndocs |
def fetch_async(self, limit=None, **q_options):
    """Fetch a list of query results, up to a limit.

    This is the asynchronous version of Query.fetch().

    :param limit: maximum number of results; if None, falls back to the
        limit already present in the query options, else an internal cap.
    :param q_options: additional query options passed through.
    :return: a future whose result is the list of fetched entities.
    """
    if limit is None:
        default_options = self._make_options(q_options)
        if default_options is not None and default_options.limit is not None:
            # Reuse the limit already configured on the query options.
            limit = default_options.limit
        else:
            limit = _MAX_LIMIT
    q_options['limit'] = limit
    # Default the batch size to the limit so one batch can cover the fetch.
    q_options.setdefault('batch_size', limit)
    if self._needs_multi_query():
        # This query must be decomposed into multiple sub-queries
        # (see _needs_multi_query); use the map-based path instead.
        return self.map_async(None, **q_options)
    # Optimization using direct batches.
    options = self._make_options(q_options)
    qry = self._fix_namespace()
    return qry._run_to_list([], options=options)
This is the asynchronous version of Query.fetch(). | Below is the the instruction that describes the task:
### Input:
Fetch a list of query results, up to a limit.
This is the asynchronous version of Query.fetch().
### Response:
def fetch_async(self, limit=None, **q_options):
"""Fetch a list of query results, up to a limit.
This is the asynchronous version of Query.fetch().
"""
if limit is None:
default_options = self._make_options(q_options)
if default_options is not None and default_options.limit is not None:
limit = default_options.limit
else:
limit = _MAX_LIMIT
q_options['limit'] = limit
q_options.setdefault('batch_size', limit)
if self._needs_multi_query():
return self.map_async(None, **q_options)
# Optimization using direct batches.
options = self._make_options(q_options)
qry = self._fix_namespace()
return qry._run_to_list([], options=options) |
def _prepare_pyshell_blocks(self, text):
    """Ensure that Python interactive shell sessions are put in
    code blocks -- even if not properly indented.
    """
    if ">>>" not in text:
        # Fast path: no interpreter prompt anywhere in the text.
        return text
    # A block indented by a full tab width is already a code block, so only
    # match ">>>" lines indented by fewer than tab_width spaces.
    less_than_tab = self.tab_width - 1
    # Verbose (re.X) multiline pattern: a ">>> " line, any continuation
    # lines at the same-or-deeper indent, terminated by a blank line.
    _pyshell_block_re = re.compile(r"""
        ^([ ]{0,%d})>>>[ ].*\n # first line
        ^(\1.*\S+.*\n)* # any number of subsequent lines
        ^\n # ends with a blank line
        """ % less_than_tab, re.M | re.X)
    return _pyshell_block_re.sub(self._pyshell_block_sub, text)
code blocks -- even if not properly indented. | Below is the the instruction that describes the task:
### Input:
Ensure that Python interactive shell sessions are put in
code blocks -- even if not properly indented.
### Response:
def _prepare_pyshell_blocks(self, text):
"""Ensure that Python interactive shell sessions are put in
code blocks -- even if not properly indented.
"""
if ">>>" not in text:
return text
less_than_tab = self.tab_width - 1
_pyshell_block_re = re.compile(r"""
^([ ]{0,%d})>>>[ ].*\n # first line
^(\1.*\S+.*\n)* # any number of subsequent lines
^\n # ends with a blank line
""" % less_than_tab, re.M | re.X)
return _pyshell_block_re.sub(self._pyshell_block_sub, text) |
def _createunbound(kls, **info):
    """Create a new UnboundNode representing a given class."""
    # The checks preserve the original precedence: Bitfield first, then
    # anything with ctypes-style ``_fields_``, then arrays, then simple types.
    if issubclass(kls, Bitfield):
        return UnboundBitfieldNode(type=kls, **info)
    if hasattr(kls, '_fields_'):
        return UnboundStructureNode(type=kls, **info)
    if issubclass(kls, ctypes.Array):
        return UnboundArrayNode(type=kls, **info)
    return UnboundSimpleNode(type=kls, **info)
### Input:
Create a new UnboundNode representing a given class.
### Response:
def _createunbound(kls, **info):
"""Create a new UnboundNode representing a given class."""
if issubclass(kls, Bitfield):
nodetype = UnboundBitfieldNode
elif hasattr(kls, '_fields_'):
nodetype = UnboundStructureNode
elif issubclass(kls, ctypes.Array):
nodetype = UnboundArrayNode
else:
nodetype = UnboundSimpleNode
return nodetype(type=kls, **info) |
def CreateWithLock(self,
urn,
aff4_type,
token=None,
age=NEWEST_TIME,
force_new_version=True,
blocking=True,
blocking_lock_timeout=10,
blocking_sleep_interval=1,
lease_time=100):
"""Creates a new object and locks it.
Similar to OpenWithLock below, this creates a locked object. The difference
is that when you call CreateWithLock, the object does not yet have to exist
in the data store.
Args:
urn: The object to create.
aff4_type: The desired type for this object.
token: The Security Token to use for opening this item.
age: The age policy used to build this object. Only makes sense when mode
has "r".
force_new_version: Forces the creation of a new object in the data_store.
blocking: When True, wait and repeatedly try to grab the lock.
blocking_lock_timeout: Maximum wait time when sync is True.
blocking_sleep_interval: Sleep time between lock grabbing attempts. Used
when blocking is True.
lease_time: Maximum time the object stays locked. Lock will be considered
released when this time expires.
Returns:
An AFF4 object of the desired type and mode.
Raises:
AttributeError: If the mode is invalid.
"""
if not data_store.AFF4Enabled():
raise NotImplementedError("AFF4 data store has been disabled.")
transaction = self._AcquireLock(
urn,
blocking=blocking,
blocking_lock_timeout=blocking_lock_timeout,
blocking_sleep_interval=blocking_sleep_interval,
lease_time=lease_time)
# Since we now own the data store subject, we can simply create the aff4
# object in the usual way.
return self.Create(
urn,
aff4_type,
mode="rw",
token=token,
age=age,
force_new_version=force_new_version,
transaction=transaction) | Creates a new object and locks it.
Similar to OpenWithLock below, this creates a locked object. The difference
is that when you call CreateWithLock, the object does not yet have to exist
in the data store.
Args:
urn: The object to create.
aff4_type: The desired type for this object.
token: The Security Token to use for opening this item.
age: The age policy used to build this object. Only makes sense when mode
has "r".
force_new_version: Forces the creation of a new object in the data_store.
blocking: When True, wait and repeatedly try to grab the lock.
blocking_lock_timeout: Maximum wait time when sync is True.
blocking_sleep_interval: Sleep time between lock grabbing attempts. Used
when blocking is True.
lease_time: Maximum time the object stays locked. Lock will be considered
released when this time expires.
Returns:
An AFF4 object of the desired type and mode.
Raises:
AttributeError: If the mode is invalid. | Below is the instruction that describes the task:
### Input:
Creates a new object and locks it.
Similar to OpenWithLock below, this creates a locked object. The difference
is that when you call CreateWithLock, the object does not yet have to exist
in the data store.
Args:
urn: The object to create.
aff4_type: The desired type for this object.
token: The Security Token to use for opening this item.
age: The age policy used to build this object. Only makes sense when mode
has "r".
force_new_version: Forces the creation of a new object in the data_store.
blocking: When True, wait and repeatedly try to grab the lock.
blocking_lock_timeout: Maximum wait time when sync is True.
blocking_sleep_interval: Sleep time between lock grabbing attempts. Used
when blocking is True.
lease_time: Maximum time the object stays locked. Lock will be considered
released when this time expires.
Returns:
An AFF4 object of the desired type and mode.
Raises:
AttributeError: If the mode is invalid.
### Response:
def CreateWithLock(self,
urn,
aff4_type,
token=None,
age=NEWEST_TIME,
force_new_version=True,
blocking=True,
blocking_lock_timeout=10,
blocking_sleep_interval=1,
lease_time=100):
"""Creates a new object and locks it.
Similar to OpenWithLock below, this creates a locked object. The difference
is that when you call CreateWithLock, the object does not yet have to exist
in the data store.
Args:
urn: The object to create.
aff4_type: The desired type for this object.
token: The Security Token to use for opening this item.
age: The age policy used to build this object. Only makes sense when mode
has "r".
force_new_version: Forces the creation of a new object in the data_store.
blocking: When True, wait and repeatedly try to grab the lock.
blocking_lock_timeout: Maximum wait time when sync is True.
blocking_sleep_interval: Sleep time between lock grabbing attempts. Used
when blocking is True.
lease_time: Maximum time the object stays locked. Lock will be considered
released when this time expires.
Returns:
An AFF4 object of the desired type and mode.
Raises:
AttributeError: If the mode is invalid.
"""
if not data_store.AFF4Enabled():
raise NotImplementedError("AFF4 data store has been disabled.")
transaction = self._AcquireLock(
urn,
blocking=blocking,
blocking_lock_timeout=blocking_lock_timeout,
blocking_sleep_interval=blocking_sleep_interval,
lease_time=lease_time)
# Since we now own the data store subject, we can simply create the aff4
# object in the usual way.
return self.Create(
urn,
aff4_type,
mode="rw",
token=token,
age=age,
force_new_version=force_new_version,
transaction=transaction) |
def radec_hmstodd(ra, dec):
""" Function to convert HMS values into decimal degrees.
This function relies on the astropy.coordinates package to perform the
conversion to decimal degrees.
Parameters
----------
ra : list or array
List or array of input RA positions
dec : list or array
List or array of input Dec positions
Returns
-------
pos : arr
Array of RA,Dec positions in decimal degrees
Notes
-----
This function supports any specification of RA and Dec as HMS or DMS;
specifically, the formats::
["nn","nn","nn.nn"]
"nn nn nn.nnn"
"nn:nn:nn.nn"
"nnH nnM nn.nnS" or "nnD nnM nn.nnS"
See Also
--------
astropy.coordinates
"""
hmstrans = string.maketrans(string.ascii_letters,
' ' * len(string.ascii_letters))
if isinstance(ra, list):
rastr = ':'.join(ra)
elif isinstance(ra, float):
rastr = None
pos_ra = ra
elif ra.find(':') < 0:
# convert any non-numeric characters to spaces
# (we already know the units)
rastr = ra.translate(hmstrans).strip()
rastr = rastr.replace(' ', ' ')
# convert 'nn nn nn.nn' to final 'nn:nn:nn.nn' string
rastr = rastr.replace(' ', ':')
else:
rastr = ra
if isinstance(dec, list):
decstr = ':'.join(dec)
elif isinstance(dec, float):
decstr = None
pos_dec = dec
elif dec.find(':') < 0:
decstr = dec.translate(hmstrans).strip()
decstr = decstr.replace(' ', ' ')
decstr = decstr.replace(' ', ':')
else:
decstr = dec
if rastr is None:
pos = (pos_ra, pos_dec)
else:
pos_coord = coords.SkyCoord(rastr + ' ' + decstr,
unit=(u.hourangle, u.deg))
pos = (pos_coord.ra.deg, pos_coord.dec.deg)
return pos | Function to convert HMS values into decimal degrees.
This function relies on the astropy.coordinates package to perform the
conversion to decimal degrees.
Parameters
----------
ra : list or array
List or array of input RA positions
dec : list or array
List or array of input Dec positions
Returns
-------
pos : arr
Array of RA,Dec positions in decimal degrees
Notes
-----
This function supports any specification of RA and Dec as HMS or DMS;
specifically, the formats::
["nn","nn","nn.nn"]
"nn nn nn.nnn"
"nn:nn:nn.nn"
"nnH nnM nn.nnS" or "nnD nnM nn.nnS"
See Also
--------
astropy.coordinates | Below is the instruction that describes the task:
### Input:
Function to convert HMS values into decimal degrees.
This function relies on the astropy.coordinates package to perform the
conversion to decimal degrees.
Parameters
----------
ra : list or array
List or array of input RA positions
dec : list or array
List or array of input Dec positions
Returns
-------
pos : arr
Array of RA,Dec positions in decimal degrees
Notes
-----
This function supports any specification of RA and Dec as HMS or DMS;
specifically, the formats::
["nn","nn","nn.nn"]
"nn nn nn.nnn"
"nn:nn:nn.nn"
"nnH nnM nn.nnS" or "nnD nnM nn.nnS"
See Also
--------
astropy.coordinates
### Response:
def radec_hmstodd(ra, dec):
""" Function to convert HMS values into decimal degrees.
This function relies on the astropy.coordinates package to perform the
conversion to decimal degrees.
Parameters
----------
ra : list or array
List or array of input RA positions
dec : list or array
List or array of input Dec positions
Returns
-------
pos : arr
Array of RA,Dec positions in decimal degrees
Notes
-----
This function supports any specification of RA and Dec as HMS or DMS;
specifically, the formats::
["nn","nn","nn.nn"]
"nn nn nn.nnn"
"nn:nn:nn.nn"
"nnH nnM nn.nnS" or "nnD nnM nn.nnS"
See Also
--------
astropy.coordinates
"""
hmstrans = string.maketrans(string.ascii_letters,
' ' * len(string.ascii_letters))
if isinstance(ra, list):
rastr = ':'.join(ra)
elif isinstance(ra, float):
rastr = None
pos_ra = ra
elif ra.find(':') < 0:
# convert any non-numeric characters to spaces
# (we already know the units)
rastr = ra.translate(hmstrans).strip()
rastr = rastr.replace(' ', ' ')
# convert 'nn nn nn.nn' to final 'nn:nn:nn.nn' string
rastr = rastr.replace(' ', ':')
else:
rastr = ra
if isinstance(dec, list):
decstr = ':'.join(dec)
elif isinstance(dec, float):
decstr = None
pos_dec = dec
elif dec.find(':') < 0:
decstr = dec.translate(hmstrans).strip()
decstr = decstr.replace(' ', ' ')
decstr = decstr.replace(' ', ':')
else:
decstr = dec
if rastr is None:
pos = (pos_ra, pos_dec)
else:
pos_coord = coords.SkyCoord(rastr + ' ' + decstr,
unit=(u.hourangle, u.deg))
pos = (pos_coord.ra.deg, pos_coord.dec.deg)
return pos |
def msg(self, level, s, *args):
"""
Print a debug message with the given level
"""
if s and level <= self.debug:
        print "%s%s %s" % (" " * self.indent, s, ' '.join(map(repr, args))) | Print a debug message with the given level | Below is the instruction that describes the task:
### Input:
Print a debug message with the given level
### Response:
def msg(self, level, s, *args):
"""
Print a debug message with the given level
"""
if s and level <= self.debug:
print "%s%s %s" % (" " * self.indent, s, ' '.join(map(repr, args))) |
def walnut_data():
"""Tomographic X-ray data of a walnut.
Notes
-----
See the article `Tomographic X-ray data of a walnut`_ for further
information.
See Also
--------
walnut_geometry
References
----------
.. _Tomographic X-ray data of a walnut: https://arxiv.org/abs/1502.04064
"""
# TODO: Store data in some ODL controlled url
url = 'http://www.fips.fi/dataset/CT_walnut_v1/FullSizeSinograms.mat'
dct = get_data('walnut.mat', subset=DATA_SUBSET, url=url)
# Change axes to match ODL definitions
data = np.swapaxes(dct['sinogram1200'], 0, 1)[::-1, ::-1]
data = data.astype('float')
# Very crude gain normalization
data = -np.log(data / np.max(data, axis=1)[:, None])
return data | Tomographic X-ray data of a walnut.
Notes
-----
See the article `Tomographic X-ray data of a walnut`_ for further
information.
See Also
--------
walnut_geometry
References
----------
.. _Tomographic X-ray data of a walnut: https://arxiv.org/abs/1502.04064 | Below is the the instruction that describes the task:
### Input:
Tomographic X-ray data of a walnut.
Notes
-----
See the article `Tomographic X-ray data of a walnut`_ for further
information.
See Also
--------
walnut_geometry
References
----------
.. _Tomographic X-ray data of a walnut: https://arxiv.org/abs/1502.04064
### Response:
def walnut_data():
"""Tomographic X-ray data of a walnut.
Notes
-----
See the article `Tomographic X-ray data of a walnut`_ for further
information.
See Also
--------
walnut_geometry
References
----------
.. _Tomographic X-ray data of a walnut: https://arxiv.org/abs/1502.04064
"""
# TODO: Store data in some ODL controlled url
url = 'http://www.fips.fi/dataset/CT_walnut_v1/FullSizeSinograms.mat'
dct = get_data('walnut.mat', subset=DATA_SUBSET, url=url)
# Change axes to match ODL definitions
data = np.swapaxes(dct['sinogram1200'], 0, 1)[::-1, ::-1]
data = data.astype('float')
# Very crude gain normalization
data = -np.log(data / np.max(data, axis=1)[:, None])
return data |
def p_parallelblock(self, p):
'parallelblock : FORK block_statements JOIN'
p[0] = ParallelBlock(p[2], lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1)) | parallelblock : FORK block_statements JOIN | Below is the instruction that describes the task:
### Input:
parallelblock : FORK block_statements JOIN
### Response:
def p_parallelblock(self, p):
'parallelblock : FORK block_statements JOIN'
p[0] = ParallelBlock(p[2], lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) |
def distribution_absent(name, region=None, key=None, keyid=None, profile=None, **kwargs):
'''
Ensure a distribution with the given Name tag does not exist.
Note that CloudFront does not allow directly deleting an enabled
Distribution. If such is requested, Salt will attempt to first update the
distribution's status to Disabled, and once that returns success, to then
delete the resource. THIS CAN TAKE SOME TIME, so be patient :)
name (string)
Name of the state definition.
Name (string)
Name of the CloudFront distribution to be managed. If not provided, the
value of ``name`` will be used as a default. The purpose of this
parameter is only to resolve it to a Resource ID, so be aware that an
explicit value for ``Id`` below will override any value provided, or
defaulted, here.
Id (string)
The Resource ID of a CloudFront distribution to be managed.
region (string)
Region to connect to
key (string)
Secret key to use
keyid (string)
Access key to use
profile (dict or string)
Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.
Example:
.. code-block:: yaml
Ensure a distribution named my_distribution is gone:
boto_cloudfront.distribution_absent:
- Name: my_distribution
'''
Name = kwargs['Name'] if 'Name' in kwargs else name
Id = kwargs.get('Id')
ref = kwargs['Id'] if 'Id' in kwargs else Name
ret = {'name': Id if Id else Name, 'comment': '', 'changes': {}, 'result': True}
authargs = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile}
if not Id:
res = __salt__['boto_cloudfront.get_distributions_by_comment'](Comment=Name, **authargs)
if res is None:
msg = 'Error dereferencing CloudFront distribution `{}` to a Resource ID.'.format(Name)
log.error(msg)
ret['comment'] = msg
ret['result'] = False
return ret
if len(res) > 1:
msg = ('Multiple CloudFront distibutions matched `{}`, no way to know which to'
' delete.`.'.format(Name))
log.error(msg)
ret['comment'] = msg
ret['result'] = False
return ret
if not res:
msg = 'CloudFront Distribution `{}` already absent.'.format(Name)
log.info(msg)
ret['comment'] = msg
ret['result'] = True
return ret
Id = res[0]['Id']
if not __salt__['boto_cloudfront.distribution_exists'](Id=Id, **authargs):
msg = 'CloudFront distribution `{}` already absent.'.format(ref)
log.info(msg)
ret['comment'] = msg
return ret
old = __salt__['boto_cloudfront.get_distribution_v2'](Id=Id, **authargs)
if old is None:
ret['result'] = False
msg = 'Error getting state of CloudFront distribution `{}`.'.format(ref)
log.error(msg)
ret['comment'] = msg
return ret
currETag = old['ETag']
Enabled = old['DistributionConfig']['Enabled']
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'CloudFront distribution `{}` would be {}deleted.'.format(ref,
('disabled and ' if Enabled else ''))
ret['pchanges'] = {'old': old, 'new': None}
return ret
comments = []
if Enabled:
disabled = __salt__['boto_cloudfront.disable_distribution'](Id=Id, **authargs)
if disabled is None:
ret['result'] = False
msg = 'Error disabling CloudFront distribution `{}`'.format(ref)
log.error(msg)
ret['comment'] = msg
return ret
comments += ['CloudFront distribution `{}` disabled.'.format(ref)]
currETag = disabled['ETag']
deleted = __salt__['boto_cloudfront.delete_distribution'](Id=Id, IfMatch=currETag, **authargs)
if deleted is False:
ret['result'] = False
msg = 'Error deleting CloudFront distribution `{}`'.format(ref)
comments += [msg]
log.error(msg)
ret['comment'] = ' '.join(comments)
return ret
msg = 'CloudFront distribution `{}` deleted.'.format(ref)
comments += [msg]
log.info(msg)
ret['comment'] = ' '.join(comments)
ret['changes'] = {'old': old, 'new': None}
return ret | Ensure a distribution with the given Name tag does not exist.
Note that CloudFront does not allow directly deleting an enabled
Distribution. If such is requested, Salt will attempt to first update the
distribution's status to Disabled, and once that returns success, to then
delete the resource. THIS CAN TAKE SOME TIME, so be patient :)
name (string)
Name of the state definition.
Name (string)
Name of the CloudFront distribution to be managed. If not provided, the
value of ``name`` will be used as a default. The purpose of this
parameter is only to resolve it to a Resource ID, so be aware that an
explicit value for ``Id`` below will override any value provided, or
defaulted, here.
Id (string)
The Resource ID of a CloudFront distribution to be managed.
region (string)
Region to connect to
key (string)
Secret key to use
keyid (string)
Access key to use
profile (dict or string)
Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.
Example:
.. code-block:: yaml
Ensure a distribution named my_distribution is gone:
boto_cloudfront.distribution_absent:
- Name: my_distribution | Below is the instruction that describes the task:
### Input:
Ensure a distribution with the given Name tag does not exist.
Note that CloudFront does not allow directly deleting an enabled
Distribution. If such is requested, Salt will attempt to first update the
distribution's status to Disabled, and once that returns success, to then
delete the resource. THIS CAN TAKE SOME TIME, so be patient :)
name (string)
Name of the state definition.
Name (string)
Name of the CloudFront distribution to be managed. If not provided, the
value of ``name`` will be used as a default. The purpose of this
parameter is only to resolve it to a Resource ID, so be aware that an
explicit value for ``Id`` below will override any value provided, or
defaulted, here.
Id (string)
The Resource ID of a CloudFront distribution to be managed.
region (string)
Region to connect to
key (string)
Secret key to use
keyid (string)
Access key to use
profile (dict or string)
Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.
Example:
.. code-block:: yaml
Ensure a distribution named my_distribution is gone:
boto_cloudfront.distribution_absent:
- Name: my_distribution
### Response:
def distribution_absent(name, region=None, key=None, keyid=None, profile=None, **kwargs):
'''
Ensure a distribution with the given Name tag does not exist.
Note that CloudFront does not allow directly deleting an enabled
Distribution. If such is requested, Salt will attempt to first update the
distribution's status to Disabled, and once that returns success, to then
delete the resource. THIS CAN TAKE SOME TIME, so be patient :)
name (string)
Name of the state definition.
Name (string)
Name of the CloudFront distribution to be managed. If not provided, the
value of ``name`` will be used as a default. The purpose of this
parameter is only to resolve it to a Resource ID, so be aware that an
explicit value for ``Id`` below will override any value provided, or
defaulted, here.
Id (string)
The Resource ID of a CloudFront distribution to be managed.
region (string)
Region to connect to
key (string)
Secret key to use
keyid (string)
Access key to use
profile (dict or string)
Dict, or pillar key pointing to a dict, containing AWS region/key/keyid.
Example:
.. code-block:: yaml
Ensure a distribution named my_distribution is gone:
boto_cloudfront.distribution_absent:
- Name: my_distribution
'''
Name = kwargs['Name'] if 'Name' in kwargs else name
Id = kwargs.get('Id')
ref = kwargs['Id'] if 'Id' in kwargs else Name
ret = {'name': Id if Id else Name, 'comment': '', 'changes': {}, 'result': True}
authargs = {'region': region, 'key': key, 'keyid': keyid, 'profile': profile}
if not Id:
res = __salt__['boto_cloudfront.get_distributions_by_comment'](Comment=Name, **authargs)
if res is None:
msg = 'Error dereferencing CloudFront distribution `{}` to a Resource ID.'.format(Name)
log.error(msg)
ret['comment'] = msg
ret['result'] = False
return ret
if len(res) > 1:
msg = ('Multiple CloudFront distibutions matched `{}`, no way to know which to'
' delete.`.'.format(Name))
log.error(msg)
ret['comment'] = msg
ret['result'] = False
return ret
if not res:
msg = 'CloudFront Distribution `{}` already absent.'.format(Name)
log.info(msg)
ret['comment'] = msg
ret['result'] = True
return ret
Id = res[0]['Id']
if not __salt__['boto_cloudfront.distribution_exists'](Id=Id, **authargs):
msg = 'CloudFront distribution `{}` already absent.'.format(ref)
log.info(msg)
ret['comment'] = msg
return ret
old = __salt__['boto_cloudfront.get_distribution_v2'](Id=Id, **authargs)
if old is None:
ret['result'] = False
msg = 'Error getting state of CloudFront distribution `{}`.'.format(ref)
log.error(msg)
ret['comment'] = msg
return ret
currETag = old['ETag']
Enabled = old['DistributionConfig']['Enabled']
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'CloudFront distribution `{}` would be {}deleted.'.format(ref,
('disabled and ' if Enabled else ''))
ret['pchanges'] = {'old': old, 'new': None}
return ret
comments = []
if Enabled:
disabled = __salt__['boto_cloudfront.disable_distribution'](Id=Id, **authargs)
if disabled is None:
ret['result'] = False
msg = 'Error disabling CloudFront distribution `{}`'.format(ref)
log.error(msg)
ret['comment'] = msg
return ret
comments += ['CloudFront distribution `{}` disabled.'.format(ref)]
currETag = disabled['ETag']
deleted = __salt__['boto_cloudfront.delete_distribution'](Id=Id, IfMatch=currETag, **authargs)
if deleted is False:
ret['result'] = False
msg = 'Error deleting CloudFront distribution `{}`'.format(ref)
comments += [msg]
log.error(msg)
ret['comment'] = ' '.join(comments)
return ret
msg = 'CloudFront distribution `{}` deleted.'.format(ref)
comments += [msg]
log.info(msg)
ret['comment'] = ' '.join(comments)
ret['changes'] = {'old': old, 'new': None}
return ret |
def list_hosts(kwargs=None, call=None):
'''
List all the hosts for this VMware environment
CLI Example:
.. code-block:: bash
salt-cloud -f list_hosts my-vmware-config
'''
if call != 'function':
raise SaltCloudSystemExit(
'The list_hosts function must be called with '
'-f or --function.'
)
return {'Hosts': salt.utils.vmware.list_hosts(_get_si())} | List all the hosts for this VMware environment
CLI Example:
.. code-block:: bash
salt-cloud -f list_hosts my-vmware-config | Below is the instruction that describes the task:
### Input:
List all the hosts for this VMware environment
CLI Example:
.. code-block:: bash
salt-cloud -f list_hosts my-vmware-config
### Response:
def list_hosts(kwargs=None, call=None):
'''
List all the hosts for this VMware environment
CLI Example:
.. code-block:: bash
salt-cloud -f list_hosts my-vmware-config
'''
if call != 'function':
raise SaltCloudSystemExit(
'The list_hosts function must be called with '
'-f or --function.'
)
return {'Hosts': salt.utils.vmware.list_hosts(_get_si())} |
def get_columns(self, index, columns=None, as_dict=False):
"""
For a single index and list of column names return a DataFrame of the values in that index as either a dict
or a DataFrame
:param index: single index value
:param columns: list of column names
:param as_dict: if True then return the result as a dictionary
:return: DataFrame or dictionary
"""
i = sorted_index(self._index, index) if self._sort else self._index.index(index)
return self.get_location(i, columns, as_dict) | For a single index and list of column names return a DataFrame of the values in that index as either a dict
or a DataFrame
:param index: single index value
:param columns: list of column names
:param as_dict: if True then return the result as a dictionary
:return: DataFrame or dictionary | Below is the instruction that describes the task:
### Input:
For a single index and list of column names return a DataFrame of the values in that index as either a dict
or a DataFrame
:param index: single index value
:param columns: list of column names
:param as_dict: if True then return the result as a dictionary
:return: DataFrame or dictionary
### Response:
def get_columns(self, index, columns=None, as_dict=False):
"""
For a single index and list of column names return a DataFrame of the values in that index as either a dict
or a DataFrame
:param index: single index value
:param columns: list of column names
:param as_dict: if True then return the result as a dictionary
:return: DataFrame or dictionary
"""
i = sorted_index(self._index, index) if self._sort else self._index.index(index)
return self.get_location(i, columns, as_dict) |
def request_homescreen(blink):
"""Request homescreen info."""
url = "{}/api/v3/accounts/{}/homescreen".format(blink.urls.base_url,
blink.account_id)
    return http_get(blink, url) | Request homescreen info. | Below is the instruction that describes the task:
### Input:
Request homescreen info.
### Response:
def request_homescreen(blink):
"""Request homescreen info."""
url = "{}/api/v3/accounts/{}/homescreen".format(blink.urls.base_url,
blink.account_id)
return http_get(blink, url) |
def mkRepr(instance, *argls, **kwargs):
r"""Convinience function to implement ``__repr__``. `kwargs` values are
``repr`` ed. Special behavior for ``instance=None``: just the
arguments are formatted.
Example:
>>> class Thing:
... def __init__(self, color, shape, taste=None):
... self.color, self.shape, self.taste = color, shape, taste
... def __repr__(self):
... return mkRepr(self, self.color, self.shape, taste=self.taste)
...
>>> maggot = Thing('white', 'cylindrical', 'chicken')
>>> maggot
Thing('white', 'cylindrical', taste='chicken')
>>> Thing('Color # 132942430-214809804-412430988081-241234', 'unkown', taste=maggot)
Thing('Color # 132942430-214809804-412430988081-241234',
'unkown',
taste=Thing('white', 'cylindrical', taste='chicken'))
"""
width=79
maxIndent=15
minIndent=2
args = (map(repr, argls) + ["%s=%r" % (k, v)
for (k,v) in sorted(kwargs.items())]) or [""]
if instance is not None:
start = "%s(" % instance.__class__.__name__
args[-1] += ")"
else:
start = ""
if len(start) <= maxIndent and len(start) + len(args[0]) <= width and \
max(map(len,args)) <= width: # XXX mag of last condition bit arbitrary
indent = len(start)
args[0] = start + args[0]
if sum(map(len, args)) + 2*(len(args) - 1) <= width:
return ", ".join(args)
else:
indent = minIndent
args[0] = start + "\n" + " " * indent + args[0]
return (",\n" + " " * indent).join(args) | r"""Convinience function to implement ``__repr__``. `kwargs` values are
``repr`` ed. Special behavior for ``instance=None``: just the
arguments are formatted.
Example:
>>> class Thing:
... def __init__(self, color, shape, taste=None):
... self.color, self.shape, self.taste = color, shape, taste
... def __repr__(self):
... return mkRepr(self, self.color, self.shape, taste=self.taste)
...
>>> maggot = Thing('white', 'cylindrical', 'chicken')
>>> maggot
Thing('white', 'cylindrical', taste='chicken')
>>> Thing('Color # 132942430-214809804-412430988081-241234', 'unkown', taste=maggot)
Thing('Color # 132942430-214809804-412430988081-241234',
'unkown',
taste=Thing('white', 'cylindrical', taste='chicken')) | Below is the instruction that describes the task:
### Input:
r"""Convinience function to implement ``__repr__``. `kwargs` values are
``repr`` ed. Special behavior for ``instance=None``: just the
arguments are formatted.
Example:
>>> class Thing:
... def __init__(self, color, shape, taste=None):
... self.color, self.shape, self.taste = color, shape, taste
... def __repr__(self):
... return mkRepr(self, self.color, self.shape, taste=self.taste)
...
>>> maggot = Thing('white', 'cylindrical', 'chicken')
>>> maggot
Thing('white', 'cylindrical', taste='chicken')
>>> Thing('Color # 132942430-214809804-412430988081-241234', 'unkown', taste=maggot)
Thing('Color # 132942430-214809804-412430988081-241234',
'unkown',
taste=Thing('white', 'cylindrical', taste='chicken'))
### Response:
def mkRepr(instance, *argls, **kwargs):
r"""Convinience function to implement ``__repr__``. `kwargs` values are
``repr`` ed. Special behavior for ``instance=None``: just the
arguments are formatted.
Example:
>>> class Thing:
... def __init__(self, color, shape, taste=None):
... self.color, self.shape, self.taste = color, shape, taste
... def __repr__(self):
... return mkRepr(self, self.color, self.shape, taste=self.taste)
...
>>> maggot = Thing('white', 'cylindrical', 'chicken')
>>> maggot
Thing('white', 'cylindrical', taste='chicken')
>>> Thing('Color # 132942430-214809804-412430988081-241234', 'unkown', taste=maggot)
Thing('Color # 132942430-214809804-412430988081-241234',
'unkown',
taste=Thing('white', 'cylindrical', taste='chicken'))
"""
width=79
maxIndent=15
minIndent=2
args = (map(repr, argls) + ["%s=%r" % (k, v)
for (k,v) in sorted(kwargs.items())]) or [""]
if instance is not None:
start = "%s(" % instance.__class__.__name__
args[-1] += ")"
else:
start = ""
if len(start) <= maxIndent and len(start) + len(args[0]) <= width and \
max(map(len,args)) <= width: # XXX mag of last condition bit arbitrary
indent = len(start)
args[0] = start + args[0]
if sum(map(len, args)) + 2*(len(args) - 1) <= width:
return ", ".join(args)
else:
indent = minIndent
args[0] = start + "\n" + " " * indent + args[0]
return (",\n" + " " * indent).join(args) |
def destroy(self):
""" A reimplemented destructor that cancels
the dialog before destroying.
"""
super(AndroidPopupWindow, self).destroy()
window = self.window
if window:
#: Clear the dismiss listener
#: (or we get an error during the callback)
window.setOnDismissListener(None)
#window.dismiss()
del self.window | A reimplemented destructor that cancels
the dialog before destroying. | Below is the instruction that describes the task:
### Input:
A reimplemented destructor that cancels
the dialog before destroying.
### Response:
def destroy(self):
""" A reimplemented destructor that cancels
the dialog before destroying.
"""
super(AndroidPopupWindow, self).destroy()
window = self.window
if window:
#: Clear the dismiss listener
#: (or we get an error during the callback)
window.setOnDismissListener(None)
#window.dismiss()
del self.window |
def _parse_result(result):
"""
Parse ``clamscan`` output into same dictionary structured used by
``pyclamd``.
Input example::
/home/bystrousak/Plocha/prace/test/eicar.com: Eicar-Test-Signature FOUND
Output dict::
{
"/home/bystrousak/Plocha/prace/test/eicar.com": (
"FOUND",
"Eicar-Test-Signature"
)
}
"""
lines = filter(lambda x: x.strip(), result.splitlines()) # rm blank lines
if not lines:
return {}
out = {}
for line in lines:
line = line.split(":")
fn = line[0].strip()
line = " ".join(line[1:])
line = line.rsplit(None, 1)
status = line.pop().strip()
out[fn] = (status, " ".join(line).strip())
return out | Parse ``clamscan`` output into same dictionary structured used by
``pyclamd``.
Input example::
/home/bystrousak/Plocha/prace/test/eicar.com: Eicar-Test-Signature FOUND
Output dict::
{
"/home/bystrousak/Plocha/prace/test/eicar.com": (
"FOUND",
"Eicar-Test-Signature"
)
} | Below is the instruction that describes the task:
### Input:
Parse ``clamscan`` output into same dictionary structured used by
``pyclamd``.
Input example::
/home/bystrousak/Plocha/prace/test/eicar.com: Eicar-Test-Signature FOUND
Output dict::
{
"/home/bystrousak/Plocha/prace/test/eicar.com": (
"FOUND",
"Eicar-Test-Signature"
)
}
### Response:
def _parse_result(result):
"""
Parse ``clamscan`` output into same dictionary structured used by
``pyclamd``.
Input example::
/home/bystrousak/Plocha/prace/test/eicar.com: Eicar-Test-Signature FOUND
Output dict::
{
"/home/bystrousak/Plocha/prace/test/eicar.com": (
"FOUND",
"Eicar-Test-Signature"
)
}
"""
lines = filter(lambda x: x.strip(), result.splitlines()) # rm blank lines
if not lines:
return {}
out = {}
for line in lines:
line = line.split(":")
fn = line[0].strip()
line = " ".join(line[1:])
line = line.rsplit(None, 1)
status = line.pop().strip()
out[fn] = (status, " ".join(line).strip())
return out |
def _connected(self, link_uri):
""" This callback is called form the Crazyflie API when a Crazyflie
has been connected and the TOCs have been downloaded."""
print('Connected to %s' % link_uri)
mems = self._cf.mem.get_mems(MemoryElement.TYPE_I2C)
print('Found {} EEPOM(s)'.format(len(mems)))
if len(mems) > 0:
print('Writing default configuration to'
' memory {}'.format(mems[0].id))
elems = mems[0].elements
elems['version'] = 1
elems['pitch_trim'] = 0.0
elems['roll_trim'] = 0.0
elems['radio_channel'] = 80
elems['radio_speed'] = 0
elems['radio_address'] = 0xE7E7E7E7E7
mems[0].write_data(self._data_written) | This callback is called form the Crazyflie API when a Crazyflie
has been connected and the TOCs have been downloaded. | Below is the instruction that describes the task:
### Input:
This callback is called form the Crazyflie API when a Crazyflie
has been connected and the TOCs have been downloaded.
### Response:
def _connected(self, link_uri):
    """Handle a completed Crazyflie connection.

    Invoked by the Crazyflie API once the link is up and the TOCs have
    been downloaded; writes a default configuration to the first I2C
    EEPROM memory, if one is present.
    """
    print('Connected to %s' % link_uri)

    mems = self._cf.mem.get_mems(MemoryElement.TYPE_I2C)
    print('Found {} EEPOM(s)'.format(len(mems)))
    if not mems:
        # No EEPROM memory reported -- nothing to configure.
        return

    eeprom = mems[0]
    print('Writing default configuration to'
          ' memory {}'.format(eeprom.id))

    defaults = {
        'version': 1,
        'pitch_trim': 0.0,
        'roll_trim': 0.0,
        'radio_channel': 80,
        'radio_speed': 0,
        'radio_address': 0xE7E7E7E7E7,
    }
    elems = eeprom.elements
    for name, value in defaults.items():
        elems[name] = value

    eeprom.write_data(self._data_written)
def is_empty(self):
'''Returns True if all titleInfo subfields are not set or
empty; returns False if any of the fields are not empty.'''
return not bool(self.title or self.subtitle or self.part_number \
or self.part_name or self.non_sort or self.type) | Returns True if all titleInfo subfields are not set or
empty; returns False if any of the fields are not empty. | Below is the the instruction that describes the task:
### Input:
Returns True if all titleInfo subfields are not set or
empty; returns False if any of the fields are not empty.
### Response:
def is_empty(self):
    """Return True when every titleInfo subfield is unset or empty;
    return False as soon as any field holds a value."""
    subfields = (self.title, self.subtitle, self.part_number,
                 self.part_name, self.non_sort, self.type)
    return not any(subfields)
def fftr(wave, npoints=None, indep_min=None, indep_max=None):
r"""
Return the real part of the Fast Fourier Transform of a waveform.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param npoints: Number of points to use in the transform. If **npoints**
is less than the size of the independent variable vector
the waveform is truncated; if **npoints** is greater than
the size of the independent variable vector, the waveform
is zero-padded
:type npoints: positive integer
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.fftr
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`npoints\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
* RuntimeError (Non-uniform sampling)
.. [[[end]]]
"""
return real(fft(wave, npoints, indep_min, indep_max)) | r"""
Return the real part of the Fast Fourier Transform of a waveform.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param npoints: Number of points to use in the transform. If **npoints**
is less than the size of the independent variable vector
the waveform is truncated; if **npoints** is greater than
the size of the independent variable vector, the waveform
is zero-padded
:type npoints: positive integer
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.fftr
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`npoints\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
* RuntimeError (Non-uniform sampling)
.. [[[end]]] | Below is the the instruction that describes the task:
### Input:
r"""
Return the real part of the Fast Fourier Transform of a waveform.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param npoints: Number of points to use in the transform. If **npoints**
is less than the size of the independent variable vector
the waveform is truncated; if **npoints** is greater than
the size of the independent variable vector, the waveform
is zero-padded
:type npoints: positive integer
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.fftr
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`npoints\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
* RuntimeError (Non-uniform sampling)
.. [[[end]]]
### Response:
def fftr(wave, npoints=None, indep_min=None, indep_max=None):
    r"""
    Return the real part of the Fast Fourier Transform of a waveform.
    :param wave: Waveform
    :type wave: :py:class:`peng.eng.Waveform`
    :param npoints: Number of points to use in the transform. If **npoints**
                    is less than the size of the independent variable vector
                    the waveform is truncated; if **npoints** is greater than
                    the size of the independent variable vector, the waveform
                    is zero-padded
    :type npoints: positive integer
    :param indep_min: Independent vector start point of computation
    :type indep_min: integer or float
    :param indep_max: Independent vector stop point of computation
    :type indep_max: integer or float
    :rtype: :py:class:`peng.eng.Waveform`
    .. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
    .. Auto-generated exceptions documentation for
    .. peng.wave_functions.fftr
    :raises:
     * RuntimeError (Argument \`indep_max\` is not valid)
     * RuntimeError (Argument \`indep_min\` is not valid)
     * RuntimeError (Argument \`npoints\` is not valid)
     * RuntimeError (Argument \`wave\` is not valid)
     * RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
       arguments)
     * RuntimeError (Non-uniform sampling)
    .. [[[end]]]
    """
    # Compute the complex transform first, then keep only its real part.
    transformed = fft(wave, npoints, indep_min, indep_max)
    return real(transformed)
def open(self):
"""
Opens the port.
:returns: Deferred that callbacks when we are ready to make and receive calls.
"""
logging.debug("Opening rpc system")
d = self._connectionpool.open(self._packet_received)
def opened(_):
logging.debug("RPC system is open")
self._opened = True
logging.debug("Starting ping loop")
self._ping_loop.start(self._ping_interval, now=False)
d.addCallback(opened)
return d | Opens the port.
:returns: Deferred that callbacks when we are ready to make and receive calls. | Below is the the instruction that describes the task:
### Input:
Opens the port.
:returns: Deferred that callbacks when we are ready to make and receive calls.
### Response:
def open(self):
    """
    Open the port.

    :returns: Deferred that fires once we are ready to make and
        receive calls.
    """
    logging.debug("Opening rpc system")

    def _on_opened(_result):
        # Mark the system usable and begin the periodic keepalive pings.
        logging.debug("RPC system is open")
        self._opened = True
        logging.debug("Starting ping loop")
        self._ping_loop.start(self._ping_interval, now=False)

    deferred = self._connectionpool.open(self._packet_received)
    deferred.addCallback(_on_opened)
    return deferred
def t_ID(self, token):
r'[a-zA-Z_][a-zA-Z0-9_-]*'
if token.value in self.KEYWORDS:
token.type = self.KEYWORDS[token.value]
return token
else:
return token | r'[a-zA-Z_][a-zA-Z0-9_-]* | Below is the the instruction that describes the task:
### Input:
r'[a-zA-Z_][a-zA-Z0-9_-]*
### Response:
def t_ID(self, token):
    r'[a-zA-Z_][a-zA-Z0-9_-]*'
    # The raw-string "docstring" above is the PLY lexer rule's regex --
    # it is load-bearing, do not edit it.
    # Reclassify identifiers that are reserved words; every other match
    # keeps its original ID token type. The original code had a
    # redundant else branch returning the same token.
    if token.value in self.KEYWORDS:
        token.type = self.KEYWORDS[token.value]
    return token
def search_tag(self, tag, symbols=True, feeds=False):
""" Get a list of Symbols by searching a tag or partial tag.
Parameters
----------
tag : str
The tag to search. Appending '%' will use SQL's "LIKE"
functionality.
symbols : bool, optional
Search for Symbol's based on their tags.
feeds : bool, optional
Search for Symbol's based on their Feeds' tags.
Returns
-------
List of Symbols or empty list
"""
syms = []
if isinstance(tag, (str, unicode)):
tags = [tag]
else:
tags = tag
if symbols:
crits = []
for tag in tags:
if "%" in tag:
crit = SymbolTag.tag.like(tag)
else:
crit = SymbolTag.tag == tag
crits.append(crit)
qry = self.ses.query(SymbolTag)
qry = qry.filter(or_(*crits))
syms = qry.all()
syms = [tagged.symbol for tagged in syms]
if feeds:
crits = []
for tag in tags:
if "%" in tag:
crit = FeedTag.tag.like(tag)
else:
crit = FeedTag.tag == tag
crits.append(crit)
qry = self.ses.query(Symbol).select_from(FeedTag)
qry = qry.join(FeedTag.feed).join(Feed.symbol)
qry = qry.filter(or_(*crits))
fds = qry.distinct()
syms = syms + [sym for sym in fds]
return list(set(syms))
return syms | Get a list of Symbols by searching a tag or partial tag.
Parameters
----------
tag : str
The tag to search. Appending '%' will use SQL's "LIKE"
functionality.
symbols : bool, optional
Search for Symbol's based on their tags.
feeds : bool, optional
Search for Symbol's based on their Feeds' tags.
Returns
-------
List of Symbols or empty list | Below is the the instruction that describes the task:
### Input:
Get a list of Symbols by searching a tag or partial tag.
Parameters
----------
tag : str
The tag to search. Appending '%' will use SQL's "LIKE"
functionality.
symbols : bool, optional
Search for Symbol's based on their tags.
feeds : bool, optional
Search for Symbol's based on their Feeds' tags.
Returns
-------
List of Symbols or empty list
### Response:
def search_tag(self, tag, symbols=True, feeds=False):
    """ Get a list of Symbols by searching a tag or partial tag.
    Parameters
    ----------
    tag : str or list of str
        The tag (or tags) to search. Appending '%' will use SQL's "LIKE"
        functionality.
    symbols : bool, optional
        Search for Symbol's based on their tags.
    feeds : bool, optional
        Search for Symbol's based on their Feeds' tags.
    Returns
    -------
    List of Symbols or empty list
    """
    syms = []
    # Normalize the input to a list of tag strings.
    # NOTE(review): ``unicode`` only exists on Python 2 -- on Python 3
    # this isinstance check raises NameError; confirm the supported
    # interpreter version.
    if isinstance(tag, (str, unicode)):
        tags = [tag]
    else:
        tags = tag
    if symbols:
        # One criterion per tag; '%' switches to a LIKE pattern match.
        crits = []
        for tag in tags:
            if "%" in tag:
                crit = SymbolTag.tag.like(tag)
            else:
                crit = SymbolTag.tag == tag
            crits.append(crit)
        # Any single matching tag qualifies a symbol (OR semantics).
        qry = self.ses.query(SymbolTag)
        qry = qry.filter(or_(*crits))
        syms = qry.all()
        syms = [tagged.symbol for tagged in syms]
    if feeds:
        crits = []
        for tag in tags:
            if "%" in tag:
                crit = FeedTag.tag.like(tag)
            else:
                crit = FeedTag.tag == tag
            crits.append(crit)
        # Walk FeedTag -> Feed -> Symbol so feed tags resolve to the
        # symbols that own those feeds.
        qry = self.ses.query(Symbol).select_from(FeedTag)
        qry = qry.join(FeedTag.feed).join(Feed.symbol)
        qry = qry.filter(or_(*crits))
        fds = qry.distinct()
        # Merge feed-tag hits with any symbol-tag hits, de-duplicated.
        # Note the early return: when feeds=True the combined result is
        # returned here; order of the result set is not preserved.
        syms = syms + [sym for sym in fds]
        return list(set(syms))
    return syms
def exterior_almost_equals(self, other, max_distance=1e-6, points_per_edge=8):
"""
Estimate if this and other polygon's exterior are almost identical.
The two exteriors can have different numbers of points, but any point
randomly sampled on the exterior of one polygon should be close to the
closest point on the exterior of the other polygon.
Note that this method works approximately. One can come up with
polygons with fairly different shapes that will still be estimated as
equal by this method. In practice however this should be unlikely to be
the case. The probability for something like that goes down as the
interpolation parameter is increased.
Parameters
----------
other : imgaug.Polygon or (N,2) ndarray or list of tuple
The other polygon with which to compare the exterior.
If this is an ndarray, it is assumed to represent an exterior.
It must then have dtype ``float32`` and shape ``(N,2)`` with the
second dimension denoting xy-coordinates.
If this is a list of tuples, it is assumed to represent an exterior.
Each tuple then must contain exactly two numbers, denoting
xy-coordinates.
max_distance : number, optional
The maximum euclidean distance between a point on one polygon and
the closest point on the other polygon. If the distance is exceeded
for any such pair, the two exteriors are not viewed as equal. The
points are other the points contained in the polygon's exterior
ndarray or interpolated points between these.
points_per_edge : int, optional
How many points to interpolate on each edge.
Returns
-------
bool
Whether the two polygon's exteriors can be viewed as equal
(approximate test).
"""
if isinstance(other, list):
other = Polygon(np.float32(other))
elif ia.is_np_array(other):
other = Polygon(other)
else:
assert isinstance(other, Polygon)
other = other
return self.to_line_string(closed=True).coords_almost_equals(
other.to_line_string(closed=True),
max_distance=max_distance,
points_per_edge=points_per_edge
) | Estimate if this and other polygon's exterior are almost identical.
The two exteriors can have different numbers of points, but any point
randomly sampled on the exterior of one polygon should be close to the
closest point on the exterior of the other polygon.
Note that this method works approximately. One can come up with
polygons with fairly different shapes that will still be estimated as
equal by this method. In practice however this should be unlikely to be
the case. The probability for something like that goes down as the
interpolation parameter is increased.
Parameters
----------
other : imgaug.Polygon or (N,2) ndarray or list of tuple
The other polygon with which to compare the exterior.
If this is an ndarray, it is assumed to represent an exterior.
It must then have dtype ``float32`` and shape ``(N,2)`` with the
second dimension denoting xy-coordinates.
If this is a list of tuples, it is assumed to represent an exterior.
Each tuple then must contain exactly two numbers, denoting
xy-coordinates.
max_distance : number, optional
The maximum euclidean distance between a point on one polygon and
the closest point on the other polygon. If the distance is exceeded
for any such pair, the two exteriors are not viewed as equal. The
points are other the points contained in the polygon's exterior
ndarray or interpolated points between these.
points_per_edge : int, optional
How many points to interpolate on each edge.
Returns
-------
bool
Whether the two polygon's exteriors can be viewed as equal
(approximate test). | Below is the the instruction that describes the task:
### Input:
Estimate if this and other polygon's exterior are almost identical.
The two exteriors can have different numbers of points, but any point
randomly sampled on the exterior of one polygon should be close to the
closest point on the exterior of the other polygon.
Note that this method works approximately. One can come up with
polygons with fairly different shapes that will still be estimated as
equal by this method. In practice however this should be unlikely to be
the case. The probability for something like that goes down as the
interpolation parameter is increased.
Parameters
----------
other : imgaug.Polygon or (N,2) ndarray or list of tuple
The other polygon with which to compare the exterior.
If this is an ndarray, it is assumed to represent an exterior.
It must then have dtype ``float32`` and shape ``(N,2)`` with the
second dimension denoting xy-coordinates.
If this is a list of tuples, it is assumed to represent an exterior.
Each tuple then must contain exactly two numbers, denoting
xy-coordinates.
max_distance : number, optional
The maximum euclidean distance between a point on one polygon and
the closest point on the other polygon. If the distance is exceeded
for any such pair, the two exteriors are not viewed as equal. The
points are other the points contained in the polygon's exterior
ndarray or interpolated points between these.
points_per_edge : int, optional
How many points to interpolate on each edge.
Returns
-------
bool
Whether the two polygon's exteriors can be viewed as equal
(approximate test).
### Response:
def exterior_almost_equals(self, other, max_distance=1e-6, points_per_edge=8):
    """
    Estimate whether this polygon's exterior and ``other``'s are almost
    identical.

    The two exteriors may contain different numbers of points; they are
    compared by interpolating points along each edge and checking that
    every such point lies close to the other exterior. The test is
    approximate: pathological shapes can still compare equal, but the
    probability of that drops as ``points_per_edge`` grows.

    Parameters
    ----------
    other : imgaug.Polygon or (N,2) ndarray or list of tuple
        The other exterior. An ndarray must be ``float32`` of shape
        ``(N,2)``; a list must contain ``(x, y)`` tuples.
    max_distance : number, optional
        Maximum allowed euclidean distance between a point on one
        exterior and the closest point on the other.
    points_per_edge : int, optional
        How many points to interpolate on each edge.

    Returns
    -------
    bool
        Whether the two exteriors can be viewed as equal (approximate
        test).
    """
    # Coerce plain coordinate containers into a Polygon first.
    if isinstance(other, list):
        other_poly = Polygon(np.float32(other))
    elif ia.is_np_array(other):
        other_poly = Polygon(other)
    else:
        assert isinstance(other, Polygon)
        other_poly = other

    this_ls = self.to_line_string(closed=True)
    other_ls = other_poly.to_line_string(closed=True)
    return this_ls.coords_almost_equals(
        other_ls,
        max_distance=max_distance,
        points_per_edge=points_per_edge
    )
def to_simple_dict(self):
"""Return a dict of only the basic data about the release"""
return {
'version': self.version,
'product': self.product,
'channel': self.channel,
'is_public': self.is_public,
'slug': self.slug,
'title': unicode(self),
} | Return a dict of only the basic data about the release | Below is the the instruction that describes the task:
### Input:
Return a dict of only the basic data about the release
### Response:
def to_simple_dict(self):
    """Return a dict holding only the basic data about the release."""
    # NOTE(review): ``unicode`` exists only on Python 2 -- confirm the
    # supported interpreter before reusing this code elsewhere.
    summary = dict(
        version=self.version,
        product=self.product,
        channel=self.channel,
        is_public=self.is_public,
        slug=self.slug,
        title=unicode(self),
    )
    return summary
def lonely_buckets(self):
"""
Get all of the buckets that haven't been updated in over
an hour.
"""
hrago = time.monotonic() - 3600
return [b for b in self.buckets if b.last_updated < hrago] | Get all of the buckets that haven't been updated in over
an hour. | Below is the instruction that describes the task:
### Input:
Get all of the buckets that haven't been updated in over
an hour.
### Response:
def lonely_buckets(self):
    """Return every bucket whose ``last_updated`` stamp is more than
    an hour in the past."""
    cutoff = time.monotonic() - 3600
    stale = []
    for bucket in self.buckets:
        if bucket.last_updated < cutoff:
            stale.append(bucket)
    return stale
def remove_from_queue(self, index):
"""Remove a track from the queue by index. The index number is
required as an argument, where the first index is 0.
Args:
index (int): The (0-based) index of the track to remove
"""
# TODO: what do these parameters actually do?
updid = '0'
objid = 'Q:0/' + str(index + 1)
self.avTransport.RemoveTrackFromQueue([
('InstanceID', 0),
('ObjectID', objid),
('UpdateID', updid),
]) | Remove a track from the queue by index. The index number is
required as an argument, where the first index is 0.
Args:
index (int): The (0-based) index of the track to remove | Below is the the instruction that describes the task:
### Input:
Remove a track from the queue by index. The index number is
required as an argument, where the first index is 0.
Args:
index (int): The (0-based) index of the track to remove
### Response:
def remove_from_queue(self, index):
    """Remove a track from the queue by its index.

    Args:
        index (int): The (0-based) index of the track to remove
    """
    # The AVTransport service addresses queue items 1-based, hence +1.
    # TODO: what do these parameters actually do?
    object_id = 'Q:0/{0}'.format(index + 1)
    update_id = '0'
    self.avTransport.RemoveTrackFromQueue([
        ('InstanceID', 0),
        ('ObjectID', object_id),
        ('UpdateID', update_id),
    ])
def get_seqstarts(bamfile, N):
""" Go through the SQ headers and pull out all sequences with size
greater than the resolution settings, i.e. contains at least a few cells
"""
import pysam
bamfile = pysam.AlignmentFile(bamfile, "rb")
seqsize = {}
for kv in bamfile.header["SQ"]:
if kv["LN"] < 10 * N:
continue
seqsize[kv["SN"]] = kv["LN"] / N + 1
allseqs = natsorted(seqsize.keys())
allseqsizes = np.array([seqsize[x] for x in allseqs])
seqstarts = np.cumsum(allseqsizes)
seqstarts = np.roll(seqstarts, 1)
total_bins = seqstarts[0]
seqstarts[0] = 0
seqstarts = dict(zip(allseqs, seqstarts))
return seqstarts, seqsize, total_bins | Go through the SQ headers and pull out all sequences with size
greater than the resolution settings, i.e. contains at least a few cells | Below is the instruction that describes the task:
### Input:
Go through the SQ headers and pull out all sequences with size
greater than the resolution settings, i.e. contains at least a few cells
### Response:
def get_seqstarts(bamfile, N):
    """Index the BAM header's SQ entries into cumulative bin offsets.

    Only sequences large enough for the resolution settings (at least
    ~10 bins of size ``N``) are kept. Returns a tuple
    ``(seqstarts, seqsize, total_bins)`` where ``seqstarts`` maps each
    sequence name to the index of its first bin, ``seqsize`` maps each
    sequence name to its bin count, and ``total_bins`` is the overall
    number of bins.
    """
    import pysam
    bam = pysam.AlignmentFile(bamfile, "rb")

    seqsize = {}
    for entry in bam.header["SQ"]:
        # Skip contigs too small to contain at least a few bins.
        if entry["LN"] < 10 * N:
            continue
        # NOTE(review): true division here yields float bin counts on
        # Python 3 -- confirm whether integer division was intended.
        seqsize[entry["SN"]] = entry["LN"] / N + 1

    names = natsorted(seqsize.keys())
    sizes = np.array([seqsize[name] for name in names])
    # Rotate the cumulative sums right by one so each entry becomes the
    # sum of all preceding bin counts; the rolled-off grand total is
    # captured before slot 0 is reset to zero.
    offsets = np.roll(np.cumsum(sizes), 1)
    total_bins = offsets[0]
    offsets[0] = 0
    seqstarts = dict(zip(names, offsets))
    return seqstarts, seqsize, total_bins
def remove(self, key):
"""T.remove(key) <==> del T[key], remove item <key> from tree."""
if self._root is None:
raise KeyError(str(key))
head = Node() # False tree root
node = head
node.right = self._root
parent = None
grand_parent = None
found = None # Found item
direction = 1
# Search and push a red down
while node[direction] is not None:
last = direction
# Update helpers
grand_parent = parent
parent = node
node = node[direction]
direction = 1 if (self._cmp(self._cmp_data, node.key, key) < 0) else 0
# Save found node
if self._cmp(self._cmp_data, key, node.key) == 0:
found = node
# Push the red node down
if not RBTree.is_red(node) and not RBTree.is_red(node[direction]):
if RBTree.is_red(node[1 - direction]):
parent[last] = RBTree.jsw_single(node, direction)
parent = parent[last]
elif not RBTree.is_red(node[1 - direction]):
sibling = parent[1 - last]
if sibling is not None:
if (not RBTree.is_red(sibling[1 - last])) and (not RBTree.is_red(sibling[last])):
# Color flip
parent.red = False
sibling.red = True
node.red = True
else:
direction2 = 1 if grand_parent.right is parent else 0
if RBTree.is_red(sibling[last]):
grand_parent[direction2] = RBTree.jsw_double(parent, last)
elif RBTree.is_red(sibling[1-last]):
grand_parent[direction2] = RBTree.jsw_single(parent, last)
# Ensure correct coloring
grand_parent[direction2].red = True
node.red = True
grand_parent[direction2].left.red = False
grand_parent[direction2].right.red = False
# Replace and remove if found
if found is not None:
found.key = node.key
found.value = node.value
parent[int(parent.right is node)] = node[int(node.left is None)]
node.free()
self._count -= 1
# Update root and make it black
self._root = head.right
if self._root is not None:
self._root.red = False
if not found:
raise KeyError(str(key)) | T.remove(key) <==> del T[key], remove item <key> from tree. | Below is the the instruction that describes the task:
### Input:
T.remove(key) <==> del T[key], remove item <key> from tree.
### Response:
def remove(self, key):
    """T.remove(key) <==> del T[key], remove item <key> from tree.

    Top-down red-black deletion (Julienne Walker style, per the
    ``jsw_*`` rotation helpers): a red node is pushed down ahead of the
    search so the node unlinked at the bottom is never black, which
    preserves the black-height invariant without a bottom-up fix-up
    pass. Raises KeyError if *key* is not in the tree.
    """
    if self._root is None:
        raise KeyError(str(key))
    head = Node()  # False tree root
    node = head
    node.right = self._root
    parent = None
    grand_parent = None
    found = None  # Found item
    direction = 1
    # Search and push a red down
    while node[direction] is not None:
        last = direction
        # Update helpers
        grand_parent = parent
        parent = node
        node = node[direction]
        direction = 1 if (self._cmp(self._cmp_data, node.key, key) < 0) else 0
        # Save found node; keep descending so the physically removed
        # node is a bottom node whose payload can be copied back.
        if self._cmp(self._cmp_data, key, node.key) == 0:
            found = node
        # Push the red node down
        if not RBTree.is_red(node) and not RBTree.is_red(node[direction]):
            if RBTree.is_red(node[1 - direction]):
                # Red child on the opposite side: rotate it onto the
                # search path so redness travels with us.
                parent[last] = RBTree.jsw_single(node, direction)
                parent = parent[last]
            elif not RBTree.is_red(node[1 - direction]):
                sibling = parent[1 - last]
                if sibling is not None:
                    if (not RBTree.is_red(sibling[1 - last])) and (not RBTree.is_red(sibling[last])):
                        # Color flip
                        parent.red = False
                        sibling.red = True
                        node.red = True
                    else:
                        # Sibling has a red child: a single or double
                        # rotation at the grandparent restores balance.
                        direction2 = 1 if grand_parent.right is parent else 0
                        if RBTree.is_red(sibling[last]):
                            grand_parent[direction2] = RBTree.jsw_double(parent, last)
                        elif RBTree.is_red(sibling[1-last]):
                            grand_parent[direction2] = RBTree.jsw_single(parent, last)
                        # Ensure correct coloring
                        grand_parent[direction2].red = True
                        node.red = True
                        grand_parent[direction2].left.red = False
                        grand_parent[direction2].right.red = False
    # Replace and remove if found
    if found is not None:
        # Copy the bottom node's payload into the matched node, then
        # splice the bottom node out by linking its single child (or
        # None) into the parent.
        found.key = node.key
        found.value = node.value
        parent[int(parent.right is node)] = node[int(node.left is None)]
        node.free()
        self._count -= 1
    # Update root and make it black
    self._root = head.right
    if self._root is not None:
        self._root.red = False
    if not found:
        raise KeyError(str(key))
def from_table(fileobj=None, url='http://hgdownload.cse.ucsc.edu/goldenpath/hg19/database/knownGene.txt.gz',
parser=UCSCTable.KNOWN_GENE, mode='tx', decompress=None):
'''
UCSC Genome project provides several tables with gene coordinates (https://genome.ucsc.edu/cgi-bin/hgTables),
such as knownGene, refGene, ensGene, wgEncodeGencodeBasicV19, etc.
Indexing the rows of those tables into a ``GenomeIntervalTree`` is a common task, implemented in this method.
The table can be either specified as a ``fileobj`` (in which case the data is read line by line),
or via an ``url`` (the ``url`` may be to a ``txt`` or ``txt.gz`` file either online or locally).
The type of the table is specified using the ``parser`` parameter. This is a function that takes a line
of the file (with no line ending) and returns a dictionary, mapping field names to values. This dictionary will be assigned
to the ``data`` field of each interval in the resulting tree.
Finally, there are different ways genes can be mapped into intervals for the sake of indexing as an interval tree.
One way is to represent each gene via its transcribed region (``txStart``..``txEnd``). Another is to represent using
coding region (``cdsStart``..``cdsEnd``). Finally, the third possibility is to map each gene into several intervals,
corresponding to its exons (``exonStarts``..``exonEnds``).
The mode, in which genes are mapped to intervals is specified via the ``mode`` parameter. The value can be ``tx``, ``cds`` and
``exons``, corresponding to the three mentioned possibilities.
If a more specific way of interval-mapping is required (e.g. you might want to create 'coding-region+-10k' intervals), you can provide
an "interval-maker" function as the ``mode`` parameter. An interval-maker function takes as input a dictionary, returned by the parser,
and returns an iterable of Interval objects.
The ``parser`` function must ensure that its output contains the field named ``chrom``, and also fields named ``txStart``/``txEnd`` if ``mode=='tx'``,
fields ``cdsStart``/``cdsEnd`` if ``mode=='cds'``, and fields ``exonCount``/``exonStarts``/``exonEnds`` if ``mode=='exons'``.
The ``decompress`` parameter specifies whether the provided file is gzip-compressed.
This only applies to the situation when the url is given (no decompression is made if fileobj is provided in any case).
If decompress is None, data is decompressed if the url ends with .gz, otherwise decompress = True forces decompression.
>> knownGene = GenomeIntervalTree.from_table()
>> len(knownGene)
82960
>> result = knownGene[b'chr1'].search(100000, 138529)
>> len(result)
1
>> list(result)[0].data['name']
b'uc021oeg.2'
'''
if fileobj is None:
data = urlopen(url).read()
if (decompress is None and url.endswith('.gz')) or decompress:
data = zlib.decompress(data, 16+zlib.MAX_WBITS)
fileobj = BytesIO(data)
interval_lists = defaultdict(list)
if mode == 'tx':
interval_maker = IntervalMakers.TX
elif mode == 'cds':
interval_maker = IntervalMakers.CDS
elif mode == 'exons':
interval_maker = IntervalMakers.EXONS
elif getattr(mode, __call__, None) is None:
raise Exception("Parameter `mode` may only be 'tx', 'cds', 'exons' or a callable")
else:
interval_maker = mode
for ln in fileobj:
if not isinstance(ln, bytes):
ln = ln.encode()
ln = ln.strip()
d = parser(ln)
for interval in interval_maker(d):
interval_lists[d['chrom']].append(_fix(interval))
# Now convert interval lists into trees
gtree = GenomeIntervalTree()
for chrom, lst in getattr(interval_lists, 'iteritems', interval_lists.items)():
gtree[chrom] = IntervalTree(lst)
return gtree | UCSC Genome project provides several tables with gene coordinates (https://genome.ucsc.edu/cgi-bin/hgTables),
such as knownGene, refGene, ensGene, wgEncodeGencodeBasicV19, etc.
Indexing the rows of those tables into a ``GenomeIntervalTree`` is a common task, implemented in this method.
The table can be either specified as a ``fileobj`` (in which case the data is read line by line),
or via an ``url`` (the ``url`` may be to a ``txt`` or ``txt.gz`` file either online or locally).
The type of the table is specified using the ``parser`` parameter. This is a function that takes a line
of the file (with no line ending) and returns a dictionary, mapping field names to values. This dictionary will be assigned
to the ``data`` field of each interval in the resulting tree.
Finally, there are different ways genes can be mapped into intervals for the sake of indexing as an interval tree.
One way is to represent each gene via its transcribed region (``txStart``..``txEnd``). Another is to represent using
coding region (``cdsStart``..``cdsEnd``). Finally, the third possibility is to map each gene into several intervals,
corresponding to its exons (``exonStarts``..``exonEnds``).
The mode, in which genes are mapped to intervals is specified via the ``mode`` parameter. The value can be ``tx``, ``cds`` and
``exons``, corresponding to the three mentioned possibilities.
If a more specific way of interval-mapping is required (e.g. you might want to create 'coding-region+-10k' intervals), you can provide
an "interval-maker" function as the ``mode`` parameter. An interval-maker function takes as input a dictionary, returned by the parser,
and returns an iterable of Interval objects.
The ``parser`` function must ensure that its output contains the field named ``chrom``, and also fields named ``txStart``/``txEnd`` if ``mode=='tx'``,
fields ``cdsStart``/``cdsEnd`` if ``mode=='cds'``, and fields ``exonCount``/``exonStarts``/``exonEnds`` if ``mode=='exons'``.
The ``decompress`` parameter specifies whether the provided file is gzip-compressed.
This only applies to the situation when the url is given (no decompression is made if fileobj is provided in any case).
If decompress is None, data is decompressed if the url ends with .gz, otherwise decompress = True forces decompression.
>> knownGene = GenomeIntervalTree.from_table()
>> len(knownGene)
82960
>> result = knownGene[b'chr1'].search(100000, 138529)
>> len(result)
1
>> list(result)[0].data['name']
b'uc021oeg.2' | Below is the the instruction that describes the task:
### Input:
UCSC Genome project provides several tables with gene coordinates (https://genome.ucsc.edu/cgi-bin/hgTables),
such as knownGene, refGene, ensGene, wgEncodeGencodeBasicV19, etc.
Indexing the rows of those tables into a ``GenomeIntervalTree`` is a common task, implemented in this method.
The table can be either specified as a ``fileobj`` (in which case the data is read line by line),
or via an ``url`` (the ``url`` may be to a ``txt`` or ``txt.gz`` file either online or locally).
The type of the table is specified using the ``parser`` parameter. This is a function that takes a line
of the file (with no line ending) and returns a dictionary, mapping field names to values. This dictionary will be assigned
to the ``data`` field of each interval in the resulting tree.
Finally, there are different ways genes can be mapped into intervals for the sake of indexing as an interval tree.
One way is to represent each gene via its transcribed region (``txStart``..``txEnd``). Another is to represent using
coding region (``cdsStart``..``cdsEnd``). Finally, the third possibility is to map each gene into several intervals,
corresponding to its exons (``exonStarts``..``exonEnds``).
The mode, in which genes are mapped to intervals is specified via the ``mode`` parameter. The value can be ``tx``, ``cds`` and
``exons``, corresponding to the three mentioned possibilities.
If a more specific way of interval-mapping is required (e.g. you might want to create 'coding-region+-10k' intervals), you can provide
an "interval-maker" function as the ``mode`` parameter. An interval-maker function takes as input a dictionary, returned by the parser,
and returns an iterable of Interval objects.
The ``parser`` function must ensure that its output contains the field named ``chrom``, and also fields named ``txStart``/``txEnd`` if ``mode=='tx'``,
fields ``cdsStart``/``cdsEnd`` if ``mode=='cds'``, and fields ``exonCount``/``exonStarts``/``exonEnds`` if ``mode=='exons'``.
The ``decompress`` parameter specifies whether the provided file is gzip-compressed.
This only applies to the situation when the url is given (no decompression is made if fileobj is provided in any case).
If decompress is None, data is decompressed if the url ends with .gz, otherwise decompress = True forces decompression.
>> knownGene = GenomeIntervalTree.from_table()
>> len(knownGene)
82960
>> result = knownGene[b'chr1'].search(100000, 138529)
>> len(result)
1
>> list(result)[0].data['name']
b'uc021oeg.2'
### Response:
def from_table(fileobj=None, url='http://hgdownload.cse.ucsc.edu/goldenpath/hg19/database/knownGene.txt.gz',
               parser=UCSCTable.KNOWN_GENE, mode='tx', decompress=None):
    '''
    Index a UCSC gene table into a ``GenomeIntervalTree``.

    UCSC Genome project provides several tables with gene coordinates (https://genome.ucsc.edu/cgi-bin/hgTables),
    such as knownGene, refGene, ensGene, wgEncodeGencodeBasicV19, etc.
    The table can be either specified as a ``fileobj`` (in which case the data is read line by line),
    or via an ``url`` (the ``url`` may be to a ``txt`` or ``txt.gz`` file either online or locally).

    The type of the table is specified using the ``parser`` parameter. This is a function that takes a line
    of the file (with no line ending) and returns a dictionary, mapping field names to values. This dictionary
    will be assigned to the ``data`` field of each interval in the resulting tree.

    The ``mode`` parameter selects how genes are mapped to intervals: ``'tx'`` uses the transcribed region
    (``txStart``..``txEnd``), ``'cds'`` uses the coding region (``cdsStart``..``cdsEnd``), and ``'exons'`` maps
    each gene to one interval per exon (``exonStarts``..``exonEnds``). Alternatively, ``mode`` may be a callable
    "interval-maker": it receives the parser's dictionary and returns an iterable of Interval objects.
    The ``parser`` output must contain the field ``chrom``, plus the fields required by the chosen mode.

    The ``decompress`` parameter specifies whether the provided file is gzip-compressed. It only applies when an
    url is given (no decompression is made if fileobj is provided in any case). If decompress is None, data is
    decompressed if the url ends with .gz; decompress = True forces decompression.
    '''
    if fileobj is None:
        data = urlopen(url).read()
        if (decompress is None and url.endswith('.gz')) or decompress:
            # 16 + MAX_WBITS tells zlib to expect (and skip) a gzip header.
            data = zlib.decompress(data, 16 + zlib.MAX_WBITS)
        fileobj = BytesIO(data)
    interval_lists = defaultdict(list)
    if mode == 'tx':
        interval_maker = IntervalMakers.TX
    elif mode == 'cds':
        interval_maker = IntervalMakers.CDS
    elif mode == 'exons':
        interval_maker = IntervalMakers.EXONS
    elif not callable(mode):
        # BUG FIX: this was `getattr(mode, __call__, None)` with an unquoted
        # __call__, which raised NameError instead of this intended message.
        raise Exception("Parameter `mode` may only be 'tx', 'cds', 'exons' or a callable")
    else:
        interval_maker = mode
    # Accumulate intervals per chromosome, then build one tree per chromosome.
    for ln in fileobj:
        if not isinstance(ln, bytes):
            ln = ln.encode()
        ln = ln.strip()
        d = parser(ln)
        for interval in interval_maker(d):
            interval_lists[d['chrom']].append(_fix(interval))
    # Now convert interval lists into trees
    gtree = GenomeIntervalTree()
    # Use Py2's iteritems() when available, else Py3's items().
    for chrom, lst in getattr(interval_lists, 'iteritems', interval_lists.items)():
        gtree[chrom] = IntervalTree(lst)
    return gtree
def _log_phi(z):
    """Numerically stable evaluation of log(Phi(z)) and its derivative.

    Phi is the standard Normal CDF; this mirrors the GPML ``logphi.m``
    routine, switching between three regimes of ``z``.
    """
    z2 = z * z
    if z2 < 0.0492:
        # Near zero: Horner-evaluate a series in z with coefficients CS.
        scale = -z / SQRT2PI
        poly = 0
        for c in CS:
            poly = scale * (c + poly)
        res = -2 * poly - log(2)
        dres = exp(-z2 / 2 - res) / SQRT2PI
    elif z < -11.3137:
        # Far negative tail: rational approximation with coefficients RS/QS.
        num = 0.5641895835477550741
        for r in RS:
            num = -z * num / SQRT2 + r
        den = 1.0
        for q in QS:
            den = -z * den / SQRT2 + q
        res = log(num / (2 * den)) - z2 / 2
        dres = abs(den / num) * sqrt(2.0 / pi)
    else:
        # Elsewhere the direct formula is already stable.
        res = log(normal_cdf(z))
        dres = exp(-z2 / 2 - res) / SQRT2PI
    return res, dres
### Input:
Stable computation of the log of the Normal CDF and its derivative.
### Response:
def _log_phi(z):
    """Numerically stable evaluation of log(Phi(z)) and its derivative.

    Phi is the standard Normal CDF; this mirrors the GPML ``logphi.m``
    routine, switching between three regimes of ``z``.
    """
    z2 = z * z
    if z2 < 0.0492:
        # Near zero: Horner-evaluate a series in z with coefficients CS.
        scale = -z / SQRT2PI
        poly = 0
        for c in CS:
            poly = scale * (c + poly)
        res = -2 * poly - log(2)
        dres = exp(-z2 / 2 - res) / SQRT2PI
    elif z < -11.3137:
        # Far negative tail: rational approximation with coefficients RS/QS.
        num = 0.5641895835477550741
        for r in RS:
            num = -z * num / SQRT2 + r
        den = 1.0
        for q in QS:
            den = -z * den / SQRT2 + q
        res = log(num / (2 * den)) - z2 / 2
        dres = abs(den / num) * sqrt(2.0 / pi)
    else:
        # Elsewhere the direct formula is already stable.
        res = log(normal_cdf(z))
        dres = exp(-z2 / 2 - res) / SQRT2PI
    return res, dres
def update_reduced_metric(self, name, value, key=None):
    """Update the value of a registered ReducedMetric or MultiReducedMetric.

    :type name: str
    :param name: name of the registered metric to be updated.
    :param value: specifies a value to be reduced.
    :type key: str or None
    :param key: specifies a key for MultiReducedMetric. Needs to be `None` for updating
                ReducedMetric.
    """
    if name not in self.metrics:
        Log.error("In update_reduced_metric(): %s is not registered in the metric", name)
        # BUG FIX: previously execution fell through and raised KeyError on
        # self.metrics[name]; log-and-return matches the other error paths.
        return
    if key is None and isinstance(self.metrics[name], ReducedMetric):
        self.metrics[name].update(value)
    elif key is not None and isinstance(self.metrics[name], MultiReducedMetric):
        self.metrics[name].update(key, value)
    else:
        # BUG FIX: message previously said "update_count()" (copy-paste).
        Log.error("In update_reduced_metric(): %s is registered but not supported with this method", name)
:type name: str
:param name: name of the registered metric to be updated.
:param value: specifies a value to be reduced.
:type key: str or None
:param key: specifies a key for MultiReducedMetric. Needs to be `None` for updating
ReducedMetric. | Below is the instruction that describes the task:
### Input:
Update the value of ReducedMetric or MultiReducedMetric
:type name: str
:param name: name of the registered metric to be updated.
:param value: specifies a value to be reduced.
:type key: str or None
:param key: specifies a key for MultiReducedMetric. Needs to be `None` for updating
ReducedMetric.
### Response:
def update_reduced_metric(self, name, value, key=None):
    """Update the value of a registered ReducedMetric or MultiReducedMetric.

    :type name: str
    :param name: name of the registered metric to be updated.
    :param value: specifies a value to be reduced.
    :type key: str or None
    :param key: specifies a key for MultiReducedMetric. Needs to be `None` for updating
                ReducedMetric.
    """
    if name not in self.metrics:
        Log.error("In update_reduced_metric(): %s is not registered in the metric", name)
        # BUG FIX: previously execution fell through and raised KeyError on
        # self.metrics[name]; log-and-return matches the other error paths.
        return
    if key is None and isinstance(self.metrics[name], ReducedMetric):
        self.metrics[name].update(value)
    elif key is not None and isinstance(self.metrics[name], MultiReducedMetric):
        self.metrics[name].update(key, value)
    else:
        # BUG FIX: message previously said "update_count()" (copy-paste).
        Log.error("In update_reduced_metric(): %s is registered but not supported with this method", name)
def to_parameter_specs(self, name_prefix=""):
    """To list of dicts suitable for Cloud ML Engine hyperparameter tuning."""
    def make_spec(name, kind, scale=None, **extra):
        # Build one parameter spec; scaleType is only emitted when a scale
        # was registered for the parameter.
        entry = {"parameterName": name_prefix + name, "type": kind}
        entry.update(extra)
        if scale:
            entry["scaleType"] = self.SCALES_STR[scale]
        return entry

    specs = []
    for name, categories, _ in self._categorical_params.values():
        specs.append(make_spec(name, "CATEGORICAL", categoricalValues=categories))
    for name, feasible_points, scale, _ in self._discrete_params.values():
        specs.append(make_spec(name, "DISCRETE", scale, discreteValues=feasible_points))
    for name, min_val, max_val, scale, _ in self._float_params.values():
        specs.append(make_spec(name, "DOUBLE", scale, minValue=min_val, maxValue=max_val))
    for name, min_val, max_val, scale, _ in self._int_params.values():
        specs.append(make_spec(name, "INTEGER", scale, minValue=min_val, maxValue=max_val))
    return specs
### Input:
To list of dicts suitable for Cloud ML Engine hyperparameter tuning.
### Response:
def to_parameter_specs(self, name_prefix=""):
    """To list of dicts suitable for Cloud ML Engine hyperparameter tuning."""
    def make_spec(name, kind, scale=None, **extra):
        # Build one parameter spec; scaleType is only emitted when a scale
        # was registered for the parameter.
        entry = {"parameterName": name_prefix + name, "type": kind}
        entry.update(extra)
        if scale:
            entry["scaleType"] = self.SCALES_STR[scale]
        return entry

    specs = []
    for name, categories, _ in self._categorical_params.values():
        specs.append(make_spec(name, "CATEGORICAL", categoricalValues=categories))
    for name, feasible_points, scale, _ in self._discrete_params.values():
        specs.append(make_spec(name, "DISCRETE", scale, discreteValues=feasible_points))
    for name, min_val, max_val, scale, _ in self._float_params.values():
        specs.append(make_spec(name, "DOUBLE", scale, minValue=min_val, maxValue=max_val))
    for name, min_val, max_val, scale, _ in self._int_params.values():
        specs.append(make_spec(name, "INTEGER", scale, minValue=min_val, maxValue=max_val))
    return specs
def strSlist(string):
    """Convert a colon-separated angle string into a signed list.

    The first element is '-' or '+' according to the leading sign of the
    input; the remaining elements are the absolute integer components.
    The result is normalized through ``_fixSlist``.
    """
    if string[0] == '-':
        sign = '-'
    else:
        sign = '+'
    parts = [abs(int(piece)) for piece in string.split(':')]
    return _fixSlist([sign] + parts)
### Input:
Converts angle string to signed list.
### Response:
def strSlist(string):
    """Convert a colon-separated angle string into a signed list.

    The first element is '-' or '+' according to the leading sign of the
    input; the remaining elements are the absolute integer components.
    The result is normalized through ``_fixSlist``.
    """
    if string[0] == '-':
        sign = '-'
    else:
        sign = '+'
    parts = [abs(int(piece)) for piece in string.split(':')]
    return _fixSlist([sign] + parts)
def match_window(in_data, offset):
    '''Find the longest match for the string starting at offset in the preceeding data.

    Returns a ``(distance_back, length)`` tuple for the longest earlier
    occurrence of the data at ``offset``, or ``None`` when no match of at
    least ``THRESHOLD`` characters exists within the search window.
    '''
    window_start = max(offset - WINDOW_MASK, 0)
    length = MAX_LEN
    while length >= THRESHOLD:
        window_end = min(offset + length, len(in_data))
        if window_end - offset < THRESHOLD:
            # Not enough data left for a meaningful result.
            return None
        candidate = in_data[offset:window_end]
        found_at = in_data.rfind(candidate, window_start, window_end - length)
        if found_at != -1:
            return (offset - found_at, len(candidate))
        length -= 1
    return None
### Input:
Find the longest match for the string starting at offset in the preceeding data
### Response:
def match_window(in_data, offset):
    '''Find the longest match for the string starting at offset in the preceeding data.

    Returns a ``(distance_back, length)`` tuple for the longest earlier
    occurrence of the data at ``offset``, or ``None`` when no match of at
    least ``THRESHOLD`` characters exists within the search window.
    '''
    window_start = max(offset - WINDOW_MASK, 0)
    length = MAX_LEN
    while length >= THRESHOLD:
        window_end = min(offset + length, len(in_data))
        if window_end - offset < THRESHOLD:
            # Not enough data left for a meaningful result.
            return None
        candidate = in_data[offset:window_end]
        found_at = in_data.rfind(candidate, window_start, window_end - length)
        if found_at != -1:
            return (offset - found_at, len(candidate))
        length -= 1
    return None
def K_globe_stop_check_valve_Crane(D1, D2, fd=None, style=0):
    r'''Returns the loss coefficient for a globe stop check valve as shown in
    [1]_.
    If β = 1:
    .. math::
        K = K_1 = K_2 = N\cdot f_d
    Otherwise:
    .. math::
        K_2 = \frac{K + \beta\left[0.5(1-\beta^2) + (1-\beta^2)^2\right]}{\beta^4}
    (The β factor inside the brackets matches both the implementation below
    and the Crane TP-410 reduced-seat formula; earlier documentation omitted
    it.)
    Style 0 is the standard form; style 1 is angled, with a restrition to force
    the flow up through the valve; style 2 is also angled but with a smaller
    restriction forcing the flow up. N is 400, 300, and 55 for those cases
    respectively.
    Parameters
    ----------
    D1 : float
        Diameter of the valve seat bore (must be smaller or equal to `D2`), [m]
    D2 : float
        Diameter of the pipe attached to the valve, [m]
    fd : float, optional
        Darcy friction factor calculated for the actual pipe flow in clean
        steel (roughness = 0.0018 inch) in the fully developed turbulent
        region; do not specify this to use the original Crane friction factor!,
        [-]
    style : int, optional
        One of 0, 1, or 2; refers to three different types of angle valves
        as shown in [1]_ [-]
    Returns
    -------
    K : float
        Loss coefficient with respect to the pipe inside diameter [-]
    Notes
    -----
    This method is not valid in the laminar regime and the pressure drop will
    be underestimated in those conditions.
    Examples
    --------
    >>> K_globe_stop_check_valve_Crane(.1, .02, style=1)
    4.5235076518969795
    References
    ----------
    .. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane,
       2009.
    '''
    if fd is None:
        fd = ft_Crane(D2)
    try:
        K = globe_stop_check_valve_Crane_coeffs[style]*fd
    except (KeyError, IndexError):
        # BUG FIX: a sequence-backed coefficient table raises IndexError for a
        # bad style, which the old `except KeyError` never caught; both now
        # map to the documented KeyError.
        raise KeyError('Accepted valve styles are 0, 1, and 2 only')
    beta = D1/D2
    if beta == 1:
        return K
    else:
        return (K + beta*(0.5*(1 - beta**2) + (1 - beta**2)**2))/beta**4
[1]_.
If β = 1:
.. math::
K = K_1 = K_2 = N\cdot f_d
Otherwise:
.. math::
K_2 = \frac{K + \left[0.5(1-\beta^2) + (1-\beta^2)^2\right]}{\beta^4}
Style 0 is the standard form; style 1 is angled, with a restrition to force
the flow up through the valve; style 2 is also angled but with a smaller
restriction forcing the flow up. N is 400, 300, and 55 for those cases
respectively.
Parameters
----------
D1 : float
Diameter of the valve seat bore (must be smaller or equal to `D2`), [m]
D2 : float
Diameter of the pipe attached to the valve, [m]
fd : float, optional
Darcy friction factor calculated for the actual pipe flow in clean
steel (roughness = 0.0018 inch) in the fully developed turbulent
region; do not specify this to use the original Crane friction factor!,
[-]
style : int, optional
One of 0, 1, or 2; refers to three different types of angle valves
as shown in [1]_ [-]
Returns
-------
K : float
Loss coefficient with respect to the pipe inside diameter [-]
Notes
-----
This method is not valid in the laminar regime and the pressure drop will
be underestimated in those conditions.
Examples
--------
>>> K_globe_stop_check_valve_Crane(.1, .02, style=1)
4.5235076518969795
References
----------
.. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane,
2009. | Below is the instruction that describes the task:
### Input:
r'''Returns the loss coefficient for a globe stop check valve as shown in
[1]_.
If β = 1:
.. math::
K = K_1 = K_2 = N\cdot f_d
Otherwise:
.. math::
K_2 = \frac{K + \left[0.5(1-\beta^2) + (1-\beta^2)^2\right]}{\beta^4}
Style 0 is the standard form; style 1 is angled, with a restrition to force
the flow up through the valve; style 2 is also angled but with a smaller
restriction forcing the flow up. N is 400, 300, and 55 for those cases
respectively.
Parameters
----------
D1 : float
Diameter of the valve seat bore (must be smaller or equal to `D2`), [m]
D2 : float
Diameter of the pipe attached to the valve, [m]
fd : float, optional
Darcy friction factor calculated for the actual pipe flow in clean
steel (roughness = 0.0018 inch) in the fully developed turbulent
region; do not specify this to use the original Crane friction factor!,
[-]
style : int, optional
One of 0, 1, or 2; refers to three different types of angle valves
as shown in [1]_ [-]
Returns
-------
K : float
Loss coefficient with respect to the pipe inside diameter [-]
Notes
-----
This method is not valid in the laminar regime and the pressure drop will
be underestimated in those conditions.
Examples
--------
>>> K_globe_stop_check_valve_Crane(.1, .02, style=1)
4.5235076518969795
References
----------
.. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane,
2009.
### Response:
def K_globe_stop_check_valve_Crane(D1, D2, fd=None, style=0):
    r'''Returns the loss coefficient for a globe stop check valve as shown in
    [1]_.
    If β = 1:
    .. math::
        K = K_1 = K_2 = N\cdot f_d
    Otherwise:
    .. math::
        K_2 = \frac{K + \beta\left[0.5(1-\beta^2) + (1-\beta^2)^2\right]}{\beta^4}
    (The β factor inside the brackets matches both the implementation below
    and the Crane TP-410 reduced-seat formula; earlier documentation omitted
    it.)
    Style 0 is the standard form; style 1 is angled, with a restrition to force
    the flow up through the valve; style 2 is also angled but with a smaller
    restriction forcing the flow up. N is 400, 300, and 55 for those cases
    respectively.
    Parameters
    ----------
    D1 : float
        Diameter of the valve seat bore (must be smaller or equal to `D2`), [m]
    D2 : float
        Diameter of the pipe attached to the valve, [m]
    fd : float, optional
        Darcy friction factor calculated for the actual pipe flow in clean
        steel (roughness = 0.0018 inch) in the fully developed turbulent
        region; do not specify this to use the original Crane friction factor!,
        [-]
    style : int, optional
        One of 0, 1, or 2; refers to three different types of angle valves
        as shown in [1]_ [-]
    Returns
    -------
    K : float
        Loss coefficient with respect to the pipe inside diameter [-]
    Notes
    -----
    This method is not valid in the laminar regime and the pressure drop will
    be underestimated in those conditions.
    Examples
    --------
    >>> K_globe_stop_check_valve_Crane(.1, .02, style=1)
    4.5235076518969795
    References
    ----------
    .. [1] Crane Co. Flow of Fluids Through Valves, Fittings, and Pipe. Crane,
       2009.
    '''
    if fd is None:
        fd = ft_Crane(D2)
    try:
        K = globe_stop_check_valve_Crane_coeffs[style]*fd
    except (KeyError, IndexError):
        # BUG FIX: a sequence-backed coefficient table raises IndexError for a
        # bad style, which the old `except KeyError` never caught; both now
        # map to the documented KeyError.
        raise KeyError('Accepted valve styles are 0, 1, and 2 only')
    beta = D1/D2
    if beta == 1:
        return K
    else:
        return (K + beta*(0.5*(1 - beta**2) + (1 - beta**2)**2))/beta**4
def _oauth_tokengetter(token=None):
    """
    Default function to return the current user oauth token
    from session cookie.

    NOTE(review): the ``token`` argument is accepted only for tokengetter
    signature compatibility and is ignored; the session value always wins.
    """
    current = session.get("oauth")
    log.debug("Token Get: {0}".format(current))
    return current
from session cookie. | Below is the instruction that describes the task:
### Input:
Default function to return the current user oauth token
from session cookie.
### Response:
def _oauth_tokengetter(token=None):
    """
    Default function to return the current user oauth token
    from session cookie.

    NOTE(review): the ``token`` argument is accepted only for tokengetter
    signature compatibility and is ignored; the session value always wins.
    """
    current = session.get("oauth")
    log.debug("Token Get: {0}".format(current))
    return current
def dataset(self):
    """A Tablib Dataset containing the row."""
    result = tablib.Dataset()
    result.headers = self.keys()
    # Datetimes are normalized before being appended as the single row.
    result.append(_reduce_datetimes(self.values()))
    return result
### Input:
A Tablib Dataset containing the row.
### Response:
def dataset(self):
    """A Tablib Dataset containing the row."""
    result = tablib.Dataset()
    result.headers = self.keys()
    # Datetimes are normalized before being appended as the single row.
    result.append(_reduce_datetimes(self.values()))
    return result
def _quantile_function(self, alpha=0.5, smallest_count=None):
    """Build and return a quantile function for this histogram.

    The returned callable maps ``q`` in ``[0, 1]`` to a value, linearly
    interpolating between observed values when the smoothing term ``beta``
    is positive, and using midpoint/step semantics when ``beta == 0``.
    """
    total = float(self.total())
    min_observed = min(itervalues(self))
    if smallest_count is None:
        smallest_count = min_observed
    else:
        smallest_count = min(smallest_count, min_observed)
    beta = alpha * smallest_count
    debug_plot = []
    running = 0.0
    inverse = sortedcontainers.SortedDict()
    for value, count in iteritems(self):
        debug_plot.append((running / total, value))
        inverse[(running + beta) / total] = value
        running += count
        inverse[(running - beta) / total] = value
        debug_plot.append((running / total, value))
    # extreme q values actually covered by the inverse mapping
    q_min = inverse.iloc[0]
    q_max = inverse.iloc[-1]
    # this stuff is helpful for debugging -- keep it in here
    # for i, j in debug_plot:
    #     print i, j
    # for i, j in inverse.iteritems():
    #     print i, j

    def _bracket(q):
        # entries on either side of q in the inverse mapping
        left = inverse.bisect_left(q) - 1
        x1 = inverse.iloc[left]
        x2 = inverse.iloc[left + 1]
        return x1, x2, inverse[x1], inverse[x2]

    def function(q):
        if q < 0.0 or q > 1.0:
            raise ValueError('invalid quantile %s, need `0 <= q <= 1`' % q)
        # clamp to the covered range
        q = min(max(q, q_min), q_max)
        if beta > 0:
            if q in inverse:
                result = inverse[q]
            else:
                x1, x2, y1, y2 = _bracket(q)
                result = (y2 - y1) * (q - x1) / float(x2 - x1) + y1
        else:
            if q in inverse:
                x1, x2, y1, y2 = _bracket(q)
                result = 0.5 * (y1 + y2)
            else:
                left = inverse.bisect_left(q) - 1
                result = inverse[inverse.iloc[left]]
        return float(result)
    return function
histogram. | Below is the instruction that describes the task:
### Input:
Return a function that returns the quantile values for this
histogram.
### Response:
def _quantile_function(self, alpha=0.5, smallest_count=None):
    """Build and return a quantile function for this histogram.

    The returned callable maps ``q`` in ``[0, 1]`` to a value, linearly
    interpolating between observed values when the smoothing term ``beta``
    is positive, and using midpoint/step semantics when ``beta == 0``.
    """
    total = float(self.total())
    min_observed = min(itervalues(self))
    if smallest_count is None:
        smallest_count = min_observed
    else:
        smallest_count = min(smallest_count, min_observed)
    beta = alpha * smallest_count
    debug_plot = []
    running = 0.0
    inverse = sortedcontainers.SortedDict()
    for value, count in iteritems(self):
        debug_plot.append((running / total, value))
        inverse[(running + beta) / total] = value
        running += count
        inverse[(running - beta) / total] = value
        debug_plot.append((running / total, value))
    # extreme q values actually covered by the inverse mapping
    q_min = inverse.iloc[0]
    q_max = inverse.iloc[-1]
    # this stuff is helpful for debugging -- keep it in here
    # for i, j in debug_plot:
    #     print i, j
    # for i, j in inverse.iteritems():
    #     print i, j

    def _bracket(q):
        # entries on either side of q in the inverse mapping
        left = inverse.bisect_left(q) - 1
        x1 = inverse.iloc[left]
        x2 = inverse.iloc[left + 1]
        return x1, x2, inverse[x1], inverse[x2]

    def function(q):
        if q < 0.0 or q > 1.0:
            raise ValueError('invalid quantile %s, need `0 <= q <= 1`' % q)
        # clamp to the covered range
        q = min(max(q, q_min), q_max)
        if beta > 0:
            if q in inverse:
                result = inverse[q]
            else:
                x1, x2, y1, y2 = _bracket(q)
                result = (y2 - y1) * (q - x1) / float(x2 - x1) + y1
        else:
            if q in inverse:
                x1, x2, y1, y2 = _bracket(q)
                result = 0.5 * (y1 + y2)
            else:
                left = inverse.bisect_left(q) - 1
                result = inverse[inverse.iloc[left]]
        return float(result)
    return function
def check_contiguity(w, neighbors, leaver):
    """Check if contiguity is maintained if leaver is removed from neighbors.

    Parameters
    ----------
    w : spatial weights object
        simple contiguity based weights
    neighbors : list
        nodes that are to be checked if they form a single connected component
    leaver : id
        a member of neighbors to check for removal

    Returns
    -------
    True : if removing leaver from neighbors does not break contiguity of the
        remaining set in neighbors
    False : if removing leaver from neighbors breaks contiguity

    Example
    -------
    Setup imports and a 25x25 spatial weights matrix on a 5x5 square region.
    >>> import libpysal as lps
    >>> w = lps.weights.lat2W(5, 5)

    Test removing various areas from a subset of the region's areas. In the
    first case the subset is defined as observations 0, 1, 2, 3 and 4. The
    test shows that observations 0, 1, 2 and 3 remain connected even if
    observation 4 is removed.
    >>> check_contiguity(w,[0,1,2,3,4],4)
    True
    >>> check_contiguity(w,[0,1,2,3,4],3)
    False
    >>> check_contiguity(w,[0,1,2,3,4],0)
    True
    >>> check_contiguity(w,[0,1,2,3,4],1)
    False
    """
    # Work on a copy so the caller's list is never mutated.
    remaining = list(neighbors)
    remaining.remove(leaver)
    return is_component(w, remaining)
Parameters
----------
w : spatial weights object
simple contiguity based weights
neighbors : list
nodes that are to be checked if they form a single \
connected component
leaver : id
a member of neighbors to check for removal
Returns
-------
True : if removing leaver from neighbors does not break contiguity
of remaining set
in neighbors
False : if removing leaver from neighbors breaks contiguity
Example
-------
Setup imports and a 25x25 spatial weights matrix on a 5x5 square region.
>>> import libpysal as lps
>>> w = lps.weights.lat2W(5, 5)
Test removing various areas from a subset of the region's areas. In the
first case the subset is defined as observations 0, 1, 2, 3 and 4. The
test shows that observations 0, 1, 2 and 3 remain connected even if
observation 4 is removed.
>>> check_contiguity(w,[0,1,2,3,4],4)
True
>>> check_contiguity(w,[0,1,2,3,4],3)
False
>>> check_contiguity(w,[0,1,2,3,4],0)
True
>>> check_contiguity(w,[0,1,2,3,4],1)
False
>>> | Below is the instruction that describes the task:
### Input:
Check if contiguity is maintained if leaver is removed from neighbors
Parameters
----------
w : spatial weights object
simple contiguity based weights
neighbors : list
nodes that are to be checked if they form a single \
connected component
leaver : id
a member of neighbors to check for removal
Returns
-------
True : if removing leaver from neighbors does not break contiguity
of remaining set
in neighbors
False : if removing leaver from neighbors breaks contiguity
Example
-------
Setup imports and a 25x25 spatial weights matrix on a 5x5 square region.
>>> import libpysal as lps
>>> w = lps.weights.lat2W(5, 5)
Test removing various areas from a subset of the region's areas. In the
first case the subset is defined as observations 0, 1, 2, 3 and 4. The
test shows that observations 0, 1, 2 and 3 remain connected even if
observation 4 is removed.
>>> check_contiguity(w,[0,1,2,3,4],4)
True
>>> check_contiguity(w,[0,1,2,3,4],3)
False
>>> check_contiguity(w,[0,1,2,3,4],0)
True
>>> check_contiguity(w,[0,1,2,3,4],1)
False
>>>
### Response:
def check_contiguity(w, neighbors, leaver):
    """Check if contiguity is maintained if leaver is removed from neighbors.

    Parameters
    ----------
    w : spatial weights object
        simple contiguity based weights
    neighbors : list
        nodes that are to be checked if they form a single connected component
    leaver : id
        a member of neighbors to check for removal

    Returns
    -------
    True : if removing leaver from neighbors does not break contiguity of the
        remaining set in neighbors
    False : if removing leaver from neighbors breaks contiguity

    Example
    -------
    Setup imports and a 25x25 spatial weights matrix on a 5x5 square region.
    >>> import libpysal as lps
    >>> w = lps.weights.lat2W(5, 5)

    Test removing various areas from a subset of the region's areas. In the
    first case the subset is defined as observations 0, 1, 2, 3 and 4. The
    test shows that observations 0, 1, 2 and 3 remain connected even if
    observation 4 is removed.
    >>> check_contiguity(w,[0,1,2,3,4],4)
    True
    >>> check_contiguity(w,[0,1,2,3,4],3)
    False
    >>> check_contiguity(w,[0,1,2,3,4],0)
    True
    >>> check_contiguity(w,[0,1,2,3,4],1)
    False
    """
    # Work on a copy so the caller's list is never mutated.
    remaining = list(neighbors)
    remaining.remove(leaver)
    return is_component(w, remaining)
def update(self, new_details, old_details=None):
    ''' a method to upsert changes to a record in the table

    :param new_details: dictionary with updated record fields
    :param old_details: [optional] dictionary with original record fields
    :return: list of dictionaries with updated field details

    NOTE: if old_details is empty, method will poll database for the
          most recent version of the record with which to compare the
          new details for changes
    '''
    title = '%s.update' % self.__class__.__name__

    # validate inputs
    for arg_name, arg_value in {'new_details': new_details, 'old_details': old_details}.items():
        if arg_value:
            object_title = '%s(%s=%s)' % (title, arg_name, str(arg_value))
            self.fields.validate(arg_value, '.%s' % arg_name, object_title)
    if old_details and new_details['id'] != old_details['id']:
        raise ValueError('%s old_details["id"] value must match new_details["id"]' % title)

    # extract primary key and validate new record against the model
    primary_key = new_details['id']
    new_details = self.model.validate(new_details)

    # fall back to the stored record when no original was supplied
    if not old_details:
        try:
            old_details = self.read(primary_key)
        except:
            raise ValueError('%s new_details["id"] does not exist.' % title)

    # determine record differences
    from labpack.parsing.comparison import compare_records
    update_list = compare_records(new_details, old_details)

    # translate each difference into a flattened (dotted-path) column value
    update_kwargs = {}
    for change in update_list:
        if change['action'] not in ('DELETE', 'REMOVE'):
            cursor = new_details
            dotted = ''
            for segment in change['path']:
                dotted = dotted + '.' + segment if dotted else segment
                if isinstance(cursor[segment], dict):
                    cursor = cursor[segment]
                    continue
                elif isinstance(cursor[segment], list):
                    # lists are stored pickled in a single column
                    update_kwargs[dotted] = pickle.dumps(cursor[segment])
                    break
                else:
                    update_kwargs[dotted] = cursor[segment]
        else:
            cursor = old_details
            dotted = ''
            path = change['path']
            for position, segment in enumerate(path):
                dotted = dotted + '.' + segment if dotted else segment
                if change['action'] == 'DELETE' and position + 1 == len(path):
                    update_kwargs[dotted] = None
                elif isinstance(cursor[segment], dict):
                    cursor = cursor[segment]
                    continue
                elif isinstance(cursor[segment], list):
                    update_kwargs[dotted] = pickle.dumps(new_details[segment])
                    break
                else:
                    update_kwargs[dotted] = None

    # send update command
    if update_kwargs:
        statement = self.table.update(self.table.c.id == primary_key).values(**update_kwargs)
        self.session.execute(statement)
    return update_list
:param new_details: dictionary with updated record fields
:param old_details: [optional] dictionary with original record fields
:return: list of dictionaries with updated field details
NOTE: if old_details is empty, method will poll database for the
most recent version of the record with which to compare the
new details for changes | Below is the instruction that describes the task:
### Input:
a method to upsert changes to a record in the table
:param new_details: dictionary with updated record fields
:param old_details: [optional] dictionary with original record fields
:return: list of dictionaries with updated field details
NOTE: if old_details is empty, method will poll database for the
most recent version of the record with which to compare the
new details for changes
### Response:
def update(self, new_details, old_details=None):
    ''' a method to upsert changes to a record in the table

    :param new_details: dictionary with updated record fields
    :param old_details: [optional] dictionary with original record fields
    :return: list of dictionaries with updated field details

    NOTE: if old_details is empty, method will poll database for the
          most recent version of the record with which to compare the
          new details for changes
    '''
    title = '%s.update' % self.__class__.__name__

    # validate inputs
    for arg_name, arg_value in {'new_details': new_details, 'old_details': old_details}.items():
        if arg_value:
            object_title = '%s(%s=%s)' % (title, arg_name, str(arg_value))
            self.fields.validate(arg_value, '.%s' % arg_name, object_title)
    if old_details and new_details['id'] != old_details['id']:
        raise ValueError('%s old_details["id"] value must match new_details["id"]' % title)

    # extract primary key and validate new record against the model
    primary_key = new_details['id']
    new_details = self.model.validate(new_details)

    # fall back to the stored record when no original was supplied
    if not old_details:
        try:
            old_details = self.read(primary_key)
        except:
            raise ValueError('%s new_details["id"] does not exist.' % title)

    # determine record differences
    from labpack.parsing.comparison import compare_records
    update_list = compare_records(new_details, old_details)

    # translate each difference into a flattened (dotted-path) column value
    update_kwargs = {}
    for change in update_list:
        if change['action'] not in ('DELETE', 'REMOVE'):
            cursor = new_details
            dotted = ''
            for segment in change['path']:
                dotted = dotted + '.' + segment if dotted else segment
                if isinstance(cursor[segment], dict):
                    cursor = cursor[segment]
                    continue
                elif isinstance(cursor[segment], list):
                    # lists are stored pickled in a single column
                    update_kwargs[dotted] = pickle.dumps(cursor[segment])
                    break
                else:
                    update_kwargs[dotted] = cursor[segment]
        else:
            cursor = old_details
            dotted = ''
            path = change['path']
            for position, segment in enumerate(path):
                dotted = dotted + '.' + segment if dotted else segment
                if change['action'] == 'DELETE' and position + 1 == len(path):
                    update_kwargs[dotted] = None
                elif isinstance(cursor[segment], dict):
                    cursor = cursor[segment]
                    continue
                elif isinstance(cursor[segment], list):
                    update_kwargs[dotted] = pickle.dumps(new_details[segment])
                    break
                else:
                    update_kwargs[dotted] = None

    # send update command
    if update_kwargs:
        statement = self.table.update(self.table.c.id == primary_key).values(**update_kwargs)
        self.session.execute(statement)
    return update_list
def get_device_info(self, bigip):
'''Get device information about a specific BigIP device.
:param bigip: bigip object --- device to inspect
:returns: bigip object
'''
coll = bigip.tm.cm.devices.get_collection()
device = [device for device in coll if device.selfDevice == 'true']
assert len(device) == 1
return device[0] | Get device information about a specific BigIP device.
:param bigip: bigip object --- device to inspect
:returns: bigip object | Below is the the instruction that describes the task:
### Input:
Get device information about a specific BigIP device.
:param bigip: bigip object --- device to inspect
:returns: bigip object
### Response:
def get_device_info(self, bigip):
'''Get device information about a specific BigIP device.
:param bigip: bigip object --- device to inspect
:returns: bigip object
'''
coll = bigip.tm.cm.devices.get_collection()
device = [device for device in coll if device.selfDevice == 'true']
assert len(device) == 1
return device[0] |
def _fetch_result(self):
"""
Fetch the queried object.
"""
self._result = self.conn.query_single(self.object_type, self.url_params, self.query_params) | Fetch the queried object. | Below is the the instruction that describes the task:
### Input:
Fetch the queried object.
### Response:
def _fetch_result(self):
"""
Fetch the queried object.
"""
self._result = self.conn.query_single(self.object_type, self.url_params, self.query_params) |
def groupby_task_class(self):
"""
Returns a dictionary mapping the task class to the list of tasks in the flow
"""
# Find all Task classes
class2tasks = OrderedDict()
for task in self.iflat_tasks():
cls = task.__class__
if cls not in class2tasks: class2tasks[cls] = []
class2tasks[cls].append(task)
return class2tasks | Returns a dictionary mapping the task class to the list of tasks in the flow | Below is the the instruction that describes the task:
### Input:
Returns a dictionary mapping the task class to the list of tasks in the flow
### Response:
def groupby_task_class(self):
"""
Returns a dictionary mapping the task class to the list of tasks in the flow
"""
# Find all Task classes
class2tasks = OrderedDict()
for task in self.iflat_tasks():
cls = task.__class__
if cls not in class2tasks: class2tasks[cls] = []
class2tasks[cls].append(task)
return class2tasks |
def updateActiveMarkupClass(self):
'''
Update the active markup class based on the default class and
the current filename. If the active markup class changes, the
highlighter is rerun on the input text, the markup object of
this tab is replaced with one of the new class and the
activeMarkupChanged signal is emitted.
'''
previousMarkupClass = self.activeMarkupClass
self.activeMarkupClass = find_markup_class_by_name(globalSettings.defaultMarkup)
if self._fileName:
markupClass = get_markup_for_file_name(
self._fileName, return_class=True)
if markupClass:
self.activeMarkupClass = markupClass
if self.activeMarkupClass != previousMarkupClass:
self.highlighter.docType = self.activeMarkupClass.name if self.activeMarkupClass else None
self.highlighter.rehighlight()
self.activeMarkupChanged.emit()
self.triggerPreviewUpdate() | Update the active markup class based on the default class and
the current filename. If the active markup class changes, the
highlighter is rerun on the input text, the markup object of
this tab is replaced with one of the new class and the
activeMarkupChanged signal is emitted. | Below is the the instruction that describes the task:
### Input:
Update the active markup class based on the default class and
the current filename. If the active markup class changes, the
highlighter is rerun on the input text, the markup object of
this tab is replaced with one of the new class and the
activeMarkupChanged signal is emitted.
### Response:
def updateActiveMarkupClass(self):
'''
Update the active markup class based on the default class and
the current filename. If the active markup class changes, the
highlighter is rerun on the input text, the markup object of
this tab is replaced with one of the new class and the
activeMarkupChanged signal is emitted.
'''
previousMarkupClass = self.activeMarkupClass
self.activeMarkupClass = find_markup_class_by_name(globalSettings.defaultMarkup)
if self._fileName:
markupClass = get_markup_for_file_name(
self._fileName, return_class=True)
if markupClass:
self.activeMarkupClass = markupClass
if self.activeMarkupClass != previousMarkupClass:
self.highlighter.docType = self.activeMarkupClass.name if self.activeMarkupClass else None
self.highlighter.rehighlight()
self.activeMarkupChanged.emit()
self.triggerPreviewUpdate() |
def get_current(self, layout=None, network=None, verbose=False):
"""
Returns the current view or null if there is none.
:param verbose: print more
:returns: current view or null if there is none
"""
PARAMS={}
response=api(url=self.__url+"/get_current", PARAMS=PARAMS, method="POST", verbose=verbose)
return response | Returns the current view or null if there is none.
:param verbose: print more
:returns: current view or null if there is none | Below is the the instruction that describes the task:
### Input:
Returns the current view or null if there is none.
:param verbose: print more
:returns: current view or null if there is none
### Response:
def get_current(self, layout=None, network=None, verbose=False):
"""
Returns the current view or null if there is none.
:param verbose: print more
:returns: current view or null if there is none
"""
PARAMS={}
response=api(url=self.__url+"/get_current", PARAMS=PARAMS, method="POST", verbose=verbose)
return response |
def connect(self):
"""Connect to vCenter server"""
try:
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
if self.config['no_ssl_verify']:
requests.packages.urllib3.disable_warnings()
context.verify_mode = ssl.CERT_NONE
self.si = SmartConnectNoSSL(
host=self.config['server'],
user=self.config['username'],
pwd=self.config['password'],
port=int(self.config['port']),
certFile=None,
keyFile=None,
)
else:
self.si = SmartConnect(
host=self.config['server'],
user=self.config['username'],
pwd=self.config['password'],
port=int(self.config['port']),
sslContext=context,
certFile=None,
keyFile=None,
)
except Exception as e:
print('Unable to connect to vsphere server.')
print(e)
sys.exit(1)
# add a clean up routine
atexit.register(Disconnect, self.si)
self.content = self.si.RetrieveContent() | Connect to vCenter server | Below is the the instruction that describes the task:
### Input:
Connect to vCenter server
### Response:
def connect(self):
"""Connect to vCenter server"""
try:
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
if self.config['no_ssl_verify']:
requests.packages.urllib3.disable_warnings()
context.verify_mode = ssl.CERT_NONE
self.si = SmartConnectNoSSL(
host=self.config['server'],
user=self.config['username'],
pwd=self.config['password'],
port=int(self.config['port']),
certFile=None,
keyFile=None,
)
else:
self.si = SmartConnect(
host=self.config['server'],
user=self.config['username'],
pwd=self.config['password'],
port=int(self.config['port']),
sslContext=context,
certFile=None,
keyFile=None,
)
except Exception as e:
print('Unable to connect to vsphere server.')
print(e)
sys.exit(1)
# add a clean up routine
atexit.register(Disconnect, self.si)
self.content = self.si.RetrieveContent() |
def _get_extension_loader_mapping(self):
"""
:return: Mappings of format extension and loader class.
:rtype: dict
"""
loader_table = self._get_common_loader_mapping()
loader_table.update(
{
"htm": HtmlTableFileLoader,
"md": MarkdownTableFileLoader,
"sqlite3": SqliteFileLoader,
"xlsx": ExcelTableFileLoader,
"xls": ExcelTableFileLoader,
}
)
return loader_table | :return: Mappings of format extension and loader class.
:rtype: dict | Below is the the instruction that describes the task:
### Input:
:return: Mappings of format extension and loader class.
:rtype: dict
### Response:
def _get_extension_loader_mapping(self):
"""
:return: Mappings of format extension and loader class.
:rtype: dict
"""
loader_table = self._get_common_loader_mapping()
loader_table.update(
{
"htm": HtmlTableFileLoader,
"md": MarkdownTableFileLoader,
"sqlite3": SqliteFileLoader,
"xlsx": ExcelTableFileLoader,
"xls": ExcelTableFileLoader,
}
)
return loader_table |
def upload(
cls, files, metadata=None, tags=None, project=None, coerce_ascii=False, progressbar=None
):
"""Uploads a series of files to the One Codex server.
Parameters
----------
files : `string` or `tuple`
A single path to a file on the system, or a tuple containing a pairs of paths. Tuple
values will be interleaved as paired-end reads and both files should contain the same
number of records. Paths to single files will be uploaded as-is.
metadata : `dict`, optional
tags : `list`, optional
project : `string`, optional
UUID of project to associate this sample with.
coerce_ascii : `bool`, optional
If true, rename unicode filenames to ASCII and issue warning.
progressbar : `click.progressbar`, optional
If passed, display a progress bar using Click.
Returns
-------
A `Samples` object upon successful upload. None if the upload failed.
"""
res = cls._resource
if not isinstance(files, string_types) and not isinstance(files, tuple):
raise OneCodexException(
"Please pass a string or tuple or forward and reverse filepaths."
)
if not isinstance(project, Projects) and project is not None:
project_search = Projects.get(project)
if not project_search:
project_search = Projects.where(name=project)
if not project_search:
try:
project_search = Projects.where(project_name=project)
except HTTPError:
project_search = None
if not project_search:
raise OneCodexException("{} is not a valid project UUID".format(project))
if isinstance(project_search, list):
project = project_search[0]
sample_id = upload_sequence(
files,
res._client.session,
res,
metadata=metadata,
tags=tags,
project=project,
coerce_ascii=coerce_ascii,
progressbar=progressbar,
)
return cls.get(sample_id) | Uploads a series of files to the One Codex server.
Parameters
----------
files : `string` or `tuple`
A single path to a file on the system, or a tuple containing a pairs of paths. Tuple
values will be interleaved as paired-end reads and both files should contain the same
number of records. Paths to single files will be uploaded as-is.
metadata : `dict`, optional
tags : `list`, optional
project : `string`, optional
UUID of project to associate this sample with.
coerce_ascii : `bool`, optional
If true, rename unicode filenames to ASCII and issue warning.
progressbar : `click.progressbar`, optional
If passed, display a progress bar using Click.
Returns
-------
A `Samples` object upon successful upload. None if the upload failed. | Below is the the instruction that describes the task:
### Input:
Uploads a series of files to the One Codex server.
Parameters
----------
files : `string` or `tuple`
A single path to a file on the system, or a tuple containing a pairs of paths. Tuple
values will be interleaved as paired-end reads and both files should contain the same
number of records. Paths to single files will be uploaded as-is.
metadata : `dict`, optional
tags : `list`, optional
project : `string`, optional
UUID of project to associate this sample with.
coerce_ascii : `bool`, optional
If true, rename unicode filenames to ASCII and issue warning.
progressbar : `click.progressbar`, optional
If passed, display a progress bar using Click.
Returns
-------
A `Samples` object upon successful upload. None if the upload failed.
### Response:
def upload(
cls, files, metadata=None, tags=None, project=None, coerce_ascii=False, progressbar=None
):
"""Uploads a series of files to the One Codex server.
Parameters
----------
files : `string` or `tuple`
A single path to a file on the system, or a tuple containing a pairs of paths. Tuple
values will be interleaved as paired-end reads and both files should contain the same
number of records. Paths to single files will be uploaded as-is.
metadata : `dict`, optional
tags : `list`, optional
project : `string`, optional
UUID of project to associate this sample with.
coerce_ascii : `bool`, optional
If true, rename unicode filenames to ASCII and issue warning.
progressbar : `click.progressbar`, optional
If passed, display a progress bar using Click.
Returns
-------
A `Samples` object upon successful upload. None if the upload failed.
"""
res = cls._resource
if not isinstance(files, string_types) and not isinstance(files, tuple):
raise OneCodexException(
"Please pass a string or tuple or forward and reverse filepaths."
)
if not isinstance(project, Projects) and project is not None:
project_search = Projects.get(project)
if not project_search:
project_search = Projects.where(name=project)
if not project_search:
try:
project_search = Projects.where(project_name=project)
except HTTPError:
project_search = None
if not project_search:
raise OneCodexException("{} is not a valid project UUID".format(project))
if isinstance(project_search, list):
project = project_search[0]
sample_id = upload_sequence(
files,
res._client.session,
res,
metadata=metadata,
tags=tags,
project=project,
coerce_ascii=coerce_ascii,
progressbar=progressbar,
)
return cls.get(sample_id) |
def _setUpElements(self):
"""TODO: Remove this method
This method ONLY sets up the instance attributes.
Dependency instance attribute:
mgContent -- expected to be either a complex definition
with model group content, a model group, or model group
content. TODO: should only support the first two.
"""
self.logger.debug("_setUpElements: %s" %self._item.getItemTrace())
if hasattr(self, '_done'):
#return '\n'.join(self.elementAttrs)
return
self._done = True
flat = []
content = self.mgContent
if type(self.mgContent) is not tuple:
mg = self.mgContent
if not mg.isModelGroup():
mg = mg.content
content = mg.content
if mg.isAll():
flat = content
content = []
elif mg.isModelGroup() and mg.isDefinition():
mg = mg.content
content = mg.content
idx = 0
content = list(content)
while idx < len(content):
c = orig = content[idx]
if c.isElement():
flat.append(c)
idx += 1
continue
if c.isReference() and c.isModelGroup():
c = c.getModelGroupReference()
if c.isDefinition() and c.isModelGroup():
c = c.content
if c.isSequence() or c.isChoice():
begIdx = idx
endIdx = begIdx + len(c.content)
for i in range(begIdx, endIdx):
content.insert(i, c.content[i-begIdx])
content.remove(orig)
continue
raise ContainerError, 'unexpected schema item: %s' %c.getItemTrace()
for c in flat:
if c.isDeclaration() and c.isElement():
defaultValue = "None"
parent = c
defs = []
# stop recursion via global ModelGroupDefinition
while defs.count(parent) <= 1:
maxOccurs = parent.getAttribute('maxOccurs')
if maxOccurs == 'unbounded' or int(maxOccurs) > 1:
defaultValue = "[]"
break
parent = parent._parent()
if not parent.isModelGroup():
break
if parent.isReference():
parent = parent.getModelGroupReference()
if parent.isDefinition():
parent = parent.content
defs.append(parent)
if None == c.getAttribute('name') and c.isWildCard():
e = '%sself.%s = %s' %(ID3,
self.getAttributeName('any'), defaultValue)
else:
e = '%sself.%s = %s' %(ID3,
self.getAttributeName(c.getAttribute('name')), defaultValue)
self.elementAttrs.append(e)
continue
# TODO: This seems wrong
if c.isReference():
e = '%sself._%s = None' %(ID3,
self.mangle(c.getAttribute('ref')[1]))
self.elementAttrs.append(e)
continue
raise ContainerError, 'unexpected item: %s' % c.getItemTrace()
#return '\n'.join(self.elementAttrs)
return | TODO: Remove this method
This method ONLY sets up the instance attributes.
Dependency instance attribute:
mgContent -- expected to be either a complex definition
with model group content, a model group, or model group
content. TODO: should only support the first two. | Below is the the instruction that describes the task:
### Input:
TODO: Remove this method
This method ONLY sets up the instance attributes.
Dependency instance attribute:
mgContent -- expected to be either a complex definition
with model group content, a model group, or model group
content. TODO: should only support the first two.
### Response:
def _setUpElements(self):
"""TODO: Remove this method
This method ONLY sets up the instance attributes.
Dependency instance attribute:
mgContent -- expected to be either a complex definition
with model group content, a model group, or model group
content. TODO: should only support the first two.
"""
self.logger.debug("_setUpElements: %s" %self._item.getItemTrace())
if hasattr(self, '_done'):
#return '\n'.join(self.elementAttrs)
return
self._done = True
flat = []
content = self.mgContent
if type(self.mgContent) is not tuple:
mg = self.mgContent
if not mg.isModelGroup():
mg = mg.content
content = mg.content
if mg.isAll():
flat = content
content = []
elif mg.isModelGroup() and mg.isDefinition():
mg = mg.content
content = mg.content
idx = 0
content = list(content)
while idx < len(content):
c = orig = content[idx]
if c.isElement():
flat.append(c)
idx += 1
continue
if c.isReference() and c.isModelGroup():
c = c.getModelGroupReference()
if c.isDefinition() and c.isModelGroup():
c = c.content
if c.isSequence() or c.isChoice():
begIdx = idx
endIdx = begIdx + len(c.content)
for i in range(begIdx, endIdx):
content.insert(i, c.content[i-begIdx])
content.remove(orig)
continue
raise ContainerError, 'unexpected schema item: %s' %c.getItemTrace()
for c in flat:
if c.isDeclaration() and c.isElement():
defaultValue = "None"
parent = c
defs = []
# stop recursion via global ModelGroupDefinition
while defs.count(parent) <= 1:
maxOccurs = parent.getAttribute('maxOccurs')
if maxOccurs == 'unbounded' or int(maxOccurs) > 1:
defaultValue = "[]"
break
parent = parent._parent()
if not parent.isModelGroup():
break
if parent.isReference():
parent = parent.getModelGroupReference()
if parent.isDefinition():
parent = parent.content
defs.append(parent)
if None == c.getAttribute('name') and c.isWildCard():
e = '%sself.%s = %s' %(ID3,
self.getAttributeName('any'), defaultValue)
else:
e = '%sself.%s = %s' %(ID3,
self.getAttributeName(c.getAttribute('name')), defaultValue)
self.elementAttrs.append(e)
continue
# TODO: This seems wrong
if c.isReference():
e = '%sself._%s = None' %(ID3,
self.mangle(c.getAttribute('ref')[1]))
self.elementAttrs.append(e)
continue
raise ContainerError, 'unexpected item: %s' % c.getItemTrace()
#return '\n'.join(self.elementAttrs)
return |
def related(self, domain):
'''Get the related domains of the given domain.
For details, see https://investigate.umbrella.com/docs/api#relatedDomains
'''
uri = self._uris["related"].format(domain)
return self.get_parse(uri) | Get the related domains of the given domain.
For details, see https://investigate.umbrella.com/docs/api#relatedDomains | Below is the the instruction that describes the task:
### Input:
Get the related domains of the given domain.
For details, see https://investigate.umbrella.com/docs/api#relatedDomains
### Response:
def related(self, domain):
'''Get the related domains of the given domain.
For details, see https://investigate.umbrella.com/docs/api#relatedDomains
'''
uri = self._uris["related"].format(domain)
return self.get_parse(uri) |
def get_absolute_path(cls, roots, path):
"""Returns the absolute location of ``path`` relative to one of
the ``roots``.
``roots`` is the path configured for this `StaticFileHandler`
(in most cases the ``static_path`` `Application` setting).
"""
for root in roots:
abspath = os.path.abspath(os.path.join(root, path))
if abspath.startswith(root) and os.path.exists(abspath):
return abspath
# XXX TODO
return 'file-not-found' | Returns the absolute location of ``path`` relative to one of
the ``roots``.
``roots`` is the path configured for this `StaticFileHandler`
(in most cases the ``static_path`` `Application` setting). | Below is the the instruction that describes the task:
### Input:
Returns the absolute location of ``path`` relative to one of
the ``roots``.
``roots`` is the path configured for this `StaticFileHandler`
(in most cases the ``static_path`` `Application` setting).
### Response:
def get_absolute_path(cls, roots, path):
"""Returns the absolute location of ``path`` relative to one of
the ``roots``.
``roots`` is the path configured for this `StaticFileHandler`
(in most cases the ``static_path`` `Application` setting).
"""
for root in roots:
abspath = os.path.abspath(os.path.join(root, path))
if abspath.startswith(root) and os.path.exists(abspath):
return abspath
# XXX TODO
return 'file-not-found' |
def _build_message(self):
"""
Build different type of Dingding message
As most commonly used type, text message just need post message content
rather than a dict like ``{'content': 'message'}``
"""
if self.message_type in ['text', 'markdown']:
data = {
'msgtype': self.message_type,
self.message_type: {
'content': self.message
} if self.message_type == 'text' else self.message,
'at': {
'atMobiles': self.at_mobiles,
'isAtAll': self.at_all
}
}
else:
data = {
'msgtype': self.message_type,
self.message_type: self.message
}
return json.dumps(data) | Build different type of Dingding message
As most commonly used type, text message just need post message content
rather than a dict like ``{'content': 'message'}`` | Below is the the instruction that describes the task:
### Input:
Build different type of Dingding message
As most commonly used type, text message just need post message content
rather than a dict like ``{'content': 'message'}``
### Response:
def _build_message(self):
"""
Build different type of Dingding message
As most commonly used type, text message just need post message content
rather than a dict like ``{'content': 'message'}``
"""
if self.message_type in ['text', 'markdown']:
data = {
'msgtype': self.message_type,
self.message_type: {
'content': self.message
} if self.message_type == 'text' else self.message,
'at': {
'atMobiles': self.at_mobiles,
'isAtAll': self.at_all
}
}
else:
data = {
'msgtype': self.message_type,
self.message_type: self.message
}
return json.dumps(data) |
def cluster_node_add(node, extra_args=None):
'''
Add a node to the pacemaker cluster via pcs command
node
node that should be added
extra_args
list of extra option for the \'pcs cluster node add\' command
CLI Example:
.. code-block:: bash
salt '*' pcs.cluster_node_add node=node2.example.org
'''
cmd = ['pcs', 'cluster', 'node', 'add']
cmd += [node]
if isinstance(extra_args, (list, tuple)):
cmd += extra_args
return __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) | Add a node to the pacemaker cluster via pcs command
node
node that should be added
extra_args
list of extra option for the \'pcs cluster node add\' command
CLI Example:
.. code-block:: bash
salt '*' pcs.cluster_node_add node=node2.example.org | Below is the the instruction that describes the task:
### Input:
Add a node to the pacemaker cluster via pcs command
node
node that should be added
extra_args
list of extra option for the \'pcs cluster node add\' command
CLI Example:
.. code-block:: bash
salt '*' pcs.cluster_node_add node=node2.example.org
### Response:
def cluster_node_add(node, extra_args=None):
'''
Add a node to the pacemaker cluster via pcs command
node
node that should be added
extra_args
list of extra option for the \'pcs cluster node add\' command
CLI Example:
.. code-block:: bash
salt '*' pcs.cluster_node_add node=node2.example.org
'''
cmd = ['pcs', 'cluster', 'node', 'add']
cmd += [node]
if isinstance(extra_args, (list, tuple)):
cmd += extra_args
return __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.