| code | docstring |
|---|---|
def build_defines(defines):
"""Build a list of `-D` directives to pass to the compiler.
This will drop any definitions whose value is None so that
you can get rid of a define from another architecture by
setting its value to null in the `module_settings.json`.
"""
return ['-D"%s=%s"' % (x, str(y)) for x, y in defines.items() if y is not None]
|
Build a list of `-D` directives to pass to the compiler.
This will drop any definitions whose value is None so that
you can get rid of a define from another architecture by
setting its value to null in the `module_settings.json`.
|
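As a quick illustration of the drop-on-None behaviour described above (the define names and values here are made up):
defines = {"USE_FPU": 1, "LEGACY_UART": None}
build_defines(defines)  # -> ['-D"USE_FPU=1"'] -- the None-valued define is dropped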
def set_mask(self, mask_img):
"""Sets a mask img to this. So every operation to self, this mask will be taken into account.
Parameters
----------
mask_img: nifti-like image, NeuroImage or str
3D mask array: True where a voxel should be used.
Can either be:
- a file path to a Nifti image
- any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.
If mask_img is a string, it is treated as a path to a Nifti image and
nibabel.load is called on it. If it is an object, the get_data() and
get_affine() methods must be present, otherwise a TypeError is raised.
Note
----
self.img and mask_img must have the same shape.
Raises
------
FileNotFound, NiftiFilesNotCompatible
"""
mask = load_mask(mask_img, allow_empty=True)
check_img_compatibility(self.img, mask, only_check_3d=True) # this will raise an exception if something is wrong
self.mask = mask
|
Set a mask image for this object, so that every subsequent operation on self takes the mask into account.
Parameters
----------
mask_img: nifti-like image, NeuroImage or str
3D mask array: True where a voxel should be used.
Can either be:
- a file path to a Nifti image
- any object with get_data() and get_affine() methods, e.g., nibabel.Nifti1Image.
If mask_img is a string, it is treated as a path to a Nifti image and
nibabel.load is called on it. If it is an object, the get_data() and
get_affine() methods must be present, otherwise a TypeError is raised.
Note
----
self.img and mask_img must have the same shape.
Raises
------
FileNotFound, NiftiFilesNotCompatible
|
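A minimal usage sketch, assuming a NeuroImage-like object that exposes this method; the file paths are hypothetical:
img = NeuroImage('subject01_bold.nii.gz')
img.set_mask('subject01_brainmask.nii.gz')  # later operations on img honour this mask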
def available_ports(low=1024, high=65535, exclude_ranges=None):
"""
Returns a set of possible ports (excluding system,
ephemeral and well-known ports).
Pass ``high`` and/or ``low`` to limit the port range.
"""
if exclude_ranges is None:
exclude_ranges = []
available = utils.ranges_to_set(UNASSIGNED_RANGES)
exclude = utils.ranges_to_set(
ephemeral.port_ranges() + exclude_ranges +
[
SYSTEM_PORT_RANGE,
(SYSTEM_PORT_RANGE[1], low),
(high, 65536)
]
)
return available.difference(exclude)
|
Returns a set of possible ports (excluding system,
ephemeral and well-known ports).
Pass ``high`` and/or ``low`` to limit the port range.
|
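A small usage sketch based on the signature above; the port numbers are illustrative:
ports = available_ports(low=8000, high=9000, exclude_ranges=[(8080, 8090)])
port = ports.pop() if ports else None  # pick any free candidate port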
def send_email(sender,
subject,
content,
email_recipient_list,
email_address_list,
email_user=None,
email_pass=None,
email_server=None):
'''This sends an email to addresses, informing them about events.
The email account settings are retrieved from the settings file as described
above.
Parameters
----------
sender : str
The name of the sender to use in the email header.
subject : str
Subject of the email.
content : str
Content of the email.
email_recipient_list : list of str
This is a list of email recipient names of the form:
`['Example Person 1', 'Example Person 2', ...]`
email_address_list : list of str
This is a list of email recipient addresses of the form:
`['example1@example.com', 'example2@example.org', ...]`
email_user : str
The username of the email server account that will send the emails. If
this is None, the value of EMAIL_USER from the
~/.astrobase/.emailsettings file will be used. If that is None as well,
this function won't work.
email_pass : str
The password of the email server account that will send the emails. If
this is None, the value of EMAIL_PASS from the
~/.astrobase/.emailsettings file will be used. If that is None as well,
this function won't work.
email_server : str
The address of the email server that will send the emails. If this is
None, the value of EMAIL_SERVER from the ~/.astrobase/.emailsettings file
will be used. If that is None as well, this function won't work.
Returns
-------
bool
True if email sending succeeded. False if email sending failed.
'''
if not email_user:
email_user = EMAIL_USER
if not email_pass:
email_pass = EMAIL_PASSWORD
if not email_server:
email_server = EMAIL_SERVER
if not (email_server and email_user and email_pass):
raise ValueError("no email server address and "
"credentials available, can't continue")
msg_text = EMAIL_TEMPLATE.format(
sender=sender,
hostname=socket.gethostname(),
activity_time='%sZ' % datetime.utcnow().isoformat(),
activity_report=content
)
email_sender = '%s <%s>' % (sender, email_user)
# put together the recipient and email lists
email_recipients = [('%s <%s>' % (x,y))
for (x,y) in zip(email_recipient_list,
email_address_list)]
# put together the rest of the message
email_msg = MIMEText(msg_text)
email_msg['From'] = email_sender
email_msg['To'] = ', '.join(email_recipients)
email_msg['Message-Id'] = make_msgid()
email_msg['Subject'] = '[%s on %s] %s' % (
sender,
socket.gethostname(),
subject
)
email_msg['Date'] = formatdate(time.time())
# start the email process
try:
server = smtplib.SMTP(email_server, 587)
server_ehlo_response = server.ehlo()
if server.has_extn('STARTTLS'):
try:
tls_start_response = server.starttls()
tls_ehlo_response = server.ehlo()
login_response = server.login(email_user, email_pass)
send_response = (
server.sendmail(email_sender,
email_address_list,
email_msg.as_string())
)
except Exception as e:
print('script email sending failed with error: %s'
% e)
send_response = None
if send_response is not None:
print('script email sent successfully')
quit_response = server.quit()
return True
else:
quit_response = server.quit()
return False
else:
print('email server does not support STARTTLS,'
' bailing out...')
quit_response = server.quit()
return False
except Exception as e:
print('sending email failed with error: %s' % e)
returnval = False
quit_response = server.quit()
return returnval
|
This sends an email to addresses, informing them about events.
The email account settings are retrieved from the settings file as described
above.
Parameters
----------
sender : str
The name of the sender to use in the email header.
subject : str
Subject of the email.
content : str
Content of the email.
email_recipient_list : list of str
This is a list of email recipient names of the form:
`['Example Person 1', 'Example Person 2', ...]`
email_address_list : list of str
This is a list of email recipient addresses of the form:
`['example1@example.com', 'example2@example.org', ...]`
email_user : str
The username of the email server account that will send the emails. If
this is None, the value of EMAIL_USER from the
~/.astrobase/.emailsettings file will be used. If that is None as well,
this function won't work.
email_pass : str
The password of the email server account that will send the emails. If
this is None, the value of EMAIL_PASS from the
~/.astrobase/.emailsettings file will be used. If that is None as well,
this function won't work.
email_server : str
The address of the email server that will send the emails. If this is
None, the value of EMAIL_SERVER from the ~/.astrobase/.emailsettings file
will be used. If that is None as well, this function won't work.
Returns
-------
bool
True if email sending succeeded. False if email sending failed.
|
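A hedged call sketch; the sender, recipients and addresses are placeholders, and the server settings are assumed to come from ~/.astrobase/.emailsettings when the keyword arguments are left as None:
sent_ok = send_email(
    sender='pipeline-watcher',
    subject='nightly reduction finished',
    content='All light curves were processed without errors.',
    email_recipient_list=['Example Person 1', 'Example Person 2'],
    email_address_list=['example1@example.com', 'example2@example.org'],
)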
def summarize_address_range(first, last):
"""Summarize a network range given the first and last IP addresses.
Example:
>>> summarize_address_range(IPv4Address('1.1.1.0'),
IPv4Address('1.1.1.130'))
[IPv4Network('1.1.1.0/25'), IPv4Network('1.1.1.128/31'),
IPv4Network('1.1.1.130/32')]
Args:
first: the first IPv4Address or IPv6Address in the range.
last: the last IPv4Address or IPv6Address in the range.
Returns:
The address range collapsed to a list of IPv4Network's or
IPv6Network's.
Raise:
TypeError:
If the first and last objects are not IP addresses.
If the first and last objects are not the same version.
ValueError:
If the last object is not greater than the first.
If the version is not 4 or 6.
"""
if not (isinstance(first, _BaseIP) and isinstance(last, _BaseIP)):
raise TypeError('first and last must be IP addresses, not networks')
if first.version != last.version:
raise TypeError("%s and %s are not of the same version" % (
str(first), str(last)))
if first > last:
raise ValueError('last IP address must be greater than first')
networks = []
if first.version == 4:
ip = IPv4Network
elif first.version == 6:
ip = IPv6Network
else:
raise ValueError('unknown IP version')
ip_bits = first._max_prefixlen
first_int = first._ip
last_int = last._ip
while first_int <= last_int:
nbits = _count_righthand_zero_bits(first_int, ip_bits)
current = None
while nbits >= 0:
addend = 2**nbits - 1
current = first_int + addend
nbits -= 1
if current <= last_int:
break
prefix = _get_prefix_length(first_int, current, ip_bits)
net = ip('%s/%d' % (str(first), prefix))
networks.append(net)
if current == ip._ALL_ONES:
break
first_int = current + 1
first = IPAddress(first_int, version=first._version)
return networks
|
Summarize a network range given the first and last IP addresses.
Example:
>>> summarize_address_range(IPv4Address('1.1.1.0'),
IPv4Address('1.1.1.130'))
[IPv4Network('1.1.1.0/25'), IPv4Network('1.1.1.128/31'),
IPv4Network('1.1.1.130/32')]
Args:
first: the first IPv4Address or IPv6Address in the range.
last: the last IPv4Address or IPv6Address in the range.
Returns:
The address range collapsed to a list of IPv4Network's or
IPv6Network's.
Raise:
TypeError:
If the first and last objects are not IP addresses.
If the first and last objects are not the same version.
ValueError:
If the last object is not greater than the first.
If the version is not 4 or 6.
|
def track_metric(self, unique_identifier, metric, date=None, inc_amt=1, **kwargs):
"""
Tracks a metric for a specific ``unique_identifier`` for a certain date. The redis backend supports
lists for both ``unique_identifier`` and ``metric`` allowing for tracking of multiple metrics for multiple
unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track. This can be a list or a string.
:param date: A python date object indicating when this event occurred. Defaults to today.
:param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
:return: ``True`` if successful ``False`` otherwise
"""
metric = [metric] if isinstance(metric, basestring) else metric
unique_identifier = [unique_identifier] if not isinstance(unique_identifier, (types.ListType, types.TupleType, types.GeneratorType,)) else unique_identifier
results = []
if date is None:
date = datetime.date.today()
with self._analytics_backend.map() as conn:
for uid in unique_identifier:
hash_key_daily = self._get_daily_metric_key(uid, date)
closest_monday = self._get_closest_week(date)
hash_key_weekly = self._get_weekly_metric_key(uid, date)
for single_metric in metric:
daily_metric_name = self._get_daily_metric_name(single_metric, date)
weekly_metric_name = self._get_weekly_metric_name(single_metric, closest_monday)
monthly_metric_name = self._get_monthly_metric_name(single_metric, date)
results.append(
[
conn.hincrby(hash_key_daily, daily_metric_name, inc_amt),
conn.hincrby(hash_key_weekly, weekly_metric_name, inc_amt),
conn.hincrby(hash_key_weekly, monthly_metric_name, inc_amt),
conn.incr(self._prefix + ":" + "analy:%s:count:%s" % (uid, single_metric), inc_amt)
]
)
return results
|
Tracks a metric for a specific ``unique_identifier`` for a certain date. The redis backend supports
lists for both ``unique_identifier`` and ``metric`` allowing for tracking of multiple metrics for multiple
unique_identifiers efficiently. Not all backends may support this.
:param unique_identifier: Unique string identifying the object this metric is for
:param metric: A unique name for the metric you want to track. This can be a list or a string.
:param date: A python date object indicating when this event occurred. Defaults to today.
:param inc_amt: The amount you want to increment the ``metric`` for the ``unique_identifier``
:return: ``True`` if successful ``False`` otherwise
|
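A hedged usage sketch, assuming `backend` is an already-configured redis-backed analytics object; identifiers and metric names are illustrative:
backend.track_metric('user:42', 'photo_uploads')                    # single id, single metric
backend.track_metric(['user:42', 'user:99'], ['logins', 'clicks'], inc_amt=2)  # batched form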
def sle(actual, predicted):
"""
Computes the squared log error.
This function computes the squared log error between two numbers,
or element-wise between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The squared log error between actual and predicted
"""
return (np.power(np.log(np.array(actual)+1) -
np.log(np.array(predicted)+1), 2))
|
Computes the squared log error.
This function computes the squared log error between two numbers,
or element-wise between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The squared log error between actual and predicted
|
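A worked instance of the formula implemented above, i.e. (log(actual + 1) - log(predicted + 1))**2:
sle(3, 2)                  # == (log(4) - log(3))**2, about 0.0827
sle([1, 2, 3], [1, 2, 4])  # element-wise: array([0., 0., (log(4) - log(5))**2])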
def lookups(self, request, model_admin):
"""
Returns a list of tuples. The first element in each
tuple is the coded value for the option that will
appear in the URL query. The second element is the
human-readable name for the option that will appear
in the right sidebar.
"""
User = get_user_model()
output = []
for i in models.Model.objects.values('user__pk').distinct():
pk = i['user__pk']
if pk is not None:
output.append([pk, str(User.objects.get(pk=pk))])
return output
|
Returns a list of tuples. The first element in each
tuple is the coded value for the option that will
appear in the URL query. The second element is the
human-readable name for the option that will appear
in the right sidebar.
|
def autoprops_decorate(cls, # type: Type[T]
include=None, # type: Union[str, Tuple[str]]
exclude=None # type: Union[str, Tuple[str]]
):
# type: (...) -> Type[T]
"""
Automatically generate all property getters and setters from the class constructor, without using the
@autoprops decorator.
* if a @contract annotation exists on the __init__ method, mentioning a contract for a given parameter, the
parameter contract will be added on the generated setter method
* The user may override the generated getter and/or setter by creating them explicitly in the class and annotating
them with @getter_override or @setter_override. Note that the contract will still be dynamically added on the
setter, even if the setter already has one (in such case a `UserWarning` will be issued)
:param cls: the class on which to execute. Note that it won't be wrapped.
:param include: a tuple of explicit attribute names to include (None means all)
:param exclude: a tuple of explicit attribute names to exclude. In such case, include should be None.
:return: the class itself, with the generated properties added (the class is modified in place, not wrapped)
"""
# first check that we do not conflict with other known decorators
_check_known_decorators(cls, '@autoprops')
# perform the class mod
_execute_autoprops_on_class(cls, include=include, exclude=exclude)
# TODO better create a wrapper than modify the class? Probably not
# class Autoprops_Wrapper(object):
# def __init__(self, *args, **kwargs):
# self.wrapped = cls(*args, **kwargs)
#
# return Autoprops_Wrapper
return cls
|
Automatically generate all property getters and setters from the class constructor, without using the
@autoprops decorator.
* if a @contract annotation exists on the __init__ method, mentioning a contract for a given parameter, the
parameter contract will be added on the generated setter method
* The user may override the generated getter and/or setter by creating them explicitly in the class and annotating
them with @getter_override or @setter_override. Note that the contract will still be dynamically added on the
setter, even if the setter already has one (in such case a `UserWarning` will be issued)
:param cls: the class on which to execute. Note that it won't be wrapped.
:param include: a tuple of explicit attribute names to include (None means all)
:param exclude: a tuple of explicit attribute names to exclude. In such case, include should be None.
:return: the class itself, with the generated properties added (the class is modified in place, not wrapped)
|
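A minimal sketch of the manual form described above, assuming autoprops_decorate is imported from its package; the class and attribute names are illustrative:
class Wheel:
    def __init__(self, radius, color='black'):
        self.radius = radius
        self.color = color
Wheel = autoprops_decorate(Wheel, include=('radius', 'color'))  # generates the properties without @autoprops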
def _get_class(template_file):
"""
Import the file and inspect for subclass of TaskTemplate.
:param template_file: filename to import.
"""
with warnings.catch_warnings():
# suppress warning from importing
warnings.filterwarnings("ignore", category=RuntimeWarning)
template_module = imp.load_source('module.name', template_file)
# Find the subclass of TaskTemplate
for name, data in inspect.getmembers(template_module, inspect.isclass):
if issubclass(data, TaskTemplate) and data.__name__ != TaskTemplate.__name__:
return data
|
Import the file and inspect for subclass of TaskTemplate.
:param template_file: filename to import.
|
def norm_to_uniform(im, scale=None):
r"""
Takes an image with normally distributed greyscale values and converts it to
a uniform (i.e. flat) distribution. It's also possible to specify the
lower and upper limits of the uniform distribution.
Parameters
----------
im : ND-image
The image containing the normally distributed scalar field
scale : [low, high]
A list or array indicating the lower and upper bounds for the new
randomly distributed data. The default is ``None``, which uses the
``max`` and ``min`` of the original image as the lower and upper
bounds, but another common option might be [0, 1].
Returns
-------
image : ND-array
A copy of ``im`` with uniformly distributed greyscale values spanning
the specified range, if given.
"""
if scale is None:
scale = [im.min(), im.max()]
im = (im - sp.mean(im)) / sp.std(im)
im = 1 / 2 * sp.special.erfc(-im / sp.sqrt(2))
im = (im - im.min()) / (im.max() - im.min())
im = im * (scale[1] - scale[0]) + scale[0]
return im
|
r"""
Takes an image with normally distributed greyscale values and converts it to
a uniform (i.e. flat) distribution. It's also possible to specify the
lower and upper limits of the uniform distribution.
Parameters
----------
im : ND-image
The image containing the normally distributed scalar field
scale : [low, high]
A list or array indicating the lower and upper bounds for the new
randomly distributed data. The default is ``None``, which uses the
``max`` and ``min`` of the original image as the lower and upper
bounds, but another common option might be [0, 1].
Returns
-------
image : ND-array
A copy of ``im`` with uniformly distributed greyscale values spanning
the specified range, if given.
|
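A short sketch of the transformation described above, assuming norm_to_uniform is importable; the array is random test data:
import numpy as np
im = np.random.randn(64, 64)               # normally distributed greyscale values
flat = norm_to_uniform(im, scale=[0, 1])   # returned copy now spans [0, 1]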
def auto_rewrite_input(self, cmd):
"""Print to the screen the rewritten form of the user's command.
This shows visual feedback by rewriting input lines that cause
automatic calling to kick in, like::
/f x
into::
------> f(x)
after the user's input prompt. This helps the user understand that the
input line was transformed automatically by IPython.
"""
if not self.show_rewritten_input:
return
rw = self.prompt_manager.render('rewrite') + cmd
try:
# plain ascii works better w/ pyreadline, on some machines, so
# we use it and only print uncolored rewrite if we have unicode
rw = str(rw)
print >> io.stdout, rw
except UnicodeEncodeError:
print "------> " + cmd
|
Print to the screen the rewritten form of the user's command.
This shows visual feedback by rewriting input lines that cause
automatic calling to kick in, like::
/f x
into::
------> f(x)
after the user's input prompt. This helps the user understand that the
input line was transformed automatically by IPython.
|
def blockreplace(
name,
marker_start='#-- start managed zone --',
marker_end='#-- end managed zone --',
source=None,
source_hash=None,
template='jinja',
sources=None,
source_hashes=None,
defaults=None,
context=None,
content='',
append_if_not_found=False,
prepend_if_not_found=False,
backup='.bak',
show_changes=True,
append_newline=None,
insert_before_match=None,
insert_after_match=None):
'''
Maintain an edit in a file in a zone delimited by two line markers
.. versionadded:: 2014.1.0
.. versionchanged:: 2017.7.5,2018.3.1
``append_newline`` argument added. Additionally, to improve
idempotence, if the string represented by ``marker_end`` is found in
the middle of the line, the content preceding the marker will be
removed when the block is replaced. This allows one to remove
``append_newline: False`` from the SLS and have the block properly
replaced if the end of the content block is immediately followed by the
``marker_end`` (i.e. no newline before the marker).
A block of content delimited by comments can help you manage several lines
of entries without worrying about removing old entries. It can also help you
maintain an otherwise un-managed file containing manual edits.
.. note::
This function will store two copies of the file in-memory (the original
version and the edited version) in order to detect changes and only
edit the targeted file if necessary.
Additionally, you can use :py:func:`file.accumulated
<salt.states.file.accumulated>` and target this state. All accumulated
data dictionaries' content will be added in the content block.
name
Filesystem path to the file to be edited
marker_start
The line content identifying a line as the start of the content block.
Note that the whole line containing this marker will be considered, so
whitespace or extra content before or after the marker is included in
final output
marker_end
The line content identifying the end of the content block. As of
versions 2017.7.5 and 2018.3.1, everything up to the text matching the
marker will be replaced, so it's important to ensure that your marker
includes the beginning of the text you wish to replace.
content
The content to be used between the two lines identified by
``marker_start`` and ``marker_end``
source
The source file to download to the minion, this source file can be
hosted on either the salt master server, or on an HTTP or FTP server.
Both HTTPS and HTTP are supported as well as downloading directly
from Amazon S3 compatible URLs with both pre-configured and automatic
IAM credentials. (see s3.get state documentation)
File retrieval from Openstack Swift object storage is supported via
swift://container/object_path URLs, see swift.get documentation.
For files hosted on the salt file server, if the file is located on
the master in the directory named spam, and is called eggs, the source
string is salt://spam/eggs. If source is left blank or None
(use ~ in YAML), the file will be created as an empty file and
the content will not be managed. This is also the case when a file
already exists and the source is undefined; the contents of the file
will not be changed or managed.
If the file is hosted on a HTTP or FTP server then the source_hash
argument is also required.
A list of sources can also be passed in to provide a default source and
a set of fallbacks. The first source in the list that is found to exist
will be used and subsequent entries in the list will be ignored.
.. code-block:: yaml
file_override_example:
file.blockreplace:
- name: /etc/example.conf
- source:
- salt://file_that_does_not_exist
- salt://file_that_exists
source_hash
This can be one of the following:
1. a source hash string
2. the URI of a file that contains source hash strings
The function accepts the first encountered long unbroken alphanumeric
string of correct length as a valid hash, in order from most secure to
least secure:
.. code-block:: text
Type Length
====== ======
sha512 128
sha384 96
sha256 64
sha224 56
sha1 40
md5 32
See the ``source_hash`` parameter description for :mod:`file.managed
<salt.states.file.managed>` function for more details and examples.
template : jinja
Templating engine to be used to render the downloaded file. The
following engines are supported:
- :mod:`cheetah <salt.renderers.cheetah>`
- :mod:`genshi <salt.renderers.genshi>`
- :mod:`jinja <salt.renderers.jinja>`
- :mod:`mako <salt.renderers.mako>`
- :mod:`py <salt.renderers.py>`
- :mod:`wempy <salt.renderers.wempy>`
context
Overrides default context variables passed to the template
defaults
Default context passed to the template
append_if_not_found : False
If markers are not found and this option is set to ``True``, the
content block will be appended to the file.
prepend_if_not_found : False
If markers are not found and this option is set to ``True``, the
content block will be prepended to the file.
insert_before_match
If markers are not found, this parameter can be set to a regex which will
insert the block before the first found occurrence in the file.
.. versionadded:: Neon
insert_after_match
If markers are not found, this parameter can be set to a regex which will
insert the block after the first found occurrence in the file.
.. versionadded:: Neon
backup
The file extension to use for a backup of the file if any edit is made.
Set this to ``False`` to skip making a backup.
dry_run : False
If ``True``, do not make any edits to the file and simply return the
changes that *would* be made.
show_changes : True
Controls how changes are presented. If ``True``, the ``Changes``
section of the state return will contain a unified diff of the changes
made. If False, then it will contain a boolean (``True`` if any changes
were made, otherwise ``False``).
append_newline
Controls whether or not a newline is appended to the content block. If
the value of this argument is ``True`` then a newline will be added to
the content block. If it is ``False``, then a newline will *not* be
added to the content block. If it is unspecified, then a newline will
only be added to the content block if it does not already end in a
newline.
.. versionadded:: 2017.7.5,2018.3.1
Example of usage with an accumulator and with a variable:
.. code-block:: jinja
{% set myvar = 42 %}
hosts-config-block-{{ myvar }}:
file.blockreplace:
- name: /etc/hosts
- marker_start: "# START managed zone {{ myvar }} -DO-NOT-EDIT-"
- marker_end: "# END managed zone {{ myvar }} --"
- content: 'First line of content'
- append_if_not_found: True
- backup: '.bak'
- show_changes: True
hosts-config-block-{{ myvar }}-accumulated1:
file.accumulated:
- filename: /etc/hosts
- name: my-accumulator-{{ myvar }}
- text: "text 2"
- require_in:
- file: hosts-config-block-{{ myvar }}
hosts-config-block-{{ myvar }}-accumulated2:
file.accumulated:
- filename: /etc/hosts
- name: my-accumulator-{{ myvar }}
- text: |
text 3
text 4
- require_in:
- file: hosts-config-block-{{ myvar }}
will generate and maintain a block of content in ``/etc/hosts``:
.. code-block:: text
# START managed zone 42 -DO-NOT-EDIT-
First line of content
text 2
text 3
text 4
# END managed zone 42 --
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.blockreplace')
if sources is None:
sources = []
if source_hashes is None:
source_hashes = []
(ok_, err, sl_) = _unify_sources_and_hashes(source=source,
source_hash=source_hash,
sources=sources,
source_hashes=source_hashes)
if not ok_:
return _error(ret, err)
check_res, check_msg = _check_file(name)
if not check_res:
return _error(ret, check_msg)
accum_data, accum_deps = _load_accumulators()
if name in accum_data:
accumulator = accum_data[name]
# if we have multiple accumulators for a file, only apply the one
# required at a time
deps = accum_deps.get(name, [])
filtered = [a for a in deps if
__low__['__id__'] in deps[a] and a in accumulator]
if not filtered:
filtered = [a for a in accumulator]
for acc in filtered:
acc_content = accumulator[acc]
for line in acc_content:
if content == '':
content = line
else:
content += "\n" + line
if sl_:
tmpret = _get_template_texts(source_list=sl_,
template=template,
defaults=defaults,
context=context)
if not tmpret['result']:
return tmpret
text = tmpret['data']
for index, item in enumerate(text):
content += six.text_type(item)
try:
changes = __salt__['file.blockreplace'](
name,
marker_start,
marker_end,
content=content,
append_if_not_found=append_if_not_found,
prepend_if_not_found=prepend_if_not_found,
insert_before_match=insert_before_match,
insert_after_match=insert_after_match,
backup=backup,
dry_run=__opts__['test'],
show_changes=show_changes,
append_newline=append_newline)
except Exception as exc:
log.exception('Encountered error managing block')
ret['comment'] = (
'Encountered error managing block: {0}. '
'See the log for details.'.format(exc)
)
return ret
if changes:
ret['changes']['diff'] = changes
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Changes would be made'
else:
ret['result'] = True
ret['comment'] = 'Changes were made'
else:
ret['result'] = True
ret['comment'] = 'No changes needed to be made'
return ret
|
Maintain an edit in a file in a zone delimited by two line markers
.. versionadded:: 2014.1.0
.. versionchanged:: 2017.7.5,2018.3.1
``append_newline`` argument added. Additionally, to improve
idempotence, if the string represented by ``marker_end`` is found in
the middle of the line, the content preceding the marker will be
removed when the block is replaced. This allows one to remove
``append_newline: False`` from the SLS and have the block properly
replaced if the end of the content block is immediately followed by the
``marker_end`` (i.e. no newline before the marker).
A block of content delimited by comments can help you manage several lines
of entries without worrying about removing old entries. It can also help you
maintain an otherwise un-managed file containing manual edits.
.. note::
This function will store two copies of the file in-memory (the original
version and the edited version) in order to detect changes and only
edit the targeted file if necessary.
Additionally, you can use :py:func:`file.accumulated
<salt.states.file.accumulated>` and target this state. All accumulated
data dictionaries' content will be added in the content block.
name
Filesystem path to the file to be edited
marker_start
The line content identifying a line as the start of the content block.
Note that the whole line containing this marker will be considered, so
whitespace or extra content before or after the marker is included in
final output
marker_end
The line content identifying the end of the content block. As of
versions 2017.7.5 and 2018.3.1, everything up to the text matching the
marker will be replaced, so it's important to ensure that your marker
includes the beginning of the text you wish to replace.
content
The content to be used between the two lines identified by
``marker_start`` and ``marker_end``
source
The source file to download to the minion, this source file can be
hosted on either the salt master server, or on an HTTP or FTP server.
Both HTTPS and HTTP are supported as well as downloading directly
from Amazon S3 compatible URLs with both pre-configured and automatic
IAM credentials. (see s3.get state documentation)
File retrieval from Openstack Swift object storage is supported via
swift://container/object_path URLs, see swift.get documentation.
For files hosted on the salt file server, if the file is located on
the master in the directory named spam, and is called eggs, the source
string is salt://spam/eggs. If source is left blank or None
(use ~ in YAML), the file will be created as an empty file and
the content will not be managed. This is also the case when a file
already exists and the source is undefined; the contents of the file
will not be changed or managed.
If the file is hosted on a HTTP or FTP server then the source_hash
argument is also required.
A list of sources can also be passed in to provide a default source and
a set of fallbacks. The first source in the list that is found to exist
will be used and subsequent entries in the list will be ignored.
.. code-block:: yaml
file_override_example:
file.blockreplace:
- name: /etc/example.conf
- source:
- salt://file_that_does_not_exist
- salt://file_that_exists
source_hash
This can be one of the following:
1. a source hash string
2. the URI of a file that contains source hash strings
The function accepts the first encountered long unbroken alphanumeric
string of correct length as a valid hash, in order from most secure to
least secure:
.. code-block:: text
Type Length
====== ======
sha512 128
sha384 96
sha256 64
sha224 56
sha1 40
md5 32
See the ``source_hash`` parameter description for :mod:`file.managed
<salt.states.file.managed>` function for more details and examples.
template : jinja
Templating engine to be used to render the downloaded file. The
following engines are supported:
- :mod:`cheetah <salt.renderers.cheetah>`
- :mod:`genshi <salt.renderers.genshi>`
- :mod:`jinja <salt.renderers.jinja>`
- :mod:`mako <salt.renderers.mako>`
- :mod:`py <salt.renderers.py>`
- :mod:`wempy <salt.renderers.wempy>`
context
Overrides default context variables passed to the template
defaults
Default context passed to the template
append_if_not_found : False
If markers are not found and this option is set to ``True``, the
content block will be appended to the file.
prepend_if_not_found : False
If markers are not found and this option is set to ``True``, the
content block will be prepended to the file.
insert_before_match
If markers are not found, this parameter can be set to a regex which will
insert the block before the first found occurrence in the file.
.. versionadded:: Neon
insert_after_match
If markers are not found, this parameter can be set to a regex which will
insert the block after the first found occurrence in the file.
.. versionadded:: Neon
backup
The file extension to use for a backup of the file if any edit is made.
Set this to ``False`` to skip making a backup.
dry_run : False
If ``True``, do not make any edits to the file and simply return the
changes that *would* be made.
show_changes : True
Controls how changes are presented. If ``True``, the ``Changes``
section of the state return will contain a unified diff of the changes
made. If False, then it will contain a boolean (``True`` if any changes
were made, otherwise ``False``).
append_newline
Controls whether or not a newline is appended to the content block. If
the value of this argument is ``True`` then a newline will be added to
the content block. If it is ``False``, then a newline will *not* be
added to the content block. If it is unspecified, then a newline will
only be added to the content block if it does not already end in a
newline.
.. versionadded:: 2017.7.5,2018.3.1
Example of usage with an accumulator and with a variable:
.. code-block:: jinja
{% set myvar = 42 %}
hosts-config-block-{{ myvar }}:
file.blockreplace:
- name: /etc/hosts
- marker_start: "# START managed zone {{ myvar }} -DO-NOT-EDIT-"
- marker_end: "# END managed zone {{ myvar }} --"
- content: 'First line of content'
- append_if_not_found: True
- backup: '.bak'
- show_changes: True
hosts-config-block-{{ myvar }}-accumulated1:
file.accumulated:
- filename: /etc/hosts
- name: my-accumulator-{{ myvar }}
- text: "text 2"
- require_in:
- file: hosts-config-block-{{ myvar }}
hosts-config-block-{{ myvar }}-accumulated2:
file.accumulated:
- filename: /etc/hosts
- name: my-accumulator-{{ myvar }}
- text: |
text 3
text 4
- require_in:
- file: hosts-config-block-{{ myvar }}
will generate and maintain a block of content in ``/etc/hosts``:
.. code-block:: text
# START managed zone 42 -DO-NOT-EDIT-
First line of content
text 2
text 3
text 4
# END managed zone 42 --
|
def tdSensor(self):
"""Get the next sensor while iterating.
:return: a dict with the keys: protocol, model, id, datatypes.
"""
protocol = create_string_buffer(20)
model = create_string_buffer(20)
sid = c_int()
datatypes = c_int()
self._lib.tdSensor(protocol, sizeof(protocol), model, sizeof(model),
byref(sid), byref(datatypes))
return {'protocol': self._to_str(protocol),
'model': self._to_str(model),
'id': sid.value, 'datatypes': datatypes.value}
|
Get the next sensor while iterating.
:return: a dict with the keys: protocol, model, id, datatypes.
|
def reverse_iter(self, start=None, stop=None, count=2000):
""" -> yields items of the list in reverse """
cursor = '0'
start = start if start is not None else (-1 * count)
stop = stop if stop is not None else -1
_loads = self._loads
while cursor:
cursor = self._client.lrange(self.key_prefix, start, stop)
for x in reversed(cursor or []):
yield _loads(x)
start -= count
stop -= count
|
-> yields items of the list in reverse
|
def parse_meta(self, selected_meta_data):
"""
Parses all of the metadata files
:param selected_meta_data: if specified then only the columns that are contained here are going to be parsed
:return: pandas.DataFrame with all parsed metadata, indexed by the 'sample' column
"""
# reads all meta data files
files = self._get_files("meta", self.path)
df = pd.DataFrame()
print("Parsing the metadata files...")
for f in tqdm(files):
data = self.parse_single_meta(f, selected_meta_data)
if data is not None:
df = pd.concat([df, data], axis=0)
df.index = df['sample']
#
# df = df.drop('sample', 1) # 1 for the columns
return df
|
Parses all of the metadata files
:param selected_meta_data: if specified then only the columns that are contained here are going to be parsed
:return: pandas.DataFrame with all parsed metadata, indexed by the 'sample' column
|
def plot_fit_individuals_lens_plane_only(
fit, should_plot_mask=True, extract_array_from_mask=False, zoom_around_mask=False, positions=None,
should_plot_image_plane_pix=False,
should_plot_image=False,
should_plot_noise_map=False,
should_plot_signal_to_noise_map=False,
should_plot_model_image=False,
should_plot_residual_map=False,
should_plot_chi_squared_map=False,
units='arcsec',
output_path=None, output_format='show'):
"""Plot the model datas_ of an analysis, using the *Fitter* class object.
The visualization and output type can be fully customized.
Parameters
-----------
fit : autolens.lens.fitting.Fitter
Class containing fit between the model datas_ and observed lens datas_ (including residual_map, chi_squared_map etc.)
output_path : str
The path where the datas_ is output if the output_type is a file format (e.g. png, fits)
output_format : str
How the datas_ is output. File formats (e.g. png, fits) output the datas_ to hard disk. 'show' displays the datas_ \
in the python interpreter window.
"""
mask = lens_plotter_util.get_mask(fit=fit, should_plot_mask=should_plot_mask)
kpc_per_arcsec = fit.tracer.image_plane.kpc_per_arcsec
if should_plot_image:
image_plane_pix_grid = lens_plotter_util.get_image_plane_pix_grid(should_plot_image_plane_pix, fit)
lens_plotter_util.plot_image(
fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask,
image_plane_pix_grid=image_plane_pix_grid,
units=units, kpc_per_arcsec=kpc_per_arcsec,
output_path=output_path, output_format=output_format)
if should_plot_noise_map:
lens_plotter_util.plot_noise_map(
fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask,
units=units, kpc_per_arcsec=kpc_per_arcsec,
output_path=output_path, output_format=output_format)
if should_plot_signal_to_noise_map:
lens_plotter_util.plot_signal_to_noise_map(
fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask,
units=units, kpc_per_arcsec=kpc_per_arcsec,
output_path=output_path, output_format=output_format)
if should_plot_model_image:
lens_plotter_util.plot_model_data(
fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask, positions=positions,
units=units, kpc_per_arcsec=kpc_per_arcsec,
output_path=output_path, output_format=output_format)
if should_plot_residual_map:
lens_plotter_util.plot_residual_map(
fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask,
units=units, kpc_per_arcsec=kpc_per_arcsec,
output_path=output_path, output_format=output_format)
if should_plot_chi_squared_map:
lens_plotter_util.plot_chi_squared_map(
fit=fit, mask=mask, extract_array_from_mask=extract_array_from_mask, zoom_around_mask=zoom_around_mask,
units=units, kpc_per_arcsec=kpc_per_arcsec,
output_path=output_path, output_format=output_format)
|
Plot the model datas_ of an analysis, using the *Fitter* class object.
The visualization and output type can be fully customized.
Parameters
-----------
fit : autolens.lens.fitting.Fitter
Class containing fit between the model datas_ and observed lens datas_ (including residual_map, chi_squared_map etc.)
output_path : str
The path where the datas_ is output if the output_type is a file format (e.g. png, fits)
output_format : str
How the datas_ is output. File formats (e.g. png, fits) output the datas_ to hard disk. 'show' displays the datas_ \
in the python interpreter window.
|
def find_link(self, device):
'''find a device based on number, name or label'''
for i in range(len(self.mpstate.mav_master)):
conn = self.mpstate.mav_master[i]
if (str(i) == device or
conn.address == device or
getattr(conn, 'label', None) == device):
return i
return None
|
find a device based on number, name or label
|
def _set_security(self, v, load=False):
"""
Setter method for security, mapped from YANG variable /rbridge_id/threshold_monitor/security (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_security is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_security() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=security.security, is_container='container', presence=False, yang_name="security", rest_name="security", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Monitor security class', u'callpoint': u'securityconfiguration', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """security must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=security.security, is_container='container', presence=False, yang_name="security", rest_name="security", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Monitor security class', u'callpoint': u'securityconfiguration', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-threshold-monitor', defining_module='brocade-threshold-monitor', yang_type='container', is_config=True)""",
})
self.__security = t
if hasattr(self, '_set'):
self._set()
|
Setter method for security, mapped from YANG variable /rbridge_id/threshold_monitor/security (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_security is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_security() directly.
|
def fetch_batch_status(self, guid):
"""Fetch the status of a batch, given the guid"""
url = '%s/api/v5/batch/%s/status' % (self.base_url, guid)
headers = {
'User-Agent': 'kentik-python-api/0.1',
'Content-Type': 'application/json',
'X-CH-Auth-Email': self.api_email,
'X-CH-Auth-API-Token': self.api_token
}
resp = requests.get(url, headers=headers)
# break out at first sign of trouble
resp.raise_for_status()
return BatchResponse(guid, resp.json())
|
Fetch the status of a batch, given the guid
|
def _starts_with_vowel(self, letter_group: str) -> bool:
"""Check if a string starts with a vowel."""
if len(letter_group) == 0:
return False
return self._contains_vowels(letter_group[0])
|
Check if a string starts with a vowel.
|
def get_validated_token(self, raw_token):
"""
Validates an encoded JSON web token and returns a validated token
wrapper object.
"""
messages = []
for AuthToken in api_settings.AUTH_TOKEN_CLASSES:
try:
return AuthToken(raw_token)
except TokenError as e:
messages.append({'token_class': AuthToken.__name__,
'token_type': AuthToken.token_type,
'message': e.args[0]})
raise InvalidToken({
'detail': _('Given token not valid for any token type'),
'messages': messages,
})
|
Validates an encoded JSON web token and returns a validated token
wrapper object.
|
def calculate_sampling_decision(trace_header, recorder, sampling_req):
"""
Return 1 or the matched rule name if should sample and 0 if should not.
The sampling decision coming from ``trace_header`` always has
the highest precedence. If the ``trace_header`` doesn't contain
sampling decision then it checks if sampling is enabled or not
in the recorder. If not enabled it returns 1. Otherwise it uses user
defined sampling rules to decide.
"""
if trace_header.sampled is not None and trace_header.sampled != '?':
return trace_header.sampled
elif not recorder.sampling:
return 1
else:
decision = recorder.sampler.should_trace(sampling_req)
return decision if decision else 0
|
Return 1 or the matched rule name if should sample and 0 if should not.
The sampling decision coming from ``trace_header`` always has
the highest precedence. If the ``trace_header`` doesn't contain
sampling decision then it checks if sampling is enabled or not
in the recorder. If not enabled it returns 1. Otherwise it uses user
defined sampling rules to decide.
|
def get_variable(name, temp_s):
'''
Create a zero-initialized tf.Variable with the given name and shape ``temp_s``.
'''
return tf.Variable(tf.zeros(temp_s), name=name)
|
Create a zero-initialized tf.Variable with the given name and shape ``temp_s``.
|
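For example (the name and shape are illustrative), this creates a zero-initialized 128x10 variable:
weights = get_variable('weights', [128, 10])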
def print_groups():
"""Print all groups as JSON"""
print("Printing information about all groups defined in the Gateway")
groups = api(gateway.get_groups())
if len(groups) == 0:
exit(bold("No groups defined"))
container = []
for group in groups:
container.append(api(group).raw)
print(jsonify(container))
|
Print all groups as JSON
|
def _is_admin(user_id):
"""
Is the specified user an admin
"""
user = get_session().query(User).filter(User.id==user_id).one()
if user.is_admin():
return True
else:
return False
|
Is the specified user an admin
|
def now_micros(absolute=False) -> int:
"""Return current micros since epoch as integer."""
micros = int(time.time() * 1e6)
if absolute:
return micros
return micros - EPOCH_MICROS
|
Return current micros since epoch as integer.
|
def destination_uri_file_counts(self):
"""Return file counts from job statistics, if present.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.extract.destinationUriFileCounts
Returns:
a list of integer counts, each representing the number of files
per destination URI or URI pattern specified in the extract
configuration. These values will be in the same order as the URIs
specified in the 'destinationUris' field. Returns None if job is
not yet complete.
"""
counts = self._job_statistics().get("destinationUriFileCounts")
if counts is not None:
return [int(count) for count in counts]
return None
|
Return file counts from job statistics, if present.
See:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#statistics.extract.destinationUriFileCounts
Returns:
a list of integer counts, each representing the number of files
per destination URI or URI pattern specified in the extract
configuration. These values will be in the same order as the URIs
specified in the 'destinationUris' field. Returns None if job is
not yet complete.
|
def split(args):
"""
%prog split pairs.fastq
Split shuffled pairs into `.1.fastq` and `.2.fastq`, using `sed`. Can work
on gzipped file.
<http://seqanswers.com/forums/showthread.php?t=13776>
"""
from jcvi.apps.grid import Jobs
p = OptionParser(split.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
pairsfastq, = args
gz = pairsfastq.endswith(".gz")
pf = pairsfastq.replace(".gz", "").rsplit(".", 1)[0]
p1 = pf + ".1.fastq"
p2 = pf + ".2.fastq"
cmd = "zcat" if gz else "cat"
p1cmd = cmd + " {0} | sed -ne '1~8{{N;N;N;p}}'".format(pairsfastq)
p2cmd = cmd + " {0} | sed -ne '5~8{{N;N;N;p}}'".format(pairsfastq)
if gz:
p1cmd += " | gzip"
p2cmd += " | gzip"
p1 += ".gz"
p2 += ".gz"
p1cmd += " > " + p1
p2cmd += " > " + p2
args = [(p1cmd, ), (p2cmd, )]
m = Jobs(target=sh, args=args)
m.run()
checkShuffleSizes(p1, p2, pairsfastq)
|
%prog split pairs.fastq
Split shuffled pairs into `.1.fastq` and `.2.fastq`, using `sed`. Can work
on gzipped file.
<http://seqanswers.com/forums/showthread.php?t=13776>
|
def dispatch(argdict):
'''Call the command-specific function, depending on the command.'''
cmd = argdict['command']
ftc = getattr(THIS_MODULE, 'do_'+cmd)
ftc(argdict)
|
Call the command-specific function, depending on the command.
|
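A hedged sketch of the do_<command> naming convention this relies on; keys other than 'command' are illustrative:
dispatch({'command': 'build', 'target': 'docs'})  # resolves to do_build(argdict) in this module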
def get_cloud_init_mime(cloud_init):
'''
Get a mime multipart encoded string from a cloud-init dict. Currently
supports boothooks, scripts and cloud-config.
CLI Example:
.. code-block:: bash
salt myminion boto.get_cloud_init_mime <cloud init>
'''
if isinstance(cloud_init, six.string_types):
cloud_init = salt.utils.json.loads(cloud_init)
_cloud_init = email.mime.multipart.MIMEMultipart()
if 'boothooks' in cloud_init:
for script_name, script in six.iteritems(cloud_init['boothooks']):
_script = email.mime.text.MIMEText(script, 'cloud-boothook')
_cloud_init.attach(_script)
if 'scripts' in cloud_init:
for script_name, script in six.iteritems(cloud_init['scripts']):
_script = email.mime.text.MIMEText(script, 'x-shellscript')
_cloud_init.attach(_script)
if 'cloud-config' in cloud_init:
cloud_config = cloud_init['cloud-config']
_cloud_config = email.mime.text.MIMEText(
salt.utils.yaml.safe_dump(cloud_config, default_flow_style=False),
'cloud-config')
_cloud_init.attach(_cloud_config)
return _cloud_init.as_string()
|
Get a mime multipart encoded string from a cloud-init dict. Currently
supports boothooks, scripts and cloud-config.
CLI Example:
.. code-block:: bash
salt myminion boto.get_cloud_init_mime <cloud init>
|
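A hedged usage sketch with an illustrative cloud-init dict covering the three supported sections:
cloud_init = {
    'boothooks': {'mount-data': '#!/bin/sh\nmount /dev/sdb1 /data'},
    'scripts': {'setup': '#!/bin/bash\necho provisioning'},
    'cloud-config': {'packages': ['git', 'htop']},
}
mime_string = get_cloud_init_mime(cloud_init)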
def convert_to_string(self, block):
"""
Takes a list of SeqRecordExpanded objects corresponding to a gene_code
and produces the gene_block as string.
:param block: list of SeqRecordExpanded objects for a single gene_code
:return: str.
"""
out = ""
for seq_record in block:
taxon_id = ">{0}_{1}_{2} [org={0} {1}] [Specimen-voucher={2}] " \
"[note={3} gene, partial cds.] [Lineage={4}]".format(
seq_record.taxonomy['genus'],
seq_record.taxonomy['species'],
seq_record.voucher_code,
seq_record.gene_code,
seq_record.lineage,
)
sequence = get_seq(seq_record, self.codon_positions, self.aminoacids,
self.degenerate)
seq = sequence.seq
if sequence.warning:
self.warnings.append(sequence.warning)
n = 60
seq = [seq[i:i + n] for i in range(0, len(seq), n)]
out += '{0}\n{1}\n'.format(taxon_id, "\n".join(seq))
return out
|
Takes a list of SeqRecordExpanded objects corresponding to a gene_code
and produces the gene_block as string.
:param block: list of SeqRecordExpanded objects for a single gene_code
:return: str.
|
def make_mujoco_env(env_id, seed, reward_scale=1.0):
"""
Create a wrapped, monitored gym.Env for MuJoCo.
"""
rank = MPI.COMM_WORLD.Get_rank()
myseed = seed + 1000 * rank if seed is not None else None
set_global_seeds(myseed)
env = gym.make(env_id)
logger_path = None if logger.get_dir() is None else os.path.join(logger.get_dir(), str(rank))
env = Monitor(env, logger_path, allow_early_resets=True)
env.seed(seed)
if reward_scale != 1.0:
from baselines.common.retro_wrappers import RewardScaler
env = RewardScaler(env, reward_scale)
return env
|
Create a wrapped, monitored gym.Env for MuJoCo.
|
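A hedged usage sketch; the environment id, seed and scale are illustrative and assume a working MuJoCo/MPI setup:
env = make_mujoco_env('HalfCheetah-v2', seed=0, reward_scale=0.1)
obs = env.reset()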
def element_creator(namespace=None):
"""Create a simple namespace-aware objectify element creator.
Args:
namespace (str): Namespace to work in
Returns:
function: Namespace-aware element creator
"""
ELEMENT_MAKER = _objectify.ElementMaker(namespace=namespace,
annotate=False)
def create_elem(tag, attr=None, text=None):
""":class:`objectify.Element` wrapper with namespace defined.
Args:
tag (str): Tag name
attr (dict): Default attributes for tag
text (str): Text content for the tag
Returns:
_objectify.ObjectifiedElement: objectify element
"""
if not attr:
attr = {}
if text:
element = getattr(ELEMENT_MAKER, tag)(text, **attr)
else:
element = getattr(ELEMENT_MAKER, tag)(**attr)
return element
return create_elem
|
Create a simple namespace-aware objectify element creator.
Args:
namespace (str): Namespace to work in
Returns:
function: Namespace-aware element creator
|
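A short usage sketch of the returned creator; the namespace URI and tag are placeholders:
create_elem = element_creator(namespace='http://example.com/ns')
item = create_elem('item', attr={'id': '1'}, text='hello')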
def iterate(self, start_line=None, parse_attr=True, headers=False,
comments=False):
"""Iterate over GFF3 file, returning GFF3 entries
Args:
start_line (str): Next GFF3 entry. If 'handle' has been partially
read and you want to start iterating at the next entry, read
the next GFF3 entry and pass it to this variable when calling
gff3_iter. See 'Examples' for proper usage.
parse_attr (bool): Parse attributes column into a dictionary such
that the string "tag1=value1;tag2=value2" becomes:
tag1: value1
tag2: value2
headers (bool): Yields headers if True, else skips lines starting
with "##"
comments (bool): Yields comments if True, else skips lines starting
with "#"
Yields:
GFF3Entry: class containing all GFF3 data for each entry; header or
comment lines are yielded as str when the headers or comments options are True
Examples:
The following three examples demonstrate how to use gff3_iter.
Note: These doctests will not pass, examples are only in doctest
format as per convention. bio_utils uses pytests for testing.
>>> for entry in gff3_iter(open('test.gff3')):
... print(entry.seqid) # Sequence ID
... print(entry.source) # Software that performed annotation
... print(entry.type) # Type of annotation
... print(entry.start) # Start position of annotation
... print(entry.end) # End position of annotation
... print(entry.score) # Confidence score of annotation
... print(entry.strand) # Strand annotation is on
... print(entry.phase) # Bases until next codon
... print(entry.attributes) # Attributes of annotation
... print(entry.write()) # Reconstituted GFF3 entry
>>> gff3_handle = open('test.gff3')
>>> next(gff3_handle) # Skip first line/entry
>>> next_line = next(gff3_handle) # Store next entry
>>> for entry in gff3_iter(gff3_handle, start_line=next_line):
... print(entry.seqid) # Sequence ID
... print(entry.source) # Software that performed annotation
... print(entry.type) # Type of annotation
... print(entry.start) # Start position of annotation
... print(entry.end) # End position of annotation
... print(entry.score) # Confidence score of annotation
... print(entry.strand) # Strand annotation is on
... print(entry.phase) # Bases until next codon
... print(entry.attributes) # Attributes of annotation
... print(entry.write()) # Reconstituted GFF3 entry
>>> for entry in gff3_iter(open('test.gff3'), parse_attr=True):
... print(entry.seqid) # Sequence ID
... print(entry.source) # Software that performed annotation
... print(entry.type) # Type of annotation
... print(entry.start) # Start position of annotation
... print(entry.end) # End position of annotation
... print(entry.score) # Confidence score of annotation
... print(entry.strand) # Strand annotation is on
... print(entry.phase) # Bases until next codon
... print(entry.attributes['attr1']) # Print attribute 'attr1'
... print(entry.attributes['attr2']) # Print attribute 'attr2'
... print(entry.write()) # Reconstituted GFF3 entry
"""
handle = self.handle
# Speed tricks: reduces function calls
split = str.split
strip = str.strip
if start_line is None:
line = next(handle) # Read first GFF3
else:
line = start_line # Set header to given header
# Check if input is text or bytestream
if (isinstance(line, bytes)):
def next_line(i):
return next(i).decode('utf-8')
line = strip(line.decode('utf-8'))
else:
next_line = next
line = strip(line)
# Manual 'for' loop isn't needed to read the file properly and quickly,
# unlike fasta_iter and fastq_iter, but it is necessary to begin iterating
# partway through a file when the user gives a starting line.
try: # Manually construct a for loop to improve speed by using 'next'
while True: # Loop until StopIteration Exception raised
self.current_line += 1
data = GFF3Entry() # Initialize early to prevent access error
if line.startswith('##FASTA'): # Skip FASTA entries
raise FastaFound
if line.startswith('##') and not headers:
line = strip(next_line(handle))
continue
elif line.startswith('##') and headers:
yield line
line = strip(next_line(handle))
continue
if line.startswith('#') and not comments:
line = strip(next_line(handle))
continue
elif line.startswith('#') and comments:
yield line
line = strip(next_line(handle))
continue
split_line = split(line, '\t')
data.origline = line
data.seqid = split_line[0]
data.source = split_line[1]
data.type = split_line[2]
data.start = int(split_line[3])
data.end = int(split_line[4])
try: # Make float unless dot
data.score = float(split_line[5])
except ValueError:
data.score = split_line[5]
data._score_str = split_line[5]
data.strand = split_line[6]
try: # Get phase as int unless phase not given
data.phase = int(split_line[7])
except ValueError:
data.phase = split_line[7]
data.attributes = split_line[8]
if parse_attr:
attributes = split(data.attributes, ';')
data.attributes = OrderedDict()
for attribute in attributes:
split_attribute = attribute.split('=')
key = split_attribute[0]
value = split_attribute[-1].split(',') if ',' in \
split_attribute[-1] else split_attribute[-1]
if not key == '': # Avoid semicolon split at end
data.attributes[key] = value
line = strip(next_line(handle)) # Raises StopIteration at EOF
yield data
except StopIteration: # Yield last GFF3 entry
if data.origline:
yield data
        else:  # handle case where GFF ends in a comment
pass
except FastaFound: # When FASTA found, last entry is repeat so pass
pass
|
Iterate over GFF3 file, returning GFF3 entries
Args:
start_line (str): Next GFF3 entry. If 'handle' has been partially
read and you want to start iterating at the next entry, read
the next GFF3 entry and pass it to this variable when calling
gff3_iter. See 'Examples' for proper usage.
parse_attr (bool): Parse attributes column into a dictionary such
that the string "tag1=value1;tag2=value2" becomes:
tag1: value1
tag2: value2
headers (bool): Yields headers if True, else skips lines starting
with "##"
comments (bool): Yields comments if True, else skips lines starting
with "#"
Yields:
GFF3Entry: class containing all GFF3 data, yields str for headers
if headers options is True then yields GFF3Entry for entries
Examples:
The following three examples demonstrate how to use gff3_iter.
Note: These doctests will not pass, examples are only in doctest
        format as per convention. bio_utils uses pytest for testing.
>>> for entry in gff3_iter(open('test.gff3')):
... print(entry.seqid) # Sequence ID
... print(entry.source) # Software that performed annotation
... print(entry.type) # Type of annotation
... print(entry.start) # Start position of annotation
... print(entry.end) # End position of annotation
... print(entry.score) # Confidence score of annotation
... print(entry.strand) # Strand annotation is on
... print(entry.phase) # Bases until next codon
... print(entry.attributes) # Attributes of annotation
... print(entry.write()) # Reconstituted GFF3 entry
>>> gff3_handle = open('test.gff3')
>>> next(gff3_handle) # Skip first line/entry
>>> next_line = next(gff3_handle) # Store next entry
>>> for entry in gff3_iter(gff3_handle, start_line=next_line):
... print(entry.seqid) # Sequence ID
... print(entry.source) # Software that performed annotation
... print(entry.type) # Type of annotation
... print(entry.start) # Start position of annotation
... print(entry.end) # End position of annotation
... print(entry.score) # Confidence score of annotation
... print(entry.strand) # Strand annotation is on
... print(entry.phase) # Bases until next codon
... print(entry.attributes) # Attributes of annotation
... print(entry.write()) # Reconstituted GFF3 entry
>>> for entry in gff3_iter(open('test.gff3'), parse_attr=True):
... print(entry.seqid) # Sequence ID
... print(entry.source) # Software that performed annotation
... print(entry.type) # Type of annotation
... print(entry.start) # Start position of annotation
... print(entry.end) # End position of annotation
... print(entry.score) # Confidence score of annotation
... print(entry.strand) # Strand annotation is on
... print(entry.phase) # Bases until next codon
... print(entry.attributes['attr1']) # Print attribute 'attr1'
... print(entry.attributes['attr2']) # Print attribute 'attr2'
... print(entry.write()) # Reconstituted GFF3 entry
|
def deserialize(cls, config, credentials):
"""
A *class method* which reconstructs credentials created by
:meth:`serialize`. You can also pass it a :class:`.Credentials`
instance.
:param dict config:
The same :doc:`config` used in the :func:`.login` to get the
credentials.
:param str credentials:
:class:`string` The serialized credentials or
:class:`.Credentials` instance.
:returns:
:class:`.Credentials`
"""
# Accept both serialized and normal.
if isinstance(credentials, Credentials):
return credentials
decoded = parse.unquote(credentials)
split = decoded.split('\n')
# We need the provider ID to move forward.
    if not split[0]:
raise CredentialsError(
'To deserialize credentials you need to specify a unique '
'integer under the "id" key in the config for each provider!')
# Get provider config by short name.
provider_name = id_to_name(config, int(split[0]))
cfg = config.get(provider_name)
# Get the provider class.
ProviderClass = resolve_provider_class(cfg.get('class_'))
deserialized = Credentials(config)
    deserialized.provider_id = int(split[0])
deserialized.provider_type = ProviderClass.get_type()
deserialized.provider_type_id = split[1]
deserialized.provider_class = ProviderClass
deserialized.provider_name = provider_name
# Add provider type specific properties.
return ProviderClass.reconstruct(split[2:], deserialized, cfg)
|
A *class method* which reconstructs credentials created by
:meth:`serialize`. You can also pass it a :class:`.Credentials`
instance.
:param dict config:
The same :doc:`config` used in the :func:`.login` to get the
credentials.
:param str credentials:
:class:`string` The serialized credentials or
:class:`.Credentials` instance.
:returns:
:class:`.Credentials`
|
def index(self, row, column, parent):
"""Reimplemented from QtCore.QAbstractItemModel
The internal pointer is the section.
The row determines the key in the scalars then sections of the configobj.
So for a given index, use row to retrieve the key::
key = self.get_key(index.internalPointer(), index.row())
To use the key on the section to get the value OR
use get_value(index) / get_configspec_str
"""
if not parent.isValid():
s = self._conf
else:
p = parent.internalPointer()
k = self.get_key(p, parent.row())
s = p[k]
return self.createIndex(row, column, s)
|
Reimplemented from QtCore.QAbstractItemModel
The internal pointer is the section.
The row determines the key in the scalars then sections of the configobj.
So for a given index, use row to retrieve the key::
key = self.get_key(index.internalPointer(), index.row())
To use the key on the section to get the value OR
use get_value(index) / get_configspec_str
|
def evaluate_block(self, comments):
"""Evaluate block comments."""
if self.jsdocs:
m1 = RE_JSDOC.match(comments)
if m1:
lines = []
for line in m1.group(1).splitlines(True):
l = line.lstrip()
lines.append(l[1:] if l.startswith('*') else l)
self.jsdoc_comments.append([''.join(lines), self.line_num, self.current_encoding])
elif self.blocks:
self.block_comments.append([comments[2:-2], self.line_num, self.current_encoding])
elif self.blocks:
self.block_comments.append([comments[2:-2], self.line_num, self.current_encoding])
|
Evaluate block comments.
|
def update(self, data, length=None):
"""
Hashes given byte string
@param data - string to hash
    @param length - if not specified, entire string is hashed,
otherwise only first length bytes
"""
if self.digest_finalized:
raise DigestError("No updates allowed")
if not isinstance(data, bintype):
raise TypeError("A byte string is expected")
if length is None:
length = len(data)
elif length > len(data):
raise ValueError("Specified length is greater than length of data")
result = libcrypto.EVP_DigestUpdate(self.ctx, c_char_p(data), length)
if result != 1:
raise DigestError("Unable to update digest")
|
Hashes given byte string
@param data - string to hash
@param length - if not specified, entire string is hashed,
otherwise only first length bytes
|
def get_file(self, name):
"""
Returns the output file with the specified name, if no output files
match, returns None.
"""
files = self.get_output_files()
for f in files:
if f.get_name() == name:
return f
return None
|
Returns the output file with the specified name, if no output files
match, returns None.
|
def close(self):
"""Logs out and quits the current web driver/selenium session."""
if not self.driver:
return
try:
self.driver.implicitly_wait(1)
self.driver.find_element_by_id('link-logout').click()
except NoSuchElementException:
pass
self.driver.quit()
self.driver = None
|
Logs out and quits the current web driver/selenium session.
|
def groupByContent(paths):
"""Byte-for-byte comparison on an arbitrary number of files in parallel.
This operates by opening all files in parallel and comparing
chunk-by-chunk. This has the following implications:
- Reads the same total amount of data as hash comparison.
- Performs a *lot* of disk seeks. (Best suited for SSDs)
- Vulnerable to file handle exhaustion if used on its own.
:param paths: List of potentially identical files.
:type paths: iterable
:returns: A dict mapping one path to a list of all paths (self included)
with the same contents.
.. todo:: Start examining the ``while handles:`` block to figure out how to
minimize thrashing in situations where read-ahead caching is active.
Compare savings by read-ahead to savings due to eliminating false
positives as quickly as possible. This is a 2-variable min/max problem.
.. todo:: Look into possible solutions for pathological cases of thousands
of files with the same size and same pre-filter results. (File handle
exhaustion)
"""
handles, results = [], []
# Silently ignore files we don't have permission to read.
hList = []
for path in paths:
try:
hList.append((path, open(path, 'rb'), ''))
except IOError:
pass # TODO: Verbose-mode output here.
handles.append(hList)
while handles:
# Process more blocks.
more, done = compareChunks(handles.pop(0))
# Add the results to the top-level lists.
handles.extend(more)
results.extend(done)
# Keep the same API as the others.
return dict((x[0], x) for x in results)
|
Byte-for-byte comparison on an arbitrary number of files in parallel.
This operates by opening all files in parallel and comparing
chunk-by-chunk. This has the following implications:
- Reads the same total amount of data as hash comparison.
- Performs a *lot* of disk seeks. (Best suited for SSDs)
- Vulnerable to file handle exhaustion if used on its own.
:param paths: List of potentially identical files.
:type paths: iterable
:returns: A dict mapping one path to a list of all paths (self included)
with the same contents.
.. todo:: Start examining the ``while handles:`` block to figure out how to
minimize thrashing in situations where read-ahead caching is active.
Compare savings by read-ahead to savings due to eliminating false
positives as quickly as possible. This is a 2-variable min/max problem.
.. todo:: Look into possible solutions for pathological cases of thousands
of files with the same size and same pre-filter results. (File handle
exhaustion)
|
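A minimal usage sketch for groupByContent above; the file paths are hypothetical and the groups returned depend on the files' actual contents.
candidates = ['report_v1.txt', 'report_v2.txt', 'report_copy.txt']  # hypothetical paths
groups = groupByContent(candidates)
for first_path, group in groups.items():
    if len(group) > 1:
        print("byte-identical files:", group)  # every path in 'group' has the same contents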
def get_linode(kwargs=None, call=None):
'''
Returns data for a single named Linode.
name
        The name of the Linode for which to get data. Can be used instead of
``linode_id``. Note this will induce an additional API call
compared to using ``linode_id``.
linode_id
The ID of the Linode for which to get data. Can be used instead of
``name``.
CLI Example:
.. code-block:: bash
salt-cloud -f get_linode my-linode-config name=my-instance
salt-cloud -f get_linode my-linode-config linode_id=1234567
'''
if call == 'action':
raise SaltCloudSystemExit(
'The get_linode function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
name = kwargs.get('name', None)
linode_id = kwargs.get('linode_id', None)
if name is None and linode_id is None:
raise SaltCloudSystemExit(
'The get_linode function requires either a \'name\' or a \'linode_id\'.'
)
if linode_id is None:
linode_id = get_linode_id_from_name(name)
result = _query('linode', 'list', args={'LinodeID': linode_id})
return result['DATA'][0]
|
Returns data for a single named Linode.
name
    The name of the Linode for which to get data. Can be used instead of
``linode_id``. Note this will induce an additional API call
compared to using ``linode_id``.
linode_id
The ID of the Linode for which to get data. Can be used instead of
``name``.
CLI Example:
.. code-block:: bash
salt-cloud -f get_linode my-linode-config name=my-instance
salt-cloud -f get_linode my-linode-config linode_id=1234567
|
def _merge_flags(new_flags, old_flags=None, conf='any'):
'''
Merges multiple lists of flags removing duplicates and resolving conflicts
    giving priority to the last lists.
'''
if not old_flags:
old_flags = []
args = [old_flags, new_flags]
if conf == 'accept_keywords':
tmp = new_flags + \
[i for i in old_flags if _check_accept_keywords(new_flags, i)]
else:
tmp = portage.flatten(args)
flags = {}
for flag in tmp:
if flag[0] == '-':
flags[flag[1:]] = False
else:
flags[flag] = True
tmp = []
for key, val in six.iteritems(flags):
if val:
tmp.append(key)
else:
tmp.append('-' + key)
# Next sort is just aesthetic, can be commented for a small performance
# boost
tmp.sort(key=lambda x: x.lstrip('-'))
return tmp
|
Merges multiple lists of flags removing duplicates and resolving conflicts
giving priority to the last lists.
|
def process_shell(self, creator, entry, config):
"""Processing a shell entry."""
self.logger.info("Processing Bash code: start")
output = []
shell = creator(entry, config)
for line in shell.process():
output.append(line)
self.logger.info(" | %s", line)
if shell.success:
self.logger.info("Processing Bash code: finished")
return {'success': True, 'output': output}
for line in self.run_cleanup(config.env, shell.exit_code):
output.append(line)
self.logger.error("Pipeline has failed: leaving as soon as possible!")
self.event.failed()
return {'success': False, 'output': output}
|
Processing a shell entry.
|
def launch(self, args, unknown):
"""Launch something according to the provided arguments
:param args: arguments from the launch parser
:type args: Namespace
:param unknown: list of unknown arguments
:type unknown: list
:returns: None
:rtype: None
:raises: SystemExit
"""
pm = plugins.PluginManager.get()
addon = pm.get_plugin(args.addon)
isgui = isinstance(addon, plugins.JB_StandaloneGuiPlugin)
if isgui:
gui.main.init_gui()
print "Launching %s..." % args.addon
addon.run()
if isgui:
app = gui.main.get_qapp()
sys.exit(app.exec_())
|
Launch something according to the provided arguments
:param args: arguments from the launch parser
:type args: Namespace
:param unknown: list of unknown arguments
:type unknown: list
:returns: None
:rtype: None
:raises: SystemExit
|
def get_states(self):
"""
Returns the states of variables present in the network
Examples
--------
>>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
>>> reader.get_states()
{'bowel-problem': ['true', 'false'],
'dog-out': ['true', 'false'],
'family-out': ['true', 'false'],
'hear-bark': ['true', 'false'],
'light-on': ['true', 'false']}
"""
variable_states = {variable.find('NAME').text: [outcome.text for outcome in variable.findall('OUTCOME')]
for variable in self.network.findall('VARIABLE')}
return variable_states
|
Returns the states of variables present in the network
Examples
--------
>>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
>>> reader.get_states()
{'bowel-problem': ['true', 'false'],
'dog-out': ['true', 'false'],
'family-out': ['true', 'false'],
'hear-bark': ['true', 'false'],
'light-on': ['true', 'false']}
|
def two_digit_freqs(digits, normalize=False):
"""
Consume digits of pi and compute 2 digits freq. counts.
"""
freqs = np.zeros(100, dtype='i4')
last = digits.next()
this = digits.next()
for d in digits:
index = int(last + this)
freqs[index] += 1
last = this
this = d
if normalize:
freqs = freqs/freqs.sum()
return freqs
|
Consume digits of pi and compute 2 digits freq. counts.
|
def binaryRecords(self, path, recordLength):
"""
.. note:: Experimental
Load data from a flat binary file, assuming each record is a set of numbers
with the specified numerical format (see ByteBuffer), and the number of
bytes per record is constant.
:param path: Directory to the input data files
:param recordLength: The length at which to split the records
"""
return RDD(self._jsc.binaryRecords(path, recordLength), self, NoOpSerializer())
|
.. note:: Experimental
Load data from a flat binary file, assuming each record is a set of numbers
with the specified numerical format (see ByteBuffer), and the number of
bytes per record is constant.
:param path: Directory to the input data files
:param recordLength: The length at which to split the records
|
def network_lopf_solve(network, snapshots=None, formulation="angles", solver_options={}, solver_logfile=None, keep_files=False,
                       free_memory={'pyomo'}, extra_postprocessing=None):
"""
Solve linear optimal power flow for a group of snapshots and extract results.
Parameters
----------
snapshots : list or index slice
A list of snapshots to optimise, must be a subset of
network.snapshots, defaults to network.snapshots
formulation : string
Formulation of the linear power flow equations to use; must be one of
["angles","cycles","kirchhoff","ptdf"]; must match formulation used for
building the model.
solver_options : dictionary
A dictionary with additional options that get passed to the solver.
(e.g. {'threads':2} tells gurobi to use only 2 cpus)
solver_logfile : None|string
If not None, sets the logfile option of the solver.
keep_files : bool, default False
Keep the files that pyomo constructs from OPF problem
construction, e.g. .lp file - useful for debugging
free_memory : set, default {'pyomo'}
Any subset of {'pypsa', 'pyomo'}. Allows to stash `pypsa` time-series
data away while the solver runs (as a pickle to disk) and/or free
`pyomo` data after the solution has been extracted.
extra_postprocessing : callable function
This function must take three arguments
`extra_postprocessing(network,snapshots,duals)` and is called after
the model has solved and the results are extracted. It allows the user to
extract further information about the solution, such as additional shadow prices.
Returns
-------
None
"""
snapshots = _as_snapshots(network, snapshots)
logger.info("Solving model using %s", network.opt.name)
if isinstance(network.opt, PersistentSolver):
args = []
else:
args = [network.model]
if isinstance(free_memory, string_types):
free_memory = {free_memory}
if 'pypsa' in free_memory:
with empty_network(network):
network.results = network.opt.solve(*args, suffixes=["dual"], keepfiles=keep_files, logfile=solver_logfile, options=solver_options)
else:
network.results = network.opt.solve(*args, suffixes=["dual"], keepfiles=keep_files, logfile=solver_logfile, options=solver_options)
if logger.isEnabledFor(logging.INFO):
network.results.write()
status = network.results["Solver"][0]["Status"].key
termination_condition = network.results["Solver"][0]["Termination condition"].key
if status == "ok" and termination_condition == "optimal":
logger.info("Optimization successful")
extract_optimisation_results(network, snapshots, formulation,
free_pyomo='pyomo' in free_memory,
extra_postprocessing=extra_postprocessing)
elif status == "warning" and termination_condition == "other":
logger.warning("WARNING! Optimization might be sub-optimal. Writing output anyway")
extract_optimisation_results(network, snapshots, formulation,
free_pyomo='pyomo' in free_memory,
extra_postprocessing=extra_postprocessing)
else:
logger.error("Optimisation failed with status %s and terminal condition %s"
% (status, termination_condition))
return status, termination_condition
|
Solve linear optimal power flow for a group of snapshots and extract results.
Parameters
----------
snapshots : list or index slice
A list of snapshots to optimise, must be a subset of
network.snapshots, defaults to network.snapshots
formulation : string
Formulation of the linear power flow equations to use; must be one of
["angles","cycles","kirchhoff","ptdf"]; must match formulation used for
building the model.
solver_options : dictionary
A dictionary with additional options that get passed to the solver.
(e.g. {'threads':2} tells gurobi to use only 2 cpus)
solver_logfile : None|string
If not None, sets the logfile option of the solver.
keep_files : bool, default False
Keep the files that pyomo constructs from OPF problem
construction, e.g. .lp file - useful for debugging
free_memory : set, default {'pyomo'}
Any subset of {'pypsa', 'pyomo'}. Allows to stash `pypsa` time-series
data away while the solver runs (as a pickle to disk) and/or free
`pyomo` data after the solution has been extracted.
extra_postprocessing : callable function
This function must take three arguments
`extra_postprocessing(network,snapshots,duals)` and is called after
the model has solved and the results are extracted. It allows the user to
extract further information about the solution, such as additional shadow prices.
Returns
-------
None
|
def action_rename(self):
"""
Rename a shortcut
"""
# get old and new name from args
old = self.args['<old>']
new = self.args['<new>']
# select the old shortcut
self.db_query('''
SELECT id FROM shortcuts WHERE name=?
''', (old,))
r = self.db_fetch_one()
# error if old doesn't exist
    if r is None:
print_err('Shortcut "%s" does not exist!' % old)
return
# error if new exists
if self.shortcut_exists(new):
print_err('Shortcut "%s" already exists!' % new)
return
id = r[0]
# rename in DB
self.db_exec('''
UPDATE shortcuts SET name=? WHERE id=?
''', (new, id))
# show OK message
print_msg('Shortcut "%s" renamed to "%s".' % (old, new))
|
Rename a shortcut
|
def get_ips(host, port):
"""
lookup all IPs (v4 and v6)
"""
ips = set()
for af_type in (socket.AF_INET, socket.AF_INET6):
try:
records = socket.getaddrinfo(host, port, af_type, socket.SOCK_STREAM)
ips.update(rec[4][0] for rec in records)
        except socket.gaierror:
pass
return ips
|
lookup all IPs (v4 and v6)
|
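A quick usage sketch for get_ips above; the addresses actually returned depend on the local resolver configuration.
ips = get_ips('localhost', 443)
print(ips)  # typically something like {'127.0.0.1', '::1'}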
def get_fallback_languages(self, language_code=None, site_id=None):
"""
Find out what the fallback language is for a given language choice.
.. versionadded 1.5
"""
choices = self.get_active_choices(language_code, site_id=site_id)
return choices[1:]
|
Find out what the fallback language is for a given language choice.
.. versionadded 1.5
|
def in4_chksum(proto, u, p):
"""
As Specified in RFC 2460 - 8.1 Upper-Layer Checksums
Performs IPv4 Upper Layer checksum computation. Provided parameters are:
- 'proto' : value of upper layer protocol
- 'u' : IP upper layer instance
- 'p' : the payload of the upper layer provided as a string
"""
if not isinstance(u, IP):
warning("No IP underlayer to compute checksum. Leaving null.")
return 0
if u.len is not None:
if u.ihl is None:
olen = sum(len(x) for x in u.options)
ihl = 5 + olen // 4 + (1 if olen % 4 else 0)
else:
ihl = u.ihl
ln = max(u.len - 4 * ihl, 0)
else:
ln = len(p)
psdhdr = struct.pack("!4s4sHH",
inet_pton(socket.AF_INET, u.src),
inet_pton(socket.AF_INET, u.dst),
proto,
ln)
return checksum(psdhdr + p)
|
As Specified in RFC 2460 - 8.1 Upper-Layer Checksums
Performs IPv4 Upper Layer checksum computation. Provided parameters are:
- 'proto' : value of upper layer protocol
- 'u' : IP upper layer instance
- 'p' : the payload of the upper layer provided as a string
|
def update(self, values):
"""Add new declarations to this set/
Args:
values (dict(name, declaration)): the declarations to ingest.
"""
for k, v in values.items():
root, sub = self.split(k)
if sub is None:
self.declarations[root] = v
else:
self.contexts[root][sub] = v
extra_context_keys = set(self.contexts) - set(self.declarations)
if extra_context_keys:
raise errors.InvalidDeclarationError(
"Received deep context for unknown fields: %r (known=%r)" % (
{
self.join(root, sub): v
for root in extra_context_keys
for sub, v in self.contexts[root].items()
},
sorted(self.declarations),
)
)
|
Add new declarations to this set.
Args:
values (dict(name, declaration)): the declarations to ingest.
|
def status(context):
"""See which files have changed, checked in, and uploaded"""
context.obj.find_repo_type()
context.obj.call([context.obj.vc_name, 'status'])
|
See which files have changed, checked in, and uploaded
|
def mask_blurring_from_mask_and_psf_shape(mask, psf_shape):
"""Compute a blurring masks from an input masks and psf shape.
The blurring masks corresponds to all pixels which are outside of the masks but will have a fraction of their \
light blur into the masked region due to PSF convolution."""
blurring_mask = np.full(mask.shape, True)
for y in range(mask.shape[0]):
for x in range(mask.shape[1]):
if not mask[y, x]:
for y1 in range((-psf_shape[0] + 1) // 2, (psf_shape[0] + 1) // 2):
for x1 in range((-psf_shape[1] + 1) // 2, (psf_shape[1] + 1) // 2):
if 0 <= x + x1 <= mask.shape[1] - 1 and 0 <= y + y1 <= mask.shape[0] - 1:
if mask[y + y1, x + x1]:
blurring_mask[y + y1, x + x1] = False
else:
raise exc.MaskException(
"setup_blurring_mask extends beyond the sub_grid_size of the masks - pad the "
"datas array before masking")
return blurring_mask
|
Compute a blurring masks from an input masks and psf shape.
The blurring masks corresponds to all pixels which are outside of the masks but will have a fraction of their \
light blur into the masked region due to PSF convolution.
|
def _join_partner(self, partner: Address):
""" Ensure a channel exists with partner and is funded in our side """
try:
self.api.channel_open(
self.registry_address,
self.token_address,
partner,
)
except DuplicatedChannelError:
# If channel already exists (either because partner created it,
# or it's nonfunded channel), continue to ensure it's funded
pass
total_deposit = self._initial_funding_per_partner
if total_deposit == 0:
return
try:
self.api.set_total_channel_deposit(
registry_address=self.registry_address,
token_address=self.token_address,
partner_address=partner,
total_deposit=total_deposit,
)
except InvalidDBData:
raise
except RECOVERABLE_ERRORS:
log.info(
'Deposit failed',
node=pex(self.raiden.address),
partner=pex(partner),
)
except RaidenUnrecoverableError:
should_crash = (
self.raiden.config['environment_type'] != Environment.PRODUCTION or
self.raiden.config['unrecoverable_error_should_crash']
)
if should_crash:
raise
log.critical(
'Deposit failed',
node=pex(self.raiden.address),
partner=pex(partner),
)
|
Ensure a channel exists with partner and is funded in our side
|
def _move_node_file(path, old_id, new_id):
"""
    Move the files from a node when changing its id
:param path: Path of the project
:param old_id: ID before change
:param new_id: New node UUID
"""
root = os.path.join(path, "project-files")
if os.path.exists(root):
for dirname in os.listdir(root):
module_dir = os.path.join(root, dirname)
if os.path.isdir(module_dir):
node_dir = os.path.join(module_dir, old_id)
if os.path.exists(node_dir):
shutil.move(node_dir, os.path.join(module_dir, new_id))
|
Move the files from a node when changing its id
:param path: Path of the project
:param old_id: ID before change
:param new_id: New node UUID
|
def set_SaveName(self,SaveName=None,
include=None,
ForceUpdate=False):
""" Set the name for saving the instance (SaveName)
SaveName can be either:
- provided by the user (no constraint) - not recommended
- automatically generated from Name and key attributes (cf. include)
Parameters
----------
SaveName : None / str
If provided, overrides the default name for saving (not recommended)
include : list
        Controls how the default SaveName is generated
Each element of the list is a key str indicating whether an element
should be present in the SaveName
ForceUpdate : bool
Flag indicating the behaviour when SaveName=None:
- True : A new SaveName is generated, overriding the old one
- False : The former SaveName is preserved (default)
"""
if not 'SaveName-usr' in self.dall.keys():
self._dall['SaveName-usr'] = (SaveName is not None)
# If SaveName provided by user, override
if SaveName is not None:
self._dall['SaveName'] = SaveName
self._dall['SaveName-usr'] = True
else:
# Don't update if former is user-defined and ForceUpdate is False
# Override if previous was:
# automatic or (user-defined but ForceUpdate is True)
C0 = self._dall['SaveName-usr']
C1 = self._dall['SaveName-usr'] and ForceUpdate
if (not C0) or C1:
SN = ID.SaveName_Conv(Mod=self.Mod, Cls=self.Cls,
Type=self.Type, Name=self.Name,
Deg=self.Deg, Exp=self.Exp,
Diag=self.Diag, shot=self.shot,
version=self.version, usr=self.usr,
include=include)
self._dall['SaveName'] = SN
self._dall['SaveName-usr'] = False
|
Set the name for saving the instance (SaveName)
SaveName can be either:
- provided by the user (no constraint) - not recommended
- automatically generated from Name and key attributes (cf. include)
Parameters
----------
SaveName : None / str
If provided, overrides the default name for saving (not recommended)
include : list
    Controls how the default SaveName is generated
Each element of the list is a key str indicating whether an element
should be present in the SaveName
ForceUpdate : bool
Flag indicating the behaviour when SaveName=None:
- True : A new SaveName is generated, overriding the old one
- False : The former SaveName is preserved (default)
|
def append_new_text(destination, text, join_str=None):
"""
This method provides the functionality of adding text appropriately
underneath the destination node. This will be either to the destination's
text attribute or to the tail attribute of the last child.
"""
if join_str is None:
join_str = ' '
if len(destination) > 0: # Destination has children
last = destination[-1]
if last.tail is None: # Last child has no tail
last.tail = text
else: # Last child has a tail
last.tail = join_str.join([last.tail, text])
else: # Destination has no children
if destination.text is None: # Destination has no text
destination.text = text
else: # Destination has a text
destination.text = join_str.join([destination.text, text])
|
This method provides the functionality of adding text appropriately
underneath the destination node. This will be either to the destination's
text attribute or to the tail attribute of the last child.
|
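A short illustration of append_new_text above using lxml; the markup is made up for the example and shows the tail-versus-text behaviour the docstring describes.
from lxml import etree

p = etree.fromstring('<p>Hello <b>world</b></p>')
append_new_text(p, 'again')             # goes to the tail of the last child, <b>
assert etree.tostring(p) == b'<p>Hello <b>world</b>again</p>'
append_new_text(p, '!', join_str='')    # joined onto the existing tail with join_str
assert etree.tostring(p) == b'<p>Hello <b>world</b>again!</p>'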
def normalize_slice(e, n):
"""
Return the slice tuple normalized for an ``n``-element object.
:param e: a slice object representing a selector
:param n: number of elements in a sequence to which ``e`` is applied
:returns: tuple ``(start, count, step)`` derived from ``e``.
"""
if n == 0:
return (0, 0, 1)
step = e.step
if step is None:
step = 1
if step == 0:
start = e.start
count = e.stop
if isinstance(start, int) and isinstance(count, int) and count >= 0:
if start < 0:
start += n
if start < 0:
return (0, 0, 0)
return (start, count, 0)
else:
raise ValueError("Invalid slice %r" % e)
assert isinstance(step, int) and step != 0
if e.start is None:
start = 0 if step > 0 else n - 1
else:
start = e.start
if start < 0:
start += n
if (start < 0 and step < 0) or (start >= n and step > 0):
return (0, 0, 0)
start = min(max(0, start), n - 1)
assert isinstance(start, int) and 0 <= start < n, \
"Invalid start: %r" % start
if e.stop is None:
if step > 0:
count = (n - 1 - start) // step + 1
else:
count = (start // -step) + 1
else:
stop = e.stop
if stop < 0:
stop += n
if step > 0:
if stop > start:
count = (min(n, stop) - 1 - start) // step + 1
else:
count = 0
else:
if stop < start:
count = (start - max(stop, -1) - 1) // -step + 1
else:
count = 0
assert isinstance(count, int) and count >= 0
assert count == 0 or 0 <= start + step * (count - 1) < n, \
"Wrong tuple: (%d, %d, %d)" % (start, count, step)
return (start, count, step)
|
Return the slice tuple normalized for an ``n``-element object.
:param e: a slice object representing a selector
:param n: number of elements in a sequence to which ``e`` is applied
:returns: tuple ``(start, count, step)`` derived from ``e``.
|
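A few worked examples for normalize_slice above, assuming the function is in scope; the expected tuples follow directly from the rules in the code.
assert normalize_slice(slice(1, 7, 2), 10) == (1, 3, 2)          # elements 1, 3, 5
assert normalize_slice(slice(None, None, -1), 5) == (4, 5, -1)   # whole sequence, reversed
assert normalize_slice(slice(-3, None), 10) == (7, 3, 1)         # last three elements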
def getDelta(original, update):
"""
Generates an update token delta_{k->k'}.
@original: values for k: (w,msk,s)
@update: values for kPrime: (w',msk',s')
@return (delta, p'): @delta = k'/k, @p' is a new pubkey based on k'.
"""
# Compute both keys
k = genKw(*original)
kPrime = genKw(*update)
# Compute delta,p'
delta = (kPrime * inverse(k, orderGt())) % orderGt()
pPrime = generatorGt()**kPrime
return delta,pPrime
|
Generates an update token delta_{k->k'}.
@original: values for k: (w,msk,s)
@update: values for kPrime: (w',msk',s')
@return (delta, p'): @delta = k'/k, @p' is a new pubkey based on k'.
|
def ast_from_module_name(self, modname, context_file=None):
"""given a module name, return the astroid object"""
if modname in self.astroid_cache:
return self.astroid_cache[modname]
if modname == "__main__":
return self._build_stub_module(modname)
old_cwd = os.getcwd()
if context_file:
os.chdir(os.path.dirname(context_file))
try:
found_spec = self.file_from_module_name(modname, context_file)
if found_spec.type == spec.ModuleType.PY_ZIPMODULE:
module = self.zip_import_data(found_spec.location)
if module is not None:
return module
elif found_spec.type in (
spec.ModuleType.C_BUILTIN,
spec.ModuleType.C_EXTENSION,
):
if (
found_spec.type == spec.ModuleType.C_EXTENSION
and not self._can_load_extension(modname)
):
return self._build_stub_module(modname)
try:
module = modutils.load_module_from_name(modname)
except Exception as ex:
raise exceptions.AstroidImportError(
"Loading {modname} failed with:\n{error}",
modname=modname,
path=found_spec.location,
) from ex
return self.ast_from_module(module, modname)
elif found_spec.type == spec.ModuleType.PY_COMPILED:
raise exceptions.AstroidImportError(
"Unable to load compiled module {modname}.",
modname=modname,
path=found_spec.location,
)
elif found_spec.type == spec.ModuleType.PY_NAMESPACE:
return self._build_namespace_module(
modname, found_spec.submodule_search_locations
)
if found_spec.location is None:
raise exceptions.AstroidImportError(
"Can't find a file for module {modname}.", modname=modname
)
return self.ast_from_file(found_spec.location, modname, fallback=False)
except exceptions.AstroidBuildingError as e:
for hook in self._failed_import_hooks:
try:
return hook(modname)
except exceptions.AstroidBuildingError:
pass
raise e
finally:
os.chdir(old_cwd)
|
given a module name, return the astroid object
|
async def _connect(self):
"""
Connect to the stream
Returns
-------
asyncio.coroutine
The streaming response
"""
logger.debug("connecting to the stream")
await self.client.setup
if self.session is None:
self.session = self.client._session
kwargs = await self.client.headers.prepare_request(**self.kwargs)
request = self.client.error_handler(self.session.request)
return await request(timeout=0, **kwargs)
|
Connect to the stream
Returns
-------
asyncio.coroutine
The streaming response
|
def packageInfo(self):
"""gets the item's package information file"""
url = "%s/item.pkinfo" % self.root
params = {'f' : 'json'}
result = self._get(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port,
out_folder=tempfile.gettempdir())
return result
|
gets the item's package information file
|
def get_download_link(self):
""" Get direct download link with soudcloud's redirect system. """
url = None
if not self.get("downloadable"):
try:
url = self.client.get_location(
self.client.STREAM_URL % self.get("id"))
except serror as e:
print(e)
if not url:
try:
url = self.client.get_location(
self.client.DOWNLOAD_URL % self.get("id"))
except serror as e:
print(e)
return url
|
Get direct download link with soundcloud's redirect system.
|
def create_node(manager, name, meta_type_label, type_label, handle_id, legacy=True):
"""
Creates a node with the mandatory attributes name and handle_id also sets type label.
:param manager: Manager to handle sessions and transactions
:param name: Node name
:param meta_type_label: Node meta type
:param type_label: Node label
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type name: str|unicode
:type meta_type_label: str|unicode
:type type_label: str|unicode
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
"""
if meta_type_label not in META_TYPES:
raise exceptions.MetaLabelNamingError(meta_type_label)
q = """
CREATE (n:Node:%s:%s { name: { name }, handle_id: { handle_id }})
RETURN n
""" % (meta_type_label, type_label)
with manager.session as s:
if legacy:
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n'].properties
return s.run(q, {'name': name, 'handle_id': handle_id}).single()['n']
|
Creates a node with the mandatory attributes name and handle_id also sets type label.
:param manager: Manager to handle sessions and transactions
:param name: Node name
:param meta_type_label: Node meta type
:param type_label: Node label
:param handle_id: Unique id
:param legacy: Backwards compatibility
:type manager: norduniclient.contextmanager.Neo4jDBSessionManager
:type name: str|unicode
:type meta_type_label: str|unicode
:type type_label: str|unicode
:type handle_id: str|unicode
:type legacy: Boolean
:rtype: dict|neo4j.v1.types.Node
|
def formatters(*chained_formatters):
"""
Chain formatter functions.
:param chained_formatters:
:type chained_formatters:
:return:
:rtype:
"""
def formatters_chain(input_string): # pylint:disable=missing-docstring
for chained_formatter in chained_formatters:
input_string = chained_formatter(input_string)
return input_string
return formatters_chain
|
Chain formatter functions.
:param chained_formatters:
:type chained_formatters:
:return:
:rtype:
|
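A small usage sketch for formatters above, chaining two built-in string methods.
clean = formatters(str.strip, str.lower)
assert clean('  Hello World  ') == 'hello world'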
def get_dataarg(args):
"""Retrieve the world 'data' argument from a set of input parameters.
"""
for i, arg in enumerate(args):
if is_nested_config_arg(arg):
return i, arg
elif is_std_config_arg(arg):
return i, {"config": arg}
elif isinstance(arg, (list, tuple)) and is_nested_config_arg(arg[0]):
return i, arg[0]
raise ValueError("Did not find configuration or data object in arguments: %s" % args)
|
Retrieve the world 'data' argument from a set of input parameters.
|
def plot(self, coordinates, directed=False, weighted=False, fig='current',
ax=None, edge_style=None, vertex_style=None, title=None, cmap=None):
'''Plot the graph using matplotlib in 2 or 3 dimensions.
coordinates : (n,2) or (n,3) array of vertex coordinates
directed : if True, edges have arrows indicating direction.
weighted : if True, edges are colored by their weight.
fig : a matplotlib Figure to use, or one of {'new','current'}. Defaults to
'current', which will call gcf(). Only used when ax=None.
ax : a matplotlib Axes to use. Defaults to gca()
edge_style : string or dict of styles for edges. Defaults to 'k-'
vertex_style : string or dict of styles for vertices. Defaults to 'ko'
title : string to display as the plot title
cmap : a matplotlib Colormap to use for edge weight coloring
'''
X = np.atleast_2d(coordinates)
assert 0 < X.shape[1] <= 3, 'too many dimensions to plot'
if X.shape[1] == 1:
X = np.column_stack((np.arange(X.shape[0]), X))
is_3d = (X.shape[1] == 3)
if ax is None:
ax = _get_axis(is_3d, fig)
edge_kwargs = dict(colors='k', linestyles='-', linewidths=1, zorder=1)
vertex_kwargs = dict(marker='o', c='k', s=20, edgecolor='none', zorder=2)
if edge_style is not None:
if not isinstance(edge_style, dict):
edge_style = _parse_fmt(edge_style, color_key='colors')
edge_kwargs.update(edge_style)
if vertex_style is not None:
if not isinstance(vertex_style, dict):
vertex_style = _parse_fmt(vertex_style, color_key='c')
vertex_kwargs.update(vertex_style)
if weighted and self.is_weighted():
edge_kwargs['array'] = self.edge_weights()
if directed and self.is_directed():
_directed_edges(self, X, ax, is_3d, edge_kwargs, cmap)
else:
_undirected_edges(self, X, ax, is_3d, edge_kwargs, cmap)
ax.scatter(*X.T, **vertex_kwargs)
ax.autoscale_view()
if title:
ax.set_title(title)
return pyplot.show
|
Plot the graph using matplotlib in 2 or 3 dimensions.
coordinates : (n,2) or (n,3) array of vertex coordinates
directed : if True, edges have arrows indicating direction.
weighted : if True, edges are colored by their weight.
fig : a matplotlib Figure to use, or one of {'new','current'}. Defaults to
'current', which will call gcf(). Only used when ax=None.
ax : a matplotlib Axes to use. Defaults to gca()
edge_style : string or dict of styles for edges. Defaults to 'k-'
vertex_style : string or dict of styles for vertices. Defaults to 'ko'
title : string to display as the plot title
cmap : a matplotlib Colormap to use for edge weight coloring
|
def connect(self, component):
"""Connect two ThreadPools.
The ``in_queue`` of the second pool will be set as the ``out_queue`` of
the current pool, thus all the output will be input to the second pool.
Args:
component (ThreadPool): the ThreadPool to be connected.
Returns:
ThreadPool: the modified second ThreadPool.
"""
if not isinstance(component, ThreadPool):
raise TypeError('"component" must be a ThreadPool object')
component.in_queue = self.out_queue
return component
|
Connect two ThreadPools.
The ``in_queue`` of the second pool will be set as the ``out_queue`` of
the current pool, thus all the output will be input to the second pool.
Args:
component (ThreadPool): the ThreadPool to be connected.
Returns:
ThreadPool: the modified second ThreadPool.
|
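A sketch of how connect above chains pools into a pipeline; download_pool, parse_pool and save_pool are hypothetical ThreadPool instances created elsewhere.
download_pool.connect(parse_pool).connect(save_pool)
# parse_pool now consumes download_pool's out_queue and save_pool consumes parse_pool's,
# so items flow download -> parse -> save without any manual queue wiring.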
def mixedSuites(self, tests):
"""The complex case where there are tests that don't all share
the same context. Groups tests into suites with common ancestors,
according to the following (essentially tail-recursive) procedure:
Starting with the context of the first test, if it is not
None, look for tests in the remaining tests that share that
ancestor. If any are found, group into a suite with that
ancestor as the context, and replace the current suite with
that suite. Continue this process for each ancestor of the
first test, until all ancestors have been processed. At this
point if any tests remain, recurse with those tests as the
input, returning a list of the common suite (which may be the
suite or test we started with, if no common tests were found)
plus the results of recursion.
"""
if not tests:
return []
head = tests.pop(0)
if not tests:
return [head] # short circuit when none are left to combine
suite = head # the common ancestry suite, so far
tail = tests[:]
context = getattr(head, 'context', None)
if context is not None:
ancestors = [context] + [a for a in self.ancestry(context)]
for ancestor in ancestors:
common = [suite] # tests with ancestor in common, so far
remain = [] # tests that remain to be processed
for test in tail:
found_common = False
test_ctx = getattr(test, 'context', None)
if test_ctx is None:
remain.append(test)
continue
if test_ctx is ancestor:
common.append(test)
continue
for test_ancestor in self.ancestry(test_ctx):
if test_ancestor is ancestor:
common.append(test)
found_common = True
break
if not found_common:
remain.append(test)
if common:
suite = self.makeSuite(common, ancestor)
tail = self.mixedSuites(remain)
return [suite] + tail
|
The complex case where there are tests that don't all share
the same context. Groups tests into suites with common ancestors,
according to the following (essentially tail-recursive) procedure:
Starting with the context of the first test, if it is not
None, look for tests in the remaining tests that share that
ancestor. If any are found, group into a suite with that
ancestor as the context, and replace the current suite with
that suite. Continue this process for each ancestor of the
first test, until all ancestors have been processed. At this
point if any tests remain, recurse with those tests as the
input, returning a list of the common suite (which may be the
suite or test we started with, if no common tests were found)
plus the results of recursion.
|
def create(genome, config):
""" Receives a genome and returns its phenotype (a FeedForwardNetwork). """
# Gather expressed connections.
connections = [cg.key for cg in itervalues(genome.connections) if cg.enabled]
layers = feed_forward_layers(config.genome_config.input_keys, config.genome_config.output_keys, connections)
node_evals = []
for layer in layers:
for node in layer:
inputs = []
node_expr = [] # currently unused
for conn_key in connections:
inode, onode = conn_key
if onode == node:
cg = genome.connections[conn_key]
inputs.append((inode, cg.weight))
node_expr.append("v[{}] * {:.7e}".format(inode, cg.weight))
ng = genome.nodes[node]
aggregation_function = config.genome_config.aggregation_function_defs.get(ng.aggregation)
activation_function = config.genome_config.activation_defs.get(ng.activation)
node_evals.append((node, activation_function, aggregation_function, ng.bias, ng.response, inputs))
return FeedForwardNetwork(config.genome_config.input_keys, config.genome_config.output_keys, node_evals)
|
Receives a genome and returns its phenotype (a FeedForwardNetwork).
|
def row(self):
"""
Returns the periodic table row of the element.
"""
z = self.Z
total = 0
if 57 <= z <= 71:
return 8
elif 89 <= z <= 103:
return 9
for i in range(len(_pt_row_sizes)):
total += _pt_row_sizes[i]
if total >= z:
return i + 1
return 8
|
Returns the periodic table row of the element.
|
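A quick check of row above, assuming it is exposed as a property on a pymatgen-style Element class (an assumption; only the method body is shown here).
assert Element("Fe").row == 4   # iron sits in period 4
assert Element("La").row == 8   # lanthanides are reported as row 8 by this method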
def to_function(var_instance, lineno=None):
""" Converts a var_instance to a function one
"""
assert isinstance(var_instance, SymbolVAR)
from symbols import FUNCTION
var_instance.__class__ = FUNCTION
var_instance.class_ = CLASS.function
var_instance.reset(lineno=lineno)
return var_instance
|
Converts a var_instance to a function one
|
def execute(self, conn, child_block_name='', child_lfn_list=[], transaction=False):
sql = ''
binds = {}
child_ds_name = ''
child_where = ''
if child_block_name:
child_ds_name = child_block_name.split('#')[0]
parent_where = " where d.dataset = :child_ds_name ))"
binds ={"child_ds_name": child_ds_name}
else:
dbsExceptionHandler('dbsException-invalid-input', "Missing child block_name for listFileParentsByLumi. ")
#
if not child_lfn_list:
# most use cases
child_where = " where b.block_name = :child_block_name )"
binds.update({"child_block_name": child_block_name})
sql = """
with
parents as
(
""" +\
self.parent_sql +\
parent_where +\
"""),
"""+\
"""
children as
(
""" +\
self.child_sql +\
child_where +\
""")
select distinct cid, pid from children c
inner join parents p on c.R = p.R and c.L = p.L
"""
else:
        # not common
child_where = """ where b.block_name = :child_block_name
and f.logical_file_name in (SELECT TOKEN FROM TOKEN_GENERATOR) ))
"""
lfn_generator, bind = create_token_generator(child_lfn_list)
binds.update(bind)
sql = lfn_generator +\
"""
with
parents as
(
""" +\
self.parent_sql +\
parent_where +\
"""),
"""+\
"""
children as
(
""" +\
self.child_sql +\
child_where +\
""")
select distinct cid, pid from children c
inner join parents p on c.R = p.R and c.L = p.L
"""
    #print(sql)
r = self.dbi.processData(sql, binds, conn, transaction=transaction)
#print(self.format(r))
return self.format(r)
"""
cursors = self.dbi.processData(sql, binds, conn, transaction=transaction, returnCursor=True)
for i in cursors:
d = self.formatCursor(i, size=100)
if isinstance(d, list) or isinstance(d, GeneratorType):
for elem in d:
yield elem
elif d:
yield d
"""
|
cursors = self.dbi.processData(sql, binds, conn, transaction=transaction, returnCursor=True)
for i in cursors:
d = self.formatCursor(i, size=100)
if isinstance(d, list) or isinstance(d, GeneratorType):
for elem in d:
yield elem
elif d:
yield d
|
def value(self):
""" returns the class as a dictionary """
r = {}
attributes = [attr for attr in dir(self)
if not attr.startswith('__') and \
not attr.startswith('_')]
for a in attributes:
if a != "value":
val = getattr(self, a)
if val is not None:
r[a] = val
return r
|
returns the class as a dictionary
|
def contains_ignoring_case(self, *items):
"""Asserts that val is string and contains the given item or items."""
if len(items) == 0:
raise ValueError('one or more args must be given')
if isinstance(self.val, str_types):
if len(items) == 1:
if not isinstance(items[0], str_types):
raise TypeError('given arg must be a string')
if items[0].lower() not in self.val.lower():
self._err('Expected <%s> to case-insensitive contain item <%s>, but did not.' % (self.val, items[0]))
else:
missing = []
for i in items:
if not isinstance(i, str_types):
raise TypeError('given args must all be strings')
if i.lower() not in self.val.lower():
missing.append(i)
if missing:
self._err('Expected <%s> to case-insensitive contain items %s, but did not contain %s.' % (self.val, self._fmt_items(items), self._fmt_items(missing)))
elif isinstance(self.val, Iterable):
missing = []
for i in items:
if not isinstance(i, str_types):
raise TypeError('given args must all be strings')
found = False
for v in self.val:
if not isinstance(v, str_types):
raise TypeError('val items must all be strings')
if i.lower() == v.lower():
found = True
break
if not found:
missing.append(i)
if missing:
self._err('Expected <%s> to case-insensitive contain items %s, but did not contain %s.' % (self.val, self._fmt_items(items), self._fmt_items(missing)))
else:
raise TypeError('val is not a string or iterable')
return self
|
Asserts that val is string and contains the given item or items.
|
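A usage sketch for contains_ignoring_case above, assuming the assertpy-style assert_that() entry point that wraps a value in this assertion builder (the entry point itself is not shown in this excerpt).
from assertpy import assert_that  # assumed entry point for this assertion builder

assert_that('Foo Bar').contains_ignoring_case('foo', 'BAR')   # string containment, case folded
assert_that(['Foo', 'Bar']).contains_ignoring_case('bar')     # iterable of strings, item equality case folded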
def tagre(tag, attribute, value, quote='"', before="", after=""):
"""Return a regular expression matching the given HTML tag, attribute
and value. It matches the tag and attribute names case insensitive,
and skips arbitrary whitespace and leading HTML attributes. The "<>" at
the start and end of the HTML tag is also matched.
@param tag: the tag name
@ptype tag: string
@param attribute: the attribute name
@ptype attribute: string
@param value: the attribute value
@ptype value: string
@param quote: the attribute quote (default ")
@ptype quote: string
@param after: match after attribute value but before end
@ptype after: string
@return: the generated regular expression suitable for re.compile()
@rtype: string
"""
if before:
prefix = r"[^>]*%s[^>]*\s+" % before
else:
prefix = r"(?:[^>]*\s+)?"
attrs = dict(
tag=case_insensitive_re(tag),
attribute=case_insensitive_re(attribute),
value=value,
quote=quote,
prefix=prefix,
after=after,
)
return r'<\s*%(tag)s\s+%(prefix)s%(attribute)s\s*=\s*%(quote)s%(value)s%(quote)s[^>]*%(after)s[^>]*>' % attrs
|
Return a regular expression matching the given HTML tag, attribute
and value. It matches the tag and attribute names case insensitive,
and skips arbitrary whitespace and leading HTML attributes. The "<>" at
the start and end of the HTML tag is also matched.
@param tag: the tag name
@ptype tag: string
@param attribute: the attribute name
@ptype attribute: string
@param value: the attribute value
@ptype value: string
@param quote: the attribute quote (default ")
@ptype quote: string
@param after: match after attribute value but before end
@ptype after: string
@return: the generated regular expression suitable for re.compile()
@rtype: string
|
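A usage sketch for tagre above; the link pattern and HTML snippet are illustrative, and the helper case_insensitive_re is assumed to be available in the same module.
import re

link_re = re.compile(tagre("a", "href", r'[^"]+'))
html = '<p>See <a class="ext" href="http://example.com/">here</a></p>'
assert link_re.search(html) is not None   # matches despite the leading class attribute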
def serialize_checks(check_set):
"""
Serialize a check_set for raphael
"""
check_set_list = []
for check in check_set.all()[:25]:
check_set_list.append(
{
'datetime': check.checked_datetime.isoformat(),
'value': check.response_time,
'success': 1 if check.success else 0
}
)
return check_set_list
|
Serialize a check_set for raphael
|
def firwin_bpf(N_taps, f1, f2, fs = 1.0, pass_zero=False):
"""
Design a windowed FIR bandpass filter in terms of passband
critical frequencies f1 < f2 in Hz relative to sampling rate
fs in Hz. The number of taps must be provided.
Mark Wickert October 2016
"""
    return signal.firwin(N_taps, [2 * f1 / fs, 2 * f2 / fs], pass_zero=pass_zero)
|
Design a windowed FIR bandpass filter in terms of passband
critical frequencies f1 < f2 in Hz relative to sampling rate
fs in Hz. The number of taps must be provided.
Mark Wickert October 2016
|
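A usage sketch for firwin_bpf above: a 65-tap bandpass design with a 5-7 kHz passband at a 48 kHz sampling rate, assuming scipy.signal is imported as signal as in the module.
b = firwin_bpf(65, 5000.0, 7000.0, fs=48000.0)
print(len(b))   # 65 filter coefficients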
def parse(directive):
"""
Given a string in the format `scope:directive`, or simply `scope`
or `directive`, return a Placement object suitable for passing
back over the websocket API.
"""
if not directive:
# Handle null case
return None
if isinstance(directive, (list, tuple)):
results = []
for d in directive:
results.extend(parse(d))
return results
if isinstance(directive, (dict, client.Placement)):
# We've been handed something that we can simply hand back to
# the api. (Forwards compatibility)
return [directive]
# Juju 2.0 can't handle lxc containers.
directive = directive.replace('lxc', 'lxd')
if ":" in directive:
# Planner has given us a scope and directive in string form
scope, directive = directive.split(":")
return [client.Placement(scope=scope, directive=directive)]
if directive.isdigit():
# Planner has given us a machine id (we rely on juju core to
# verify its validity.)
return [client.Placement(scope=MACHINE_SCOPE, directive=directive)]
if "/" in directive:
# e.g. "0/lxd/0"
# https://github.com/juju/juju/blob/master/instance/placement_test.go#L29
return [
client.Placement(scope=MACHINE_SCOPE, directive=directive),
]
# Planner has probably given us a container type. Leave it up to
# juju core to verify that it is valid.
return [client.Placement(scope=directive)]
|
Given a string in the format `scope:directive`, or simply `scope`
or `directive`, return a Placement object suitable for passing
back over the websocket API.
|
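A few illustrative calls to parse above; client.Placement and MACHINE_SCOPE come from the surrounding module, and the directives are made up.
parse(None)        # -> None
parse("2")         # -> [client.Placement(scope=MACHINE_SCOPE, directive="2")]
parse("lxc:1")     # -> [client.Placement(scope="lxd", directive="1")]   (lxc rewritten to lxd)
parse("0/lxd/0")   # -> [client.Placement(scope=MACHINE_SCOPE, directive="0/lxd/0")]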
def _bls_runner(times,
mags,
nfreq,
freqmin,
stepsize,
nbins,
minduration,
maxduration):
'''This runs the pyeebls.eebls function using the given inputs.
Parameters
----------
times,mags : np.array
The input magnitude time-series to search for transits.
nfreq : int
The number of frequencies to use when searching for transits.
freqmin : float
The minimum frequency of the period-search -> max period that will be
used for the search.
stepsize : float
The step-size in frequency to use to generate a frequency-grid.
nbins : int
The number of phase bins to use.
minduration : float
The minimum fractional transit duration that will be considered.
maxduration : float
The maximum fractional transit duration that will be considered.
Returns
-------
dict
Returns a dict of the form::
{
'power': the periodogram power array,
'bestperiod': the best period found,
'bestpower': the highest peak of the periodogram power,
'transdepth': transit depth found by eebls.f,
'transduration': transit duration found by eebls.f,
'transingressbin': transit ingress bin found by eebls.f,
'transegressbin': transit egress bin found by eebls.f,
}
'''
workarr_u = npones(times.size)
workarr_v = npones(times.size)
blsresult = eebls(times, mags,
workarr_u, workarr_v,
nfreq, freqmin, stepsize,
nbins, minduration, maxduration)
return {'power':blsresult[0],
'bestperiod':blsresult[1],
'bestpower':blsresult[2],
'transdepth':blsresult[3],
'transduration':blsresult[4],
'transingressbin':blsresult[5],
'transegressbin':blsresult[6]}
|
This runs the pyeebls.eebls function using the given inputs.
Parameters
----------
times,mags : np.array
The input magnitude time-series to search for transits.
nfreq : int
The number of frequencies to use when searching for transits.
freqmin : float
The minimum frequency of the period-search -> max period that will be
used for the search.
stepsize : float
The step-size in frequency to use to generate a frequency-grid.
nbins : int
The number of phase bins to use.
minduration : float
The minimum fractional transit duration that will be considered.
maxduration : float
The maximum fractional transit duration that will be considered.
Returns
-------
dict
Returns a dict of the form::
{
'power': the periodogram power array,
'bestperiod': the best period found,
'bestpower': the highest peak of the periodogram power,
'transdepth': transit depth found by eebls.f,
'transduration': transit duration found by eebls.f,
'transingressbin': transit ingress bin found by eebls.f,
'transegressbin': transit egress bin found by eebls.f,
}
|
def l2_regularizer(weight=1.0, scope=None):
"""Define a L2 regularizer.
Args:
weight: scale the loss by this factor.
scope: Optional scope for name_scope.
Returns:
a regularizer function.
"""
def regularizer(tensor):
with tf.name_scope(scope, 'L2Regularizer', [tensor]):
l2_weight = tf.convert_to_tensor(weight,
dtype=tensor.dtype.base_dtype,
name='weight')
return tf.multiply(l2_weight, tf.nn.l2_loss(tensor), name='value')
return regularizer
|
Define a L2 regularizer.
Args:
weight: scale the loss by this factor.
scope: Optional scope for name_scope.
Returns:
a regularizer function.
|
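A minimal usage sketch for l2_regularizer above, assuming TensorFlow 1.x-style graph code to match the tf.name_scope and tf.nn.l2_loss calls in the body.
import tensorflow as tf

weights = tf.Variable(tf.ones([3, 3]), name='weights')
regularize = l2_regularizer(weight=0.01)
penalty = regularize(weights)   # 0.01 * tf.nn.l2_loss(weights), i.e. 0.01 * sum(w**2) / 2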
def _year_info_pq(self, year, keyword):
"""Returns a PyQuery object containing the info from the meta div at
the top of the team year page with the given keyword.
:year: Int representing the season.
:keyword: A keyword to filter to a single p tag in the meta div.
:returns: A PyQuery object for the selected p element.
"""
doc = self.get_year_doc(year)
p_tags = doc('div#meta div:not(.logo) p')
texts = [p_tag.text_content().strip() for p_tag in p_tags]
try:
return next(
pq(p_tag) for p_tag, text in zip(p_tags, texts)
if keyword.lower() in text.lower()
)
except StopIteration:
if len(texts):
raise ValueError('Keyword not found in any p tag.')
else:
raise ValueError('No meta div p tags found.')
|
Returns a PyQuery object containing the info from the meta div at
the top of the team year page with the given keyword.
:year: Int representing the season.
:keyword: A keyword to filter to a single p tag in the meta div.
:returns: A PyQuery object for the selected p element.
|
def write_implied_format(self, path, jpeg_quality=0, jpeg_progressive=0):
"""Write pix to the filename, with the extension indicating format.
jpeg_quality -- quality (iff JPEG; 1 - 100, 0 for default)
jpeg_progressive -- (iff JPEG; 0 for baseline seq., 1 for progressive)
"""
filename = fspath(path)
with _LeptonicaErrorTrap():
lept.pixWriteImpliedFormat(
os.fsencode(filename), self._cdata, jpeg_quality, jpeg_progressive
)
|
Write pix to the filename, with the extension indicating format.
jpeg_quality -- quality (JPEG only; 1 - 100, 0 for default)
jpeg_progressive -- (JPEG only; 0 for baseline seq., 1 for progressive)
|
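A hypothetical usage sketch, assuming `pix` is an already-loaded Pix object from this leptonica wrapper; the file names are illustrative.
# The output format is chosen from the extension; the JPEG arguments
# only take effect when writing a .jpg file
pix.write_implied_format('scan.png')                   # lossless PNG
pix.write_implied_format('scan.jpg', jpeg_quality=85)  # JPEG at quality 85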
def message_to_dict(msg):
"""Convert an email message into a dictionary.
This function transforms an `email.message.Message` object
into a dictionary. Headers are stored as key:value pairs
while the body of the message is stored under the `body` key.
The body may contain two keys: 'plain' for plain-text parts
and 'html' for HTML-encoded parts.
The returned dictionary is of type `requests.structures.CaseInsensitiveDict`
because the same header can appear with different capitalization
in the same message.
:param msg: email message of type `email.message.Message`
:returns: dictionary of type `requests.structures.CaseInsensitiveDict`
:raises ParseError: when an error occurs transforming the message
to a dictionary
"""
def parse_headers(msg):
headers = {}
for header, value in msg.items():
hv = []
for text, charset in email.header.decode_header(value):
if isinstance(text, bytes):
charset = charset if charset else 'utf-8'
try:
text = text.decode(charset, errors='surrogateescape')
except (UnicodeError, LookupError):
# Try again with a 7bit encoding
text = text.decode('ascii', errors='surrogateescape')
hv.append(text)
v = ' '.join(hv)
headers[header] = v if v else None
return headers
def parse_payload(msg):
body = {}
if not msg.is_multipart():
payload = decode_payload(msg)
subtype = msg.get_content_subtype()
body[subtype] = [payload]
else:
# Include all the attached texts if it is multipart
# Ignores binary parts by default
for part in email.iterators.typed_subpart_iterator(msg):
payload = decode_payload(part)
subtype = part.get_content_subtype()
body.setdefault(subtype, []).append(payload)
return {k: '\n'.join(v) for k, v in body.items()}
def decode_payload(msg_or_part):
charset = msg_or_part.get_content_charset('utf-8')
payload = msg_or_part.get_payload(decode=True)
try:
payload = payload.decode(charset, errors='surrogateescape')
except (UnicodeError, LookupError):
# Try again with a 7bit encoding
payload = payload.decode('ascii', errors='surrogateescape')
return payload
# The function starts here
message = requests.structures.CaseInsensitiveDict()
if isinstance(msg, mailbox.mboxMessage):
message['unixfrom'] = msg.get_from()
else:
message['unixfrom'] = None
try:
for k, v in parse_headers(msg).items():
message[k] = v
message['body'] = parse_payload(msg)
except UnicodeError as e:
raise ParseError(cause=str(e))
return message
|
Convert an email message into a dictionary.
This function transforms an `email.message.Message` object
into a dictionary. Headers are stored as key:value pairs
while the body of the message is stored under the `body` key.
The body may contain two keys: 'plain' for plain-text parts
and 'html' for HTML-encoded parts.
The returned dictionary is of type `requests.structures.CaseInsensitiveDict`
because the same header can appear with different capitalization
in the same message.
:param msg: email message of type `email.message.Message`
:returns: dictionary of type `requests.structures.CaseInsensitiveDict`
:raises ParseError: when an error occurs transforming the message
to a dictionary
|
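A minimal sketch of calling message_to_dict, assuming this module's own imports (email, mailbox, requests) are available; it builds a simple single-part message with the standard library.
from email.message import EmailMessage

msg = EmailMessage()
msg['From'] = 'alice@example.com'
msg['Subject'] = 'Hello'
msg.set_content('plain text body')

d = message_to_dict(msg)
print(d['subject'])        # case-insensitive header lookup: 'Hello'
print(d['body']['plain'])  # 'plain text body\n'
print(d['unixfrom'])       # None (not an mboxMessage)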
def update(self, name=None, metadata=None):
"""
Updates this webhook. One or more of the parameters may be specified.
"""
return self.policy.update_webhook(self, name=name, metadata=metadata)
|
Updates this webhook. One or more of the parameters may be specified.
|
def sparse(x0, rho, gamma):
"""
Proximal operator for the l1 norm (induces sparsity)
Parameters
----------
x0 : array_like
The starting or initial point used in the proximal update step
rho : float
Momentum parameter for the proximal step (larger value -> stays closer to x0)
gamma : float
A constant that weights how strongly to enforce the constraint
Returns
-------
theta : array_like
The parameter vector found after running the proximal update step
"""
lmbda = float(gamma) / rho
return (x0 - lmbda) * (x0 >= lmbda) + (x0 + lmbda) * (x0 <= -lmbda)
|
Proximal operator for the l1 norm (induces sparsity)
Parameters
----------
x0 : array_like
The starting or initial point used in the proximal update step
rho : float
Momentum parameter for the proximal step (larger value -> stays closer to x0)
gamma : float
A constant that weights how strongly to enforce the constraint
Returns
-------
theta : array_like
The parameter vector found after running the proximal update step
|
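A quick numeric check of the soft-thresholding behavior of the sparse function above, assuming numpy; with rho=1 and gamma=0.5 the threshold lmbda is 0.5.
import numpy as np

x0 = np.array([-2.0, -0.3, 0.0, 0.3, 2.0])
theta = sparse(x0, rho=1.0, gamma=0.5)
# Entries with |x0| <= 0.5 are zeroed; the rest shrink toward zero by 0.5
print(theta)  # [-1.5  0.   0.   0.   1.5]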
def _sample_condition(exp_condition, frame_times, oversampling=50,
min_onset=-24):
"""Make a possibly oversampled event regressor from condition information.
Parameters
----------
exp_condition : arraylike of shape (3, n_events)
yields description of events for this condition as a
(onsets, durations, amplitudes) triplet
frame_times : array of shape(n_scans)
sample time points
oversampling : int, optional
factor for oversampling event regressor
min_onset : float, optional
minimal onset relative to frame_times[0] (in seconds)
events that start before frame_times[0] + min_onset are not considered
Returns
-------
regressor : array of shape (oversampling * n_scans)
possibly oversampled event regressor
hr_frame_times : array of shape (oversampling * n_scans)
time points used for regressor sampling
"""
# Find the high-resolution frame_times
n = frame_times.size
min_onset = float(min_onset)
n_hr = ((n - 1) * 1. / (frame_times.max() - frame_times.min()) *
(frame_times.max() * (1 + 1. / (n - 1)) - frame_times.min() -
min_onset) * oversampling) + 1
hr_frame_times = np.linspace(frame_times.min() + min_onset,
frame_times.max() * (1 + 1. / (n - 1)),
np.rint(n_hr).astype(int))
# Get the condition information
onsets, durations, values = tuple(map(np.asanyarray, exp_condition))
if (onsets < frame_times[0] + min_onset).any():
warnings.warn(('Some stimulus onsets are earlier than %s in the'
' experiment and are thus not considered in the model'
% (frame_times[0] + min_onset)), UserWarning)
# Set up the regressor timecourse
tmax = len(hr_frame_times)
regressor = np.zeros_like(hr_frame_times).astype(float)
t_onset = np.minimum(np.searchsorted(hr_frame_times, onsets), tmax - 1)
regressor[t_onset] += values
t_offset = np.minimum(
np.searchsorted(hr_frame_times, onsets + durations),
tmax - 1)
# Handle the case where duration is 0 by offsetting at t + 1
for i, t in enumerate(t_offset):
if t < (tmax - 1) and t == t_onset[i]:
t_offset[i] += 1
regressor[t_offset] -= values
regressor = np.cumsum(regressor)
return regressor, hr_frame_times
|
Make a possibly oversampled event regressor from condition information.
Parameters
----------
exp_condition : arraylike of shape (3, n_events)
yields description of events for this condition as a
(onsets, durations, amplitudes) triplet
frame_times : array of shape(n_scans)
sample time points
oversampling : int, optional
factor for oversampling event regressor
min_onset : float, optional
minimal onset relative to frame_times[0] (in seconds)
events that start before frame_times[0] + min_onset are not considered
Returns
-------
regressor : array of shape (oversampling * n_scans)
possibly oversampled event regressor
hr_frame_times : array of shape (oversampling * n_scans)
time points used for regressor sampling
|
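A small sketch of calling the helper above, assuming numpy; one condition with two 5 s events is sampled on a 2 s scan grid (all values illustrative).
import numpy as np

frame_times = np.arange(0, 100, 2.0)   # 50 scans, TR = 2 s
onsets = np.array([10.0, 50.0])
durations = np.array([5.0, 5.0])
amplitudes = np.array([1.0, 1.0])

regressor, hr_times = _sample_condition(
    (onsets, durations, amplitudes), frame_times, oversampling=16)
# `regressor` is a boxcar time course on the oversampled grid `hr_times`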
def remoteLocation_resolve(self, d_remote):
"""
Resolve the remote path location
:param d_remote: the "remote" specification
:return: a string representation of the remote path
"""
b_status = False
str_remotePath = ""
if 'path' in d_remote.keys():
str_remotePath = d_remote['path']
b_status = True
if 'key' in d_remote.keys():
d_ret = self.storage_resolveBasedOnKey(key = d_remote['key'])
if d_ret['status']:
b_status = True
str_remotePath = d_ret['path']
return {
'status': b_status,
'path': str_remotePath
}
|
Resolve the remote path location
:param d_remote: the "remote" specification
:return: a string representation of the remote path
|
def add_words(self):
"""The data block must fill the entire data capacity of the QR code.
If we fall short, then we must add bytes to the end of the encoded
data field. The value of these bytes are specified in the standard.
"""
data_blocks = len(self.buffer.getvalue()) // 8
total_blocks = tables.data_capacity[self.version][self.error][0] // 8
needed_blocks = total_blocks - data_blocks
if needed_blocks == 0:
return None
# This will yield item1, item2, item1, item2, etc.
block = itertools.cycle(['11101100', '00010001'])
# Create a string of the needed blocks
return ''.join(next(block) for _ in range(needed_blocks))
|
The data block must fill the entire data capacity of the QR code.
If we fall short, then we must add bytes to the end of the encoded
data field. The value of these bytes are specified in the standard.
|
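A standalone sketch of the pad-word cycling used above, assuming a hypothetical shortfall of five codewords; the two alternating pad bytes are fixed by the QR standard.
import itertools

needed_blocks = 5  # hypothetical shortfall
block = itertools.cycle(['11101100', '00010001'])
padding = ''.join(next(block) for _ in range(needed_blocks))
print(padding)  # '1110110000010001111011000001000111101100'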
def get_b(self):
"""
Get Galactic latitude (b) corresponding to the current position
:return: Latitude
"""
try:
return self.b.value
except AttributeError:
# Transform from R.A., Dec to Galactic L, B
return self.sky_coord.transform_to('galactic').b.value
|
Get Galactic latitude (b) corresponding to the current position
:return: Latitude
|
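A rough standalone equivalent of the fallback branch, assuming astropy; it converts an ICRS position (here M31, as an example) to Galactic latitude.
from astropy.coordinates import SkyCoord
import astropy.units as u

sky_coord = SkyCoord(ra=10.68, dec=41.27, unit=u.deg, frame='icrs')  # M31
b = sky_coord.transform_to('galactic').b.value
print(b)  # roughly -21.6 degrees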
def format_result(line, line_num, txt):
""" highlight the search result """
return ' ' + str(line_num) + ': ' + line.replace(txt, '<span style="background-color: #FFFF00">' + txt + '</span>')
|
highlight the search result
|
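Usage is straightforward; every occurrence of `txt` in the line is wrapped in a yellow highlight span.
line = 'error: file not found'
print(format_result(line, 12, 'error'))
#  12: <span style="background-color: #FFFF00">error</span>: file not found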
def get_users(self, search=None, page=1, per_page=20, **kwargs):
"""
Returns a list of users from the Gitlab server
:param search: Optional search query
:param page: Page number (default: 1)
:param per_page: Number of items to list per page (default: 20, max: 100)
:return: List of Dictionaries containing users
:raise: HttpError if invalid response returned
"""
if search:
return self.get('/users', page=page, per_page=per_page, search=search, **kwargs)
return self.get('/users', page=page, per_page=per_page, **kwargs)
|
Returns a list of users from the Gitlab server
:param search: Optional search query
:param page: Page number (default: 1)
:param per_page: Number of items to list per page (default: 20, max: 100)
:return: List of Dictionaries containing users
:raise: HttpError if invalid response returned
|
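A hypothetical usage sketch, assuming `gl` is an instance of the client class this method belongs to and that the server returns the usual list of user dicts.
users = gl.get_users(search='jsmith', per_page=50)
for user in users:
    print(user['username'])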
def _parse_value_pb(value_pb, field_type):
"""Convert a Value protobuf to cell data.
:type value_pb: :class:`~google.protobuf.struct_pb2.Value`
:param value_pb: protobuf to convert
:type field_type: :class:`~google.cloud.spanner_v1.proto.type_pb2.Type`
:param field_type: type code for the value
:rtype: varies on field_type
:returns: value extracted from value_pb
:raises ValueError: if unknown type is passed
"""
if value_pb.HasField("null_value"):
return None
if field_type.code == type_pb2.STRING:
result = value_pb.string_value
elif field_type.code == type_pb2.BYTES:
result = value_pb.string_value.encode("utf8")
elif field_type.code == type_pb2.BOOL:
result = value_pb.bool_value
elif field_type.code == type_pb2.INT64:
result = int(value_pb.string_value)
elif field_type.code == type_pb2.FLOAT64:
if value_pb.HasField("string_value"):
result = float(value_pb.string_value)
else:
result = value_pb.number_value
elif field_type.code == type_pb2.DATE:
result = _date_from_iso8601_date(value_pb.string_value)
elif field_type.code == type_pb2.TIMESTAMP:
DatetimeWithNanoseconds = datetime_helpers.DatetimeWithNanoseconds
result = DatetimeWithNanoseconds.from_rfc3339(value_pb.string_value)
elif field_type.code == type_pb2.ARRAY:
result = [
_parse_value_pb(item_pb, field_type.array_element_type)
for item_pb in value_pb.list_value.values
]
elif field_type.code == type_pb2.STRUCT:
result = [
_parse_value_pb(item_pb, field_type.struct_type.fields[i].type)
for (i, item_pb) in enumerate(value_pb.list_value.values)
]
else:
raise ValueError("Unknown type: %s" % (field_type,))
return result
|
Convert a Value protobuf to cell data.
:type value_pb: :class:`~google.protobuf.struct_pb2.Value`
:param value_pb: protobuf to convert
:type field_type: :class:`~google.cloud.spanner_v1.proto.type_pb2.Type`
:param field_type: type code for the value
:rtype: varies on field_type
:returns: value extracted from value_pb
:raises ValueError: if unknown type is passed
|
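A minimal sketch, assuming the protobuf modules named in the docstring above (struct_pb2 and the pre-2.0 Spanner type_pb2 layout); note that Spanner encodes INT64 values inside string_value.
from google.protobuf import struct_pb2
from google.cloud.spanner_v1.proto import type_pb2

value_pb = struct_pb2.Value(string_value='42')
field_type = type_pb2.Type(code=type_pb2.INT64)
print(_parse_value_pb(value_pb, field_type))  # 42, as a Python int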
def read_clusters(self, min_cluster_size):
"""Read and parse OSLOM clusters output file."""
num_found = 0
clusters = []
with open(self.get_path(OslomRunner.OUTPUT_FILE), "r") as reader:
# Read the output file two lines at a time
for line1, line2 in itertools.zip_longest(*[reader] * 2):
info = OslomRunner.RE_INFOLINE.match(line1.strip()).groups()
nodes = line2.strip().split(" ")
if len(nodes) >= min_cluster_size: # Apply min_cluster_size
clusters.append({
"id": int(info[0]),
"bs": float(info[2]),
"nodes": [{"id": self.id_remapper.get_str_id(int(n))} for n in nodes],
})
num_found += 1
return {"num_found": num_found, "clusters": clusters}
|
Read and parse OSLOM clusters output file.
|
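A standalone sketch of the two-lines-at-a-time idiom used above, assuming Python 3 (itertools.zip_longest); the OSLOM-like lines are illustrative, not real output.
import itertools

lines = ['#module 0 size: 3 bs: 0.01', '1 2 3',
         '#module 1 size: 2 bs: 0.05', '4 5']
# Passing the same iterator twice pairs consecutive items
for info_line, node_line in itertools.zip_longest(*[iter(lines)] * 2):
    print(info_line, '->', node_line.split(' '))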
def _maybe_notify_connected(self, arg):
"""
Internal helper.
.callback or .errback on all Deferreds we've returned from
`when_connected`
"""
if self._connected_listeners is None:
return
for d in self._connected_listeners:
# Twisted will turn this into an errback if "arg" is a
# Failure
d.callback(arg)
self._connected_listeners = None
|
Internal helper.
.callback or .errback on all Deferreds we've returned from
`when_connected`
|
def draw_variable_local(self, size):
""" Simulate from the Normal distribution using instance values
Parameters
----------
size : int
How many simulations to perform
Returns
----------
np.ndarray of Normal random variables
"""
return ss.norm.rvs(loc=self.mu0, scale=self.sigma0, size=size)
|
Simulate from the Normal distribution using instance values
Parameters
----------
size : int
How many simulations to perform
Returns
----------
np.ndarray of Normal random variables
|
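An equivalent standalone call, assuming scipy.stats imported as `ss` (as in this module) and illustrative prior values mu0=0, sigma0=1.
import scipy.stats as ss

samples = ss.norm.rvs(loc=0.0, scale=1.0, size=5)
print(samples)  # five draws from N(0, 1)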