code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def handle_msec_timestamp(self, m, master):
    '''special handling for MAVLink packets with a time_boot_ms field'''
    if m.get_type() == 'GLOBAL_POSITION_INT':
        # this is fix time, not boot time
        return
    msec = m.time_boot_ms
    if msec + 30000 < master.highest_msec:
        # time_boot_ms went backwards by more than 30 seconds: assume the
        # autopilot rebooted and its boot-time clock restarted
        self.say('Time has wrapped')
        print('Time has wrapped', msec, master.highest_msec)
        self.status.highest_msec = msec
        # reset per-link bookkeeping so no link is flagged as delayed
        for mm in self.mpstate.mav_master:
            mm.link_delayed = False
            mm.highest_msec = msec
        return
    # we want to detect when a link is delayed
    master.highest_msec = msec
    if msec > self.status.highest_msec:
        self.status.highest_msec = msec
    # a link is "delayed" when it lags the freshest link; only meaningful
    # with multiple links and when delay checking is enabled
    if msec < self.status.highest_msec and len(self.mpstate.mav_master) > 1 and self.mpstate.settings.checkdelay:
        master.link_delayed = True
    else:
        master.link_delayed = False
|
special handling for MAVLink packets with a time_boot_ms field
|
def atanh(x, context=None):
    """
    Return the inverse hyperbolic tangent of x.
    """
    operand = BigFloat._implicit_convert(x)
    return _apply_function_in_current_context(
        BigFloat, mpfr.mpfr_atanh, (operand,), context)
|
Return the inverse hyperbolic tangent of x.
|
def mode_str_to_int(modestr):
    """
    :param modestr: string like 755 or 644 or 100644 - only the last 6 chars will be used
    :return:
        String identifying a mode compatible to the mode methods ids of the
        stat module regarding the rwx permissions for user, group and other,
        special flags and file system flags, i.e. whether it is a symlink
        for example."""
    # Each octal digit occupies 3 bits; walk the string right-to-left so
    # digit i contributes its value shifted left by 3*i bits.
    return sum(int(digit) << (position * 3)
               for position, digit in enumerate(reversed(modestr[-6:])))
|
:param modestr: string like 755 or 644 or 100644 - only the last 6 chars will be used
:return:
String identifying a mode compatible to the mode methods ids of the
stat module regarding the rwx permissions for user, group and other,
special flags and file system flags, i.e. whether it is a symlink
for example.
|
def regex_find(pattern, content):
    """Find the given 'pattern' in 'content'.

    :param pattern: regular expression expected to match exactly once
    :param content: text to search
    :returns: the single match, or '' (after logging via cij.err) when the
        pattern matches zero times or more than once
    """
    find = re.findall(pattern, content)
    if not find:
        cij.err("pattern <%r> is invalid, no matches!" % pattern)
        cij.err("content: %r" % content)
        return ''
    if len(find) >= 2:
        # The guard fires at two-or-more hits; the old message claimed
        # "matched more than 2", which was off by one.
        cij.err("pattern <%r> is too simple, matched more than once!" % pattern)
        cij.err("content: %r" % content)
        return ''
    return find[0]
|
Find the given 'pattern' in 'content'
|
def update(self, *args, **kwargs):
    '''Preserves order if given an assoc list.

    Accepts the same arguments as ``dict.update``; when the normalized
    argument is an association list ``[(key, value), ...]`` the pairs are
    inserted one at a time so their order is preserved.
    '''
    # dict_arg normalizes *args/**kwargs into either a list of pairs or a
    # plain mapping (project helper defined elsewhere in this module)
    arg = dict_arg(*args, **kwargs)
    if isinstance(arg, list):
        for key, val in arg:
            self[key] = val
    else:
        # plain mapping: defer to the normal dict update
        super(AssocDict, self).update(arg)
|
Preserves order if given an assoc list.
|
def directive(self, name, default=None):
    """Returns the loaded directive with the specified name, or default if passed name is not present"""
    # Fall back to hug's global directive registry, then to the caller's default.
    fallback = hug.defaults.directives.get(name, default)
    local_directives = getattr(self, '_directives', {})
    return local_directives.get(name, fallback)
|
Returns the loaded directive with the specified name, or default if passed name is not present
|
def update_server_map(self, config):
    """update server_map ({member_id:hostname})"""
    # Rebuild the id -> host mapping from the replica-set member list.
    self.server_map = {member['_id']: member['host']
                       for member in config['members']}
|
update server_map ({member_id:hostname})
|
def create_queue(self, queue_name, queue=None, fail_on_exist=False):
    '''
    Creates a new queue. Once created, this queue's resource manifest is
    immutable.

    queue_name:
        Name of the queue to create.
    queue:
        Queue object to create.
    fail_on_exist:
        Specify whether to throw an exception when the queue exists.
    '''
    _validate_not_none('queue_name', queue_name)
    # Build a PUT request against the queue's path, with the queue
    # definition serialized as XML in the request body.
    request = HTTPRequest()
    request.method = 'PUT'
    request.host = self._get_host()
    request.path = '/' + _str(queue_name) + ''
    request.body = _get_request_body(_convert_queue_to_xml(queue))
    request.path, request.query = self._httpclient._update_request_uri_query(request) # pylint: disable=protected-access
    request.headers = self._update_service_bus_header(request)
    if not fail_on_exist:
        # Best-effort mode: report success/failure via the boolean return
        # value instead of raising when the queue already exists.
        try:
            self._perform_request(request)
            return True
        except AzureHttpError as ex:
            # re-raises unless the error is the "already exists" conflict
            _dont_fail_on_exist(ex)
            return False
    else:
        self._perform_request(request)
        return True
|
Creates a new queue. Once created, this queue's resource manifest is
immutable.
queue_name:
Name of the queue to create.
queue:
Queue object to create.
fail_on_exist:
Specify whether to throw an exception when the queue exists.
|
def dedent(s):
    """Remove common leading whitespace from every line after the first.

    The first line is kept untouched; the remainder is run through
    textwrap.dedent(). Note that an input containing no newline comes
    back with one appended (head + '\\n' + empty dedented tail).
    """
    head, _, tail = s.partition('\n')
    return head + '\n' + textwrap.dedent(tail)
|
Removes the hanging dedent from all the first line of a string.
|
def msg2agent(msg, processor=None, legacy=False, **config):
    """ Return the single username who is the "agent" for an event.
    An "agent" is the one responsible for the event taking place, for example,
    if one person gives karma to another, then both usernames are returned by
    msg2usernames, but only the one who gave the karma is returned by
    msg2agent.
    If the processor registered to handle the message does not provide an
    agent method, then the *first* user returned by msg2usernames is returned
    (whether that is correct or not). Here we assume that if a processor
    implements `agent`, then it knows what it is doing and we should trust
    that. But if it does not implement it, we'll try our best guess.
    If there are no users returned by msg2usernames, then None is returned.
    """
    # NOTE(review): despite the ``processor=None`` default, a None processor
    # would raise AttributeError on the next line -- confirm callers always
    # pass a processor (``legacy`` is also unused here).
    if processor.agent is not NotImplemented:
        return processor.agent(msg, **config)
    else:
        usernames = processor.usernames(msg, **config)
        # usernames is a set(), which doesn't support indexing.
        # set.pop() returns an arbitrary element, so "first" is not
        # deterministic here.
        if usernames:
            return usernames.pop()
    # default to None if we can't find anything
    return None
|
Return the single username who is the "agent" for an event.
An "agent" is the one responsible for the event taking place, for example,
if one person gives karma to another, then both usernames are returned by
msg2usernames, but only the one who gave the karma is returned by
msg2agent.
If the processor registered to handle the message does not provide an
agent method, then the *first* user returned by msg2usernames is returned
(whether that is correct or not). Here we assume that if a processor
implements `agent`, then it knows what it is doing and we should trust
that. But if it does not implement it, we'll try our best guess.
If there are no users returned by msg2usernames, then None is returned.
|
def rate_limit_info():
    """ Returns (requests_remaining, minutes_to_reset) """
    import json
    import time
    response = requests.get(gh_url + "/rate_limit", auth=login.auth())
    core = json.loads(response.text)["resources"]["core"]
    # "reset" is a unix timestamp; convert the remaining wait to minutes
    minutes_left = (core["reset"] - time.time()) / 60
    return core["remaining"], minutes_left
|
Returns (requests_remaining, minutes_to_reset)
|
async def on_message(message):
    """The on_message event handler for this module
    Args:
        message (discord.Message): Input message
    """
    # Simplify message info
    server = message.server
    author = message.author
    channel = message.channel
    content = message.content
    data = datatools.get_data()
    # NOTE(review): assumes ``server`` is not None here; a direct message
    # (server is None) would raise on ``server.id`` -- confirm upstream filtering
    if not data["discord"]["servers"][server.id][_data.modulename]["activated"]:
        return
    # Only reply to server messages and don't reply to myself
    if server is not None and author != channel.server.me:
        # Commands section
        prefix = data["discord"]["servers"][server.id]["prefix"]
        if content.startswith(prefix):
            # Parse message
            package = content.split(" ")
            command = package[0][len(prefix):]
            args = package[1:]
            arg = ' '.join(args)
            # Lock on to server if not yet locked
            if server.id not in _data.cache or _data.cache[server.id].state == 'destroyed':
                _data.cache[server.id] = _musicplayer.MusicPlayer(server.id)
            # Remove message
            if command in ['play', 'playnext', 'playnow', 'playshuffle', 'insert',
                           'pause', 'resume', 'skip', 'remove',
                           'rewind', 'restart', 'shuffle', 'volume',
                           'stop', 'destroy', 'front', 'movehere',
                           'settopic', 'cleartopic', 'notopic', 'loop']:
                try:
                    await client.delete_message(message)
                except discord.errors.NotFound:
                    logger.warning("Could not delete music player command message - NotFound")
                except discord.errors.Forbidden:
                    logger.warning("Could not delete music player command message - Forbidden")
            # Commands
            # NOTE: the 'play*' variants are independent ``if`` statements,
            # so the ``elif`` chain below attaches to the 'insert' check.
            if command == 'play':
                await _data.cache[server.id].play(author, channel, arg)
            if command == 'playnext':
                await _data.cache[server.id].play(author, channel, arg, index=1)
            if command == 'playnow':
                await _data.cache[server.id].play(author, channel, arg, index=1, stop_current=True)
            if command == 'playshuffle':
                await _data.cache[server.id].play(author, channel, arg, shuffle=True)
            if command == 'insert':
                # 'insert <index> <query>'; falls back to a plain play when
                # no index was supplied
                if len(args) >= 2:
                    index = args[0]
                    query = ' '.join(args[1:])
                    await _data.cache[server.id].play(author, channel, query, index=index)
                else:
                    await _data.cache[server.id].play(author, channel, arg)
            elif command == 'pause':
                await _data.cache[server.id].pause()
            elif command == 'resume':
                await _data.cache[server.id].resume()
            elif command == 'skip':
                await _data.cache[server.id].skip(query=arg)
            elif command == 'remove':
                await _data.cache[server.id].remove(index=arg)
            elif command == 'rewind':
                await _data.cache[server.id].rewind(query=arg)
            elif command == 'restart':
                await _data.cache[server.id].rewind(query="0")
            elif command == 'shuffle':
                await _data.cache[server.id].shuffle()
            elif command == 'loop':
                await _data.cache[server.id].set_loop(arg)
            elif command == 'stop':
                await _data.cache[server.id].stop(log_stop=True)
            elif command == 'volume':
                await _data.cache[server.id].setvolume(arg)
            elif command == 'settopic':
                await _data.cache[server.id].set_topic_channel(channel)
            elif command == 'cleartopic' or command == 'notopic':
                await _data.cache[server.id].clear_topic_channel(channel)
            elif command == 'nowplaying':
                await _data.cache[server.id].nowplaying_info(channel)
            elif command == 'destroy':
                await _data.cache[server.id].destroy()
            elif command == 'front' or command == 'movehere':
                await _data.cache[server.id].movehere(channel)
|
The on_message event handler for this module
Args:
message (discord.Message): Input message
|
def stop(self):
    """ Stops the video stream and resets the clock. """
    logger.debug("Stopping playback")
    # Halt the playback clock first, then mark the player ready
    # to accept a new stream.
    self.clock.stop()
    self.status = READY
|
Stops the video stream and resets the clock.
|
def _tobytes(self, skipprepack = False):
    '''
    Convert the struct to bytes. This is the standard way to convert a NamedStruct to bytes.
    :param skipprepack: if True, the prepack stage is skipped. For parser internal use.
    :returns: converted bytes
    '''
    # Serialize into an in-memory buffer and hand back its contents.
    buf = BytesIO()
    self._tostream(buf, skipprepack)
    return buf.getvalue()
|
Convert the struct to bytes. This is the standard way to convert a NamedStruct to bytes.
:param skipprepack: if True, the prepack stage is skipped. For parser internal use.
:returns: converted bytes
|
def from_json(cls, data):
    """Return object based on JSON / dict input
    Args:
        data (dict): Dictionary containing a serialized User object
    Returns:
        :obj:`User`: User object representing the data
    """
    instance = cls()
    # Map the JSON camelCase keys onto snake_case attributes; a missing
    # key raises KeyError, same as direct indexing would.
    for attr_name, json_key in (('user_id', 'userId'),
                                ('username', 'username'),
                                ('auth_system', 'authSystem'),
                                ('roles', 'roles')):
        setattr(instance, attr_name, data[json_key])
    return instance
|
Return object based on JSON / dict input
Args:
data (dict): Dictionary containing a serialized User object
Returns:
:obj:`User`: User object representing the data
|
def get_normalized(self):
    """Return a unit-length vector pointing the same way, or ``self``
    unchanged when this is the zero vector (magnitude 0)."""
    magnitude = self.get_magnitude()
    if magnitude <= 0:
        # nothing to scale: the zero vector normalizes to itself
        return self
    scale = 1.0 / magnitude
    return Point(self.x * scale, self.y * scale)
|
Returns a vector of unit length, unless it is the zero
vector, in which case it is left as is.
|
def fix_e702(self, result, logical):
    """Put semicolon-separated compound statement on separate lines."""
    if not logical:
        return []  # pragma: no cover
    logical_lines = logical[2]
    # Avoid applying this when indented.
    # https://docs.python.org/reference/compound_stmts.html
    for line in logical_lines:
        if (result['id'] == 'E702' and ':' in line
                and STARTSWITH_DEF_REGEX.match(line)):
            return []
    # pycodestyle reports 1-based line/column numbers
    line_index = result['line'] - 1
    target = self.source[line_index]
    if target.rstrip().endswith('\\'):
        # Normalize '1; \\\n2' into '1; 2'.
        self.source[line_index] = target.rstrip('\n \r\t\\')
        self.source[line_index + 1] = self.source[line_index + 1].lstrip()
        return [line_index + 1, line_index + 2]
    if target.rstrip().endswith(';'):
        # trailing semicolon: just strip it
        self.source[line_index] = target.rstrip('\n \r\t;') + '\n'
        return [line_index + 1]
    # Split the line at the reported semicolon column, reusing the
    # logical line's indentation for the second half.
    offset = result['column'] - 1
    first = target[:offset].rstrip(';').rstrip()
    second = (_get_indentation(logical_lines[0]) +
              target[offset:].lstrip(';').lstrip())
    # Find inline comment.
    inline_comment = None
    if target[offset:].lstrip(';').lstrip()[:2] == '# ':
        inline_comment = target[offset:].lstrip(';')
    if inline_comment:
        # keep the comment attached to the first statement
        self.source[line_index] = first + inline_comment
    else:
        self.source[line_index] = first + '\n' + second
    return [line_index + 1]
|
Put semicolon-separated compound statement on separate lines.
|
def set_mode(self, mode):
    """Set SPI mode which controls clock polarity and phase. Should be a
    numeric value 0, 1, 2, or 3. See wikipedia page for details on meaning:
    http://en.wikipedia.org/wiki/Serial_Peripheral_Interface_Bus
    """
    # Guard clause: only SPI modes 0-3 exist.
    if not 0 <= mode <= 3:
        raise ValueError('Mode must be a value 0, 1, 2, or 3.')
    self._device.mode(mode)
|
Set SPI mode which controls clock polarity and phase. Should be a
numeric value 0, 1, 2, or 3. See wikipedia page for details on meaning:
http://en.wikipedia.org/wiki/Serial_Peripheral_Interface_Bus
|
def _norm_perm_list_from_perm_dict(self, perm_dict):
    """Return a minimal, ordered, hashable list of subjects and permissions."""
    high_perm_dict = self._highest_perm_dict_from_perm_dict(perm_dict)
    result = []
    # Walk subjects in canonical order, skipping empty/missing entries.
    for subject in ORDERED_PERM_LIST:
        perms = high_perm_dict.get(subject, False)
        if perms:
            result.append([subject, sorted(perms)])
    return result
|
Return a minimal, ordered, hashable list of subjects and permissions.
|
def isObjectClassified(self, objectName, minOverlap=None, maxL2Size=None):
    """
    Return True if objectName is currently unambiguously classified by every L2
    column. Classification is correct and unambiguous if the current L2 overlap
    with the true object is greater than minOverlap and if the size of the L2
    representation is no more than maxL2Size
    :param minOverlap: min overlap to consider the object as recognized.
        Defaults to half of the SDR size
    :param maxL2Size: max size for the L2 representation
        Defaults to 1.5 * SDR size
    :return: True/False
    """
    L2Representation = self.getL2Representations()
    objectRepresentation = self.objectL2Representations[objectName]
    sdrSize = self.config["L2Params"]["sdrSize"]
    if minOverlap is None:
        minOverlap = sdrSize / 2
    if maxL2Size is None:
        maxL2Size = 1.5*sdrSize
    numCorrectClassifications = 0
    # NOTE: xrange implies this is Python 2 code; sdrSize / 2 above is
    # integer division there.
    for col in xrange(self.numColumns):
        # set intersection: cells active both in the current L2 state and
        # in the stored representation of the named object
        overlapWithObject = len(objectRepresentation[col] & L2Representation[col])
        if ( overlapWithObject >= minOverlap and
             len(L2Representation[col]) <= maxL2Size ):
            numCorrectClassifications += 1
    # classified only when every column agrees
    return numCorrectClassifications == self.numColumns
|
Return True if objectName is currently unambiguously classified by every L2
column. Classification is correct and unambiguous if the current L2 overlap
with the true object is greater than minOverlap and if the size of the L2
representation is no more than maxL2Size
:param minOverlap: min overlap to consider the object as recognized.
Defaults to half of the SDR size
:param maxL2Size: max size for the L2 representation
Defaults to 1.5 * SDR size
:return: True/False
|
def is_date_type(cls):
    """Return True if the class is a date type (date but not datetime)."""
    # Non-classes (instances, other objects) are never date types.
    if not isinstance(cls, type):
        return False
    is_date_subclass = issubclass(cls, date)
    return is_date_subclass and not issubclass(cls, datetime)
|
Return True if the class is a date type.
|
def _from_pointer(pointer, incref):
    """Wrap an existing :c:type:`cairo_font_face_t *` cdata pointer.
    :type incref: bool
    :param incref:
        Whether increase the :ref:`reference count <refcounting>` now.
    :return:
        A new instance of :class:`FontFace` or one of its sub-classes,
        depending on the face’s type.
    """
    if pointer == ffi.NULL:
        raise ValueError('Null pointer')
    if incref:
        cairo.cairo_font_face_reference(pointer)
    # Static-style factory: pick the wrapper class from the face's C-level
    # type, allocate without calling any subclass constructor, then bind
    # the pointer via the base-class __init__.
    self = object.__new__(FONT_TYPE_TO_CLASS.get(
        cairo.cairo_font_face_get_type(pointer), FontFace))
    FontFace.__init__(self, pointer)  # Skip the subclass’s __init__
    return self
|
Wrap an existing :c:type:`cairo_font_face_t *` cdata pointer.
:type incref: bool
:param incref:
Whether increase the :ref:`reference count <refcounting>` now.
:return:
A new instance of :class:`FontFace` or one of its sub-classes,
depending on the face’s type.
|
def ansible_inventory_temp_file(
    self, keys=None
):
    """
    Context manager which returns Ansible inventory written on a tempfile.
    This is the same as :func:`~ansible_inventory`, only the inventory file
    is written to a tempfile.

    Args:
        keys (list of str): Path to the keys that will be used to
            create groups. Defaults to ['vm-type', 'groups', 'vm-provider'].
    Yields:
        tempfile.NamedTemporaryFile: Temp file containing the inventory
    """
    # A mutable default argument would be shared across all calls (and any
    # callee mutation would leak between them); use a None sentinel instead.
    if keys is None:
        keys = ['vm-type', 'groups', 'vm-provider']
    lansible = LagoAnsible(self._prefix)
    return lansible.get_inventory_temp_file(keys=keys)
|
Context manager which returns Ansible inventory written on a tempfile.
This is the same as :func:`~ansible_inventory`, only the inventory file
is written to a tempfile.
Args:
keys (list of str): Path to the keys that will be used to
create groups.
Yields:
tempfile.NamedTemporaryFile: Temp file containing the inventory
|
def _deserialize_key(cls, key):
    """
    Append the overlap suffix to keys that collide with reserved names.

    :type key: str
    :rtype: str
    """
    suffix = cls._SUFFIX_KEY_OVERLAPPING if key in cls._KEYS_OVERLAPPING else ''
    return key + suffix
|
:type key: str
:rtype: str
|
def from_scalars(**kwargs):
    """Similar to from_arrays, but convenient for a DataFrame of length 1.
    Example:
    >>> import vaex
    >>> df = vaex.from_scalars(x=1, y=2)
    :rtype: DataFrame
    """
    import numpy as np
    # Promote each scalar to a length-1 numpy array, then delegate.
    arrays = {}
    for name, scalar in kwargs.items():
        arrays[name] = np.array([scalar])
    return from_arrays(**arrays)
|
Similar to from_arrays, but convenient for a DataFrame of length 1.
Example:
>>> import vaex
>>> df = vaex.from_scalars(x=1, y=2)
:rtype: DataFrame
|
def _is_dtype_type(arr_or_dtype, condition):
    """
    Return a boolean if the condition is satisfied for the arr_or_dtype.

    Parameters
    ----------
    arr_or_dtype : array-like
        The array-like or dtype object whose dtype we want to extract.
    condition : callable[Union[np.dtype, ExtensionDtypeType]]

    Returns
    -------
    bool : if the condition is satisfied for the arr_or_dtype
    """
    if arr_or_dtype is None:
        return condition(type(None))

    # fastpath
    if isinstance(arr_or_dtype, np.dtype):
        return condition(arr_or_dtype.type)
    elif isinstance(arr_or_dtype, type):
        if issubclass(arr_or_dtype, (PandasExtensionDtype, ExtensionDtype)):
            arr_or_dtype = arr_or_dtype.type
        return condition(np.dtype(arr_or_dtype).type)
    # NOTE: a second ``elif arr_or_dtype is None`` branch used to sit here;
    # it was unreachable because the identical check at the top of the
    # function already returned, so it has been removed.

    # if we have an array-like
    if hasattr(arr_or_dtype, 'dtype'):
        arr_or_dtype = arr_or_dtype.dtype

    # we are not possibly a dtype
    elif is_list_like(arr_or_dtype):
        return condition(type(None))

    try:
        tipo = pandas_dtype(arr_or_dtype).type
    except (TypeError, ValueError, UnicodeEncodeError):
        # strings and other scalars that don't name a dtype
        if is_scalar(arr_or_dtype):
            return condition(type(None))
        return False
    return condition(tipo)
|
Return a boolean if the condition is satisfied for the arr_or_dtype.
Parameters
----------
arr_or_dtype : array-like
The array-like or dtype object whose dtype we want to extract.
condition : callable[Union[np.dtype, ExtensionDtypeType]]
Returns
-------
bool : if the condition is satisifed for the arr_or_dtype
|
def hira2kata(text, ignore=''):
    """Convert Hiragana to Full-width (Zenkaku) Katakana.
    Parameters
    ----------
    text : str
        Hiragana string.
    ignore : str
        Characters to be ignored in converting.
    Return
    ------
    str
        Katakana string.
    Examples
    --------
    >>> print(jaconv.hira2kata('ともえまみ'))
    トモエマミ
    >>> print(jaconv.hira2kata('まどまぎ', ignore='ど'))
    マどマギ
    """
    # Common case: no exclusions, use the shared table directly.
    if not ignore:
        return _convert(text, H2K_TABLE)
    # Copy the table so removing the ignored characters does not mutate
    # the module-level mapping.
    mapping = _exclude_ignorechar(ignore, H2K_TABLE.copy())
    return _convert(text, mapping)
|
Convert Hiragana to Full-width (Zenkaku) Katakana.
Parameters
----------
text : str
Hiragana string.
ignore : str
Characters to be ignored in converting.
Return
------
str
Katakana string.
Examples
--------
>>> print(jaconv.hira2kata('ともえまみ'))
トモエマミ
>>> print(jaconv.hira2kata('まどまぎ', ignore='ど'))
マどマギ
|
def makescacoldesc(columnname, value,
                   datamanagertype='',
                   datamanagergroup='',
                   options=0, maxlen=0, comment='',
                   valuetype='', keywords=None):
    """Create description of a scalar column.

    A description for a scalar column can be created from a name for
    the column and a data value, which is used only to determine the
    type of the column. Note that a dict value is also possible.
    It is possible to create the column description in more detail
    by giving the data manager name, group, option, and comment as well.
    The data manager type tells which data manager (storage manager)
    is used to store the columns. The data manager type and group are
    explained in more detail in the `casacore Tables
    <../../casacore/doc/html/group__Tables__module.html>`_ documentation.
    It returns a dict with fields `name` and `desc` which can thereafter be used
    to build a table description using function :func:`maketabdesc`.

    `columname`
      Name of column
    `value`
      Example data value used to determine the column's data type.
      It is only used if argument `valuetype` is not given.
    `datamanagertype`
      Type of data manager which can be one of StandardStMan (default)
      or IncrementalStMan. The latter one can save disk space if many subsequent
      cells in the column will have the same value.
    `datamanagergroup`
      Data manager group. Only for the expert user.
    `options`
      Options. Need not be filled in.
    `maxlen`
      Maximum length of string values in a column.
      Default 0 means unlimited.
    `comment`
      Comment: informational for user.
    `valuetype`
      A string giving the column's data type. Possible data types are
      bool (or boolean), uchar (or byte), short, int (or integer), uint,
      float, double, complex, dcomplex, and string.
    `keywords`
      A dict defining initial keywords for the column.
      Defaults to an empty dict.

    For example::

      scd1 = makescacoldesc("col2", "")
      scd2 = makescacoldesc("col1", 1, "IncrementalStMan")
      td = maketabdesc([scd1, scd2])

    This creates a table description consisting of an integer column `col1`,
    and a string column `col2`. `col1` uses the IncrementalStMan storage manager,
    while `col2` uses the default storage manager StandardStMan.
    """
    # ``keywords`` used to default to a shared mutable dict ({}) that was
    # embedded directly in every returned description; a None sentinel
    # gives each call its own fresh dict.
    if keywords is None:
        keywords = {}
    vtype = valuetype
    if vtype == '':
        # derive the type from the example value
        vtype = _value_type_name(value)
    rec2 = {'valueType': vtype,
            'dataManagerType': datamanagertype,
            'dataManagerGroup': datamanagergroup,
            'option': options,
            'maxlen': maxlen,
            'comment': comment,
            'keywords': keywords}
    return {'name': columnname,
            'desc': rec2}
|
Create description of a scalar column.
A description for a scalar column can be created from a name for
the column and a data value, which is used only to determine the
type of the column. Note that a dict value is also possible.
It is possible to create the column description in more detail
by giving the data manager name, group, option, and comment as well.
The data manager type tells which data manager (storage manager)
is used to store the columns. The data manager type and group are
explained in more detail in the `casacore Tables
<../../casacore/doc/html/group__Tables__module.html>`_ documentation.
It returns a dict with fields `name` and `desc` which can thereafter be used
to build a table description using function :func:`maketabdesc`.
`columname`
Name of column
`value`
Example data value used to determine the column's data type.
It is only used if argument `valuetype` is not given.
`datamanagertype`
Type of data manager which can be one of StandardStMan (default)
or IncrementalStMan. The latter one can save disk space if many subsequent
cells in the column will have the same value.
`datamanagergroup`
Data manager group. Only for the expert user.
`options`
Options. Need not be filled in.
`maxlen`
Maximum length of string values in a column.
Default 0 means unlimited.
`comment`
Comment: informational for user.
`valuetype`
A string giving the column's data type. Possible data types are
bool (or boolean), uchar (or byte), short, int (or integer), uint,
float, double, complex, dcomplex, and string.
'keywords'
A dict defining initial keywords for the column.
For example::
scd1 = makescacoldesc("col2", ""))
scd2 = makescacoldesc("col1", 1, "IncrementalStMan")
td = maketabdesc([scd1, scd2])
This creates a table description consisting of an integer column `col1`,
and a string column `col2`. `col1` uses the IncrementalStMan storage manager,
while `col2` uses the default storage manager StandardStMan.
|
def process_sels(self):
    """
    Process soft clause selectors participating in a new core.
    The negation :math:`\\neg{s}` of each selector literal
    :math:`s` participating in the unsatisfiable core is added
    to the list of relaxation literals, which will be later
    used to create a new totalizer object in
    :func:`create_sum`.
    If the weight associated with a selector is equal to the
    minimal weight of the core, e.g. ``self.minw``, the
    selector is marked as garbage and will be removed in
    :func:`filter_assumps`. Otherwise, the clause is split as
    described in [1]_.
    """
    # new relaxation variables
    self.rels = []
    for l in self.core_sels:
        if self.wght[l] == self.minw:
            # marking variable as being a part of the core
            # so that next time it is not used as an assump
            self.garbage.add(l)
            # reuse assumption variable as relaxation
            self.rels.append(-l)
        else:
            # do not remove this variable from assumps
            # since it has a remaining non-zero weight
            self.wght[l] -= self.minw
            # it is an unrelaxed soft clause,
            # a new relaxed copy of which we add to the solver
            # (self.topv is the current top variable id; the fresh
            # variable becomes the relaxation literal of the copy)
            self.topv += 1
            self.oracle.add_clause([l, self.topv])
            self.rels.append(self.topv)
|
Process soft clause selectors participating in a new core.
The negation :math:`\\neg{s}` of each selector literal
:math:`s` participating in the unsatisfiable core is added
to the list of relaxation literals, which will be later
used to create a new totalizer object in
:func:`create_sum`.
If the weight associated with a selector is equal to the
minimal weight of the core, e.g. ``self.minw``, the
selector is marked as garbage and will be removed in
:func:`filter_assumps`. Otherwise, the clause is split as
described in [1]_.
|
def get_class(self):
    """Return a Code class based on current ErrorType value.
    Returns:
        enum.IntEnum: class referenced by current error type.
    """
    # Map each OpenFlow error-type name to its error-code enum class.
    classes = {'OFPET_HELLO_FAILED': HelloFailedCode,
               'OFPET_BAD_REQUEST': BadRequestCode,
               'OFPET_BAD_ACTION': BadActionCode,
               'OFPET_BAD_INSTRUCTION': BadInstructionCode,
               'OFPET_BAD_MATCH': BadMatchCode,
               'OFPET_FLOW_MOD_FAILED': FlowModFailedCode,
               'OFPET_GROUP_MOD_FAILED': GroupModFailedCode,
               'OFPET_PORT_MOD_FAILED': PortModFailedCode,
               'OFPET_QUEUE_OP_FAILED': QueueOpFailedCode,
               'OFPET_SWITCH_CONFIG_FAILED': SwitchConfigFailedCode,
               'OFPET_ROLE_REQUEST_FAILED': RoleRequestFailedCode,
               'OFPET_METER_MOD_FAILED': MeterModFailedCode,
               'OFPET_TABLE_MOD_FAILED': TableModFailedCode,
               'OFPET_TABLE_FEATURES_FAILED': TableFeaturesFailedCode}
    # Unknown/unmapped error types fall back to GenericFailedCode.
    return classes.get(self.name, GenericFailedCode)
|
Return a Code class based on current ErrorType value.
Returns:
enum.IntEnum: class referenced by current error type.
|
def with_fakes(method):
    """Decorator that calls :func:`fudge.clear_calls` before method() and :func:`fudge.verify` afterwards.
    """
    @wraps(method)
    def wrapper(*args, **kwargs):
        clear_calls()
        method(*args, **kwargs)
        # verify only runs when method() raised no exception
        verify()
    return wrapper
|
Decorator that calls :func:`fudge.clear_calls` before method() and :func:`fudge.verify` afterwards.
|
def _value_to_color(value, cmap):
    """Convert a value in the range [0,1] to an RGB tuple using a colormap."""
    colormap = plt.get_cmap(cmap)
    # colormap(value) yields RGBA floats in [0,1]; drop alpha and scale to 0-255
    red, green, blue = colormap(value)[0:3]
    return [int(round(255 * channel)) for channel in (red, green, blue)]
|
Convert a value in the range [0,1] to an RGB tuple using a colormap.
|
def base_geodetic_crs(self):
    """The :class:`GeodeticCRS` on which this projection is based."""
    # Resolve the xlink:href reference on the GML baseGeodeticCRS element.
    base_element = self.element.find(GML_NS + 'baseGeodeticCRS')
    return get(base_element.attrib[XLINK_NS + 'href'])
|
The :class:`GeodeticCRS` on which this projection is based.
|
def get_subresource_path_by(resource, subresource_path):
    """Helper function to find the resource path
    :param resource: ResourceBase instance from which the path is loaded.
    :param subresource_path: JSON field to fetch the value from.
        Either a string, or a list of strings in case of a nested field.
        It should also include the '@odata.id'
    :raises: MissingAttributeError, if required path is missing.
    :raises: ValueError, if path is empty.
    :raises: AttributeError, if json attr not found in resource
    """
    # Normalize to a list of path components.
    if isinstance(subresource_path, six.string_types):
        path_parts = [subresource_path]
    elif subresource_path:
        path_parts = list(subresource_path)
    else:
        raise ValueError('"subresource_path" cannot be empty')
    # Walk down the JSON document one component at a time.
    body = resource.json
    for part in path_parts:
        body = body.get(part, {})
        if not body:
            raise exception.MissingAttributeError(
                attribute='/'.join(path_parts), resource=resource.path)
    if '@odata.id' not in body:
        raise exception.MissingAttributeError(
            attribute='/'.join(path_parts) + '/@odata.id',
            resource=resource.path)
    return body['@odata.id']
|
Helper function to find the resource path
:param resource: ResourceBase instance from which the path is loaded.
:param subresource_path: JSON field to fetch the value from.
Either a string, or a list of strings in case of a nested field.
It should also include the '@odata.id'
:raises: MissingAttributeError, if required path is missing.
:raises: ValueError, if path is empty.
:raises: AttributeError, if json attr not found in resource
|
def _get_format_from_style(self, token, style):
    """ Returns a QTextCharFormat for token by reading a Pygments style.
    """
    result = QtGui.QTextCharFormat()
    # style_for_token yields (attribute, value) pairs such as
    # ('color', '880000'), ('bold', True), ...
    items = list(style.style_for_token(token).items())
    for key, value in items:
        if value is None and key == 'color':
            # make sure to use a default visible color for the foreground
            # brush
            value = drift_color(self.background, 1000).name()
        if value:
            if key == 'color':
                result.setForeground(self._get_brush(value))
            elif key == 'bgcolor':
                result.setBackground(self._get_brush(value))
            elif key == 'bold':
                result.setFontWeight(QtGui.QFont.Bold)
            elif key == 'italic':
                result.setFontItalic(value)
            elif key == 'underline':
                result.setUnderlineStyle(
                    QtGui.QTextCharFormat.SingleUnderline)
            elif key == 'sans':
                result.setFontStyleHint(QtGui.QFont.SansSerif)
            elif key == 'roman':
                result.setFontStyleHint(QtGui.QFont.Times)
            elif key == 'mono':
                result.setFontStyleHint(QtGui.QFont.TypeWriter)
    if token in [Token.Literal.String, Token.Literal.String.Doc,
                 Token.Comment]:
        # mark strings, comments and docstrings regions for further queries
        result.setObjectType(result.UserObject)
    return result
|
Returns a QTextCharFormat for token by reading a Pygments style.
|
def adjoin(space: int, *lists: Sequence[str]) -> str:
    """Glue together two sets of strings using `space`."""
    # Column widths: each column is padded to its longest entry plus
    # `space`, except the last column which gets no trailing gap.
    widths = [max(map(len, column)) + space for column in lists[:-1]]
    widths.append(max(map(len, lists[-1])))
    num_rows = max(map(len, lists))
    rows = []
    for row in range(num_rows):
        cells = []
        for column, width in zip(lists, widths):
            # shorter columns are padded out with blank cells
            text = column[row] if row < len(column) else ''
            cells.append(text.ljust(width))
        rows.append(''.join(cells))
    return '\n'.join(rows)
|
Glue together two sets of strings using `space`.
|
def subjects(auth, label=None, project=None):
    '''
    Retrieve Subject tuples for subjects returned by this function.

    Example:
    >>> import yaxil
    >>> auth = yaxil.XnatAuth(url='...', username='...', password='...')
    >>> yaxil.subjects(auth, 'AB1234C')
    Subject(uri=u'/data/experiments/XNAT_S0001', label=u'AB1234C', id=u'XNAT_S0001',
    project=u'MyProject')

    :param auth: XNAT authentication
    :type auth: :mod:`yaxil.XnatAuth`
    :param label: XNAT Subject label
    :type label: str
    :param project: XNAT Subject Project
    :type project: str
    :returns: Subject objects
    :rtype: :mod:`yaxil.Subject`
    :raises AccessionError: if the HTTP response status is not OK
    :raises ResultSetError: if the result set fails validation
    :raises NoSubjectsError: if the query matched no records
    '''
    url = '{0}/data/subjects'.format(auth.url.rstrip('/'))
    logger.debug('issuing http request %s', url)
    # compile query string
    columns = [
        'ID',
        'label',
        'project'
    ]
    payload = {
        'columns': ','.join(columns)
    }
    if label:
        payload['label'] = label
    if project:
        payload['project'] = project
    # submit the request
    r = requests.get(url, params=payload, auth=(auth.username, auth.password),
                     verify=CHECK_CERTIFICATE)
    # validate response
    if r.status_code != requests.codes.ok:
        raise AccessionError('response not ok ({0}) from {1}'.format(r.status_code, r.url))
    try:
        results = r.json()
        __quick_validate(results)
    except ResultSetError as e:
        # FIX: exceptions have no ``.message`` attribute on Python 3;
        # formatting the exception itself is portable.
        raise ResultSetError('{0} from {1}'.format(e, r.url))
    results = results['ResultSet']
    if int(results['totalRecords']) == 0:
        raise NoSubjectsError('no records returned from {0}'.format(r.url))
    # start generating consumable results for the caller
    for item in results['Result']:
        yield Subject(uri=item['URI'],
                      id=item['ID'],
                      project=item['project'],
                      label=item['label'])
|
Retrieve Subject tuples for subjects returned by this function.
Example:
>>> import yaxil
>>> auth = yaxil.XnatAuth(url='...', username='...', password='...')
>>> yaxil.subjects(auth, 'AB1234C')
Subject(uri=u'/data/experiments/XNAT_S0001', label=u'AB1234C', id=u'XNAT_S0001',
project=u'MyProject')
:param auth: XNAT authentication
:type auth: :mod:`yaxil.XnatAuth`
:param label: XNAT Subject label
:type label: str
:param project: XNAT Subject Project
:type project: str
:returns: Subject objects
:rtype: :mod:`yaxil.Subject`
|
def add_vec_to_mat(mat, vec, axis=None, inplace=False,
                   target=None, substract=False):
    """ Add a vector to a matrix

    mat : C-contiguous GPU matrix.
    vec : GPU vector whose length must equal one side of ``mat``.
    axis : 0 adds the vector along the first dimension, 1 along the
        second; ``None`` auto-detects from the vector length (axis 0
        wins when ``mat`` is square).
    inplace : write the result back into ``mat``.
    target : optional pre-allocated output; ignored when ``inplace``.
    substract : subtract the vector instead of adding it
        (parameter name kept misspelled for backward compatibility).
    """
    assert mat.flags.c_contiguous
    if axis is None:
        # Infer the axis from which matrix dimension the vector matches.
        if vec.shape[0] == mat.shape[0]:
            axis = 0
        elif vec.shape[0] == mat.shape[1]:
            axis = 1
        else:
            raise ValueError('Vector length must be equal '
                             'to one side of the matrix')
    n, m = mat.shape
    # 2D launch grid covering the matrix in fixed-size square tiles.
    block = (_compilation_constants['add_vec_block_size'],
             _compilation_constants['add_vec_block_size'], 1)
    gridx = ceil_div(n, block[0])
    gridy = ceil_div(m, block[1])
    grid = (gridx, gridy, 1)
    if inplace:
        target = mat
    elif target is None:
        target = gpuarray.empty_like(mat)
    if axis == 0:
        assert vec.shape[0] == mat.shape[0]
        add_col_vec_kernel.prepared_call(
            grid, block,
            mat.gpudata,
            vec.gpudata,
            target.gpudata,
            np.uint32(n),
            np.uint32(m),
            np.int32(substract))
    elif axis == 1:
        assert vec.shape[0] == mat.shape[1]
        add_row_vec_kernel.prepared_call(
            grid, block,
            mat.gpudata,
            vec.gpudata,
            target.gpudata,
            np.uint32(n),
            np.uint32(m),
            np.int32(substract))
    return target
|
Add a vector to a matrix
|
def import_or_die(module_name, entrypoint_names):
    '''
    Import user code; return reference to usercode function.

    (str) -> function reference

    Accepts a path or module name (a trailing ``.py`` is stripped) and
    makes its directory importable.  If ``entrypoint_names`` is empty
    the module is merely (re)imported.  Otherwise the first requested
    attribute found on the module is returned; ImportError is raised
    when none of the entrypoints exist or the import itself fails.
    '''
    log_debug("Importing {}".format(module_name))
    module_name = os.path.abspath(module_name)
    if module_name.endswith('.py'):
        module_name,ext = os.path.splitext(module_name)
    modname = os.path.basename(module_name)
    dirname = os.path.dirname(module_name)
    if dirname and dirname not in sys.path:
        sys.path.append(dirname)
    # first, try to reload code
    if modname in sys.modules:
        user_module = sys.modules.get(modname)
        user_module = importlib.reload(user_module)
    # if it isn't in sys.modules, load it for the first time, or
    # try to.
    else:
        try:
            # NOTE(review): mypaths is unused except by the commented-out
            # debug print below.
            mypaths = [ x for x in sys.path if ("Cellar" not in x and "packages" not in x)]
            # print("Loading {} from {} ({})".format(modname, dirname, mypaths))
            # user_module = importlib.import_module(modname)
            user_module = importlib.__import__(modname)
        except ImportError as e:
            log_failure("Fatal error: couldn't import module (error: {}) while executing {}".format(str(e), modname))
            raise ImportError(e)
    # if there aren't any functions to call into, then the caller
    # just wanted the module/code to be imported, and that's it.
    if not entrypoint_names:
        return
    existing_names = dir(user_module)
    for method in entrypoint_names:
        if method in existing_names:
            return getattr(user_module, method)
    # None of the requested entrypoints exist on the module.
    if len(entrypoint_names) > 1:
        entrypoints = "one of {}".format(', '.join(entrypoint_names))
    else:
        entrypoints = entrypoint_names[0]
    raise ImportError("Required entrypoint function or symbol ({}) not found in your code".format(entrypoints))
|
Import user code; return reference to usercode function.
(str) -> function reference
|
def _compute_std_dev(self, X):
"""Computes the standard deviation of a Gaussian Distribution with mean vector X[i]"""
self._sigma = []
if X.shape[0] <= 1:
self._sigma = [0.0]
else:
for x_mean in range(X.shape[0]):
std_dev = np.sqrt(sum([np.linalg.norm(x - x_mean) ** 2 for x in X]) / float(X.shape[0]-1))
self._sigma.append(std_dev)
return self._sigma
|
Computes the standard deviation of a Gaussian Distribution with mean vector X[i]
|
def click_partial_link_text(self, partial_link_text,
                            timeout=settings.SMALL_TIMEOUT):
    """ This method clicks the partial link text on a page.

    On phantomjs the link may instead be resolved by parsing the page
    soup and opening its href directly; on real browsers the element is
    clicked, retrying once after a stale-element error.

    :param partial_link_text: substring of the link text to click.
    :param timeout: seconds to wait for the link (scaled by
        ``timeout_multiplier`` when one is set).
    """
    # If using phantomjs, might need to extract and open the link directly
    if self.timeout_multiplier and timeout == settings.SMALL_TIMEOUT:
        timeout = self.__get_new_timeout(timeout)
    if self.browser == 'phantomjs':
        if self.is_partial_link_text_visible(partial_link_text):
            element = self.wait_for_partial_link_text(partial_link_text)
            element.click()
            return
        soup = self.get_beautiful_soup()
        html_links = soup.fetch('a')
        for html_link in html_links:
            if partial_link_text in html_link.text:
                for html_attribute in html_link.attrs:
                    if html_attribute[0] == 'href':
                        href = html_attribute[1]
                        # Normalize protocol-relative and root-relative hrefs.
                        if href.startswith('//'):
                            link = "http:" + href
                        elif href.startswith('/'):
                            url = self.driver.current_url
                            domain_url = self.get_domain_url(url)
                            link = domain_url + href
                        else:
                            link = href
                        self.open(link)
                        return
                raise Exception(
                    'Could not parse link from partial link_text '
                    '{%s}' % partial_link_text)
        raise Exception(
            "Partial link text {%s} was not found!" % partial_link_text)
    # Not using phantomjs
    element = self.wait_for_partial_link_text(
        partial_link_text, timeout=timeout)
    self.__demo_mode_highlight_if_active(
        partial_link_text, by=By.PARTIAL_LINK_TEXT)
    pre_action_url = self.driver.current_url
    try:
        element.click()
    except (StaleElementReferenceException, ENI_Exception):
        # Element went stale between wait and click: re-locate and retry.
        self.wait_for_ready_state_complete()
        time.sleep(0.05)
        element = self.wait_for_partial_link_text(
            partial_link_text, timeout=timeout)
        element.click()
    if settings.WAIT_FOR_RSC_ON_CLICKS:
        self.wait_for_ready_state_complete()
    if self.demo_mode:
        # Longer pause when the click actually navigated somewhere.
        if self.driver.current_url != pre_action_url:
            self.__demo_mode_pause_if_active()
        else:
            self.__demo_mode_pause_if_active(tiny=True)
|
This method clicks the partial link text on a page.
|
def bind(self, fn: "Callable[[Any], Reader]") -> 'Reader':
    r"""Bind a monadic function to the Reader.

    Haskell:
    Reader: m >>= k = Reader $ \r -> runReader (k (runReader m r)) r
    Function: h >>= f = \w -> f (h w) w
    """
    def bound(environment):
        intermediate = self.run(environment)
        return fn(intermediate).run(environment)
    return Reader(bound)
|
r"""Bind a monadic function to the Reader.
Haskell:
Reader: m >>= k = Reader $ \r -> runReader (k (runReader m r)) r
Function: h >>= f = \w -> f (h w) w
|
def add_markdown_cell(self, text):
    """Add a markdown cell to the notebook.

    Parameters
    ----------
    text : str
        Cell content (reST, converted to markdown via ``rst2md``).
    """
    self.work_notebook["cells"].append({
        "cell_type": "markdown",
        "metadata": {},
        "source": [rst2md(text)],
    })
|
Add a markdown cell to the notebook
Parameters
----------
code : str
Cell content
|
def mouseReleaseEvent(self, event):
    """
    Emits mouse_released signal, giving subscribers first chance to
    accept the event before the default Qt handling runs.

    :param event: QMouseEvent
    """
    # Remember whether the event arrived accepted, then clear the flag so
    # a mouse_released subscriber can claim it.
    initial_state = event.isAccepted()
    event.ignore()
    self.mouse_released.emit(event)
    if not event.isAccepted():
        # No subscriber accepted it: restore the original accepted state.
        event.setAccepted(initial_state)
    super(CodeEdit, self).mouseReleaseEvent(event)
|
Emits mouse_released signal.
:param event: QMouseEvent
|
def make_wcs_data_from_hpx_data(self, hpx_data, wcs, normalize=True):
    """ Allocate a WCS map array and fill it from HEALPix data using the
    pre-calculated mappings.

    hpx_data : the input HEALPix data
    wcs : the WCS object (must expose ``npix``)
    normalize : True -> preserve the integral by splitting HEALPix values
        between bins
    """
    out_map = np.zeros(wcs.npix)
    self.fill_wcs_map_from_hpx_data(hpx_data, out_map, normalize)
    return out_map
|
Creates and fills a wcs map from the hpx data using the pre-calculated
mappings
hpx_data : the input HEALPix data
wcs : the WCS object
normalize : True -> preserve integral by splitting HEALPix values between bins
|
def concurrent_slots(slots):
    """
    Yield the index pairs ``(i, j)``, with ``i < j``, of all slots that
    overlap each other.
    """
    for i in range(len(slots)):
        for j in range(i + 1, len(slots)):
            if slots_overlap(slots[i], slots[j]):
                yield (i, j)
|
Yields all concurrent slot indices.
|
def dot(self, other):
    """dot (self, other) -> number

    Returns the dot product of this Point with another; the z term is
    included only when this point's z coordinate is truthy.
    """
    total = (self.x * other.x) + (self.y * other.y)
    if self.z:
        total += self.z * other.z
    return total
|
dot (self, other) -> number
Returns the dot product of this Point with another.
|
def yaw_pitch_roll(self):
    """Get the equivalent yaw-pitch-roll angles aka. intrinsic Tait-Bryan angles following the z-y'-x'' convention

    Returns:
        yaw: rotation angle around the z-axis in radians, in the range `[-pi, pi]`
        pitch: rotation angle around the y'-axis in radians, in the range `[-pi/2, pi/2]`
        roll: rotation angle around the x''-axis in radians, in the range `[-pi, pi]`

    The resulting rotation_matrix would be R = R_x(roll) R_y(pitch) R_z(yaw)

    Note:
        This feature only makes sense when referring to a unit quaternion. Calling this method will implicitly normalise the Quaternion object to a unit quaternion if it is not already one.
    """
    # Ensure a unit quaternion before extracting Euler angles.
    self._normalise()
    yaw = np.arctan2(2*(self.q[0]*self.q[3] - self.q[1]*self.q[2]),
        1 - 2*(self.q[2]**2 + self.q[3]**2))
    pitch = np.arcsin(2*(self.q[0]*self.q[2] + self.q[3]*self.q[1]))
    roll = np.arctan2(2*(self.q[0]*self.q[1] - self.q[2]*self.q[3]),
        1 - 2*(self.q[1]**2 + self.q[2]**2))
    return yaw, pitch, roll
|
Get the equivalent yaw-pitch-roll angles aka. intrinsic Tait-Bryan angles following the z-y'-x'' convention
Returns:
yaw: rotation angle around the z-axis in radians, in the range `[-pi, pi]`
pitch: rotation angle around the y'-axis in radians, in the range `[-pi/2, -pi/2]`
roll: rotation angle around the x''-axis in radians, in the range `[-pi, pi]`
The resulting rotation_matrix would be R = R_x(roll) R_y(pitch) R_z(yaw)
Note:
This feature only makes sense when referring to a unit quaternion. Calling this method will implicitly normalise the Quaternion object to a unit quaternion if it is not already one.
|
def ellipsize(s, max_length=60):
    """Truncate *s* to at most *max_length* characters, ending with '...'.

    >>> print(ellipsize(u'lorem ipsum dolor sit amet', 40))
    lorem ipsum dolor sit amet
    >>> print(ellipsize(u'lorem ipsum dolor sit amet', 20))
    lorem ipsum dolor...
    """
    suffix = '...'
    if len(s) <= max_length:
        return s
    return s[:max_length - len(suffix)] + suffix
|
>>> print(ellipsize(u'lorem ipsum dolor sit amet', 40))
lorem ipsum dolor sit amet
>>> print(ellipsize(u'lorem ipsum dolor sit amet', 20))
lorem ipsum dolor...
|
def matches(self, txt: str) -> bool:
    """Determine whether txt fully matches this pattern.

    :param txt: text to check
    :return: True if the whole of txt matches
    """
    # Patterns containing literal '\\u' expect unicode-unescaped input.
    if r'\\u' in self.pattern_re.pattern:
        txt = txt.encode('utf-8').decode('unicode-escape')
    hit = self.pattern_re.match(txt)
    if hit is None:
        return False
    return hit.end() == len(txt)
|
Determine whether txt matches pattern
:param txt: text to check
:return: True if match
|
def list(cls, path):
    """Return a list containing the names of the entries in the directory
    given by path. The list is in arbitrary order.

    ``path`` is a remote S3 path (parsed by ``cls.parse_remote``); each
    entry is rendered back into a full remote path via ``cls._S3_FILE``.
    """
    file_info = cls.parse_remote(path)
    connection = cls.connect(path)
    bucket = connection.get_bucket(file_info.bucket)
    # Region suffix is rendered only when the remote path carries one.
    region = "@%s" % file_info.region if file_info.region else ""
    output = []
    for key in bucket.get_all_keys(prefix=file_info.key):
        output.append(cls._S3_FILE % {"bucket": file_info.bucket,
                                      "key": key.name,
                                      "region": region})
    return output
|
Return a list containing the names of the entries in the directory
given by path. The list is in arbitrary order.
|
def closeEvent(self, event):
    """Send last file signal on close event

    Emits :data:`last_file` with the browser's current selection (if any)
    before delegating to the superclass.

    :param event: The close event
    :type event:
    :returns: None
    :rtype: None
    :raises: None
    """
    lf = self.browser.get_current_selection()
    if lf:
        self.last_file.emit(lf)
    # NOTE(review): this calls close() rather than the superclass
    # closeEvent(event) -- verify this does not re-enter closeEvent.
    return super(GenesisWin, self).close()
|
Send last file signal on close event
:param event: The close event
:type event:
:returns: None
:rtype: None
:raises: None
|
def get_remote_file(url):
    """
    Wrapper around ``requests.get`` which nicely handles connection errors.

    :param url: URL to fetch.
    :returns: the ``requests.Response`` for ``url``.

    Exits the process with status 1 on a connection error.
    """
    try:
        return requests.get(url)
    except requests.exceptions.ConnectionError as e:
        print("Connection error!")
        # FIX: exceptions have no ``.message`` attribute on Python 3, so
        # ``e.message.reason`` raised AttributeError inside the handler.
        # Printing the exception itself shows the underlying reason.
        print(e)
        exit(1)
|
Wrapper around ``request.get`` which nicely handles connection errors
|
def delete_thing(self, lid):
    """Delete a Thing

    Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
    containing the error if the infrastructure detects a problem

    Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
    if there is a communications problem between you and the infrastructure

    `lid` (required) (string) local identifier of the Thing you want to delete
    """
    logger.info("delete_thing(lid=\"%s\")", lid)
    # Fire the asynchronous delete, then block until it completes,
    # raising if the request failed.
    evt = self.delete_thing_async(lid)
    self._wait_and_except_if_failed(evt)
|
Delete a Thing
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
`lid` (required) (string) local identifier of the Thing you want to delete
|
def affine(self, pixelbuffer=0):
    """
    Return an Affine object of tile.

    - pixelbuffer: tile buffer in pixels
    """
    # Compute the (possibly buffered) bounds once instead of twice;
    # bounds() appears side-effect free, so this is purely a hoist.
    bounds = self.bounds(pixelbuffer)
    return Affine(
        self.pixel_x_size,
        0,
        bounds.left,
        0,
        -self.pixel_y_size,
        bounds.top
    )
|
Return an Affine object of tile.
- pixelbuffer: tile buffer in pixels
|
def validateDtd(self, doc, dtd):
    """Try to validate the document against the dtd instance.

    Basically it checks all the definitions in the DTD. Note that the
    internal subset (if present) is de-coupled (i.e. not used), which
    could give problems if ID or IDREF is present. """
    # Unwrap the underlying C objects, passing None through unchanged.
    doc__o = None if doc is None else doc._o
    dtd__o = None if dtd is None else dtd._o
    return libxml2mod.xmlValidateDtd(self._o, doc__o, dtd__o)
|
Try to validate the document against the dtd instance
Basically it does check all the definitions in the DtD.
Note the the internal subset (if present) is de-coupled
(i.e. not used), which could give problems if ID or IDREF
is present.
|
def process_order(self, order):
    """Keep track of an order that was placed.

    Orders are indexed both by id and by modification datetime, and kept
    in most-recently-modified order within each index.

    Parameters
    ----------
    order : zp.Order
        The order to record.
    """
    try:
        dt_orders = self._orders_by_modified[order.dt]
    except KeyError:
        # First order seen for this dt: create the per-dt index.
        self._orders_by_modified[order.dt] = OrderedDict([
            (order.id, order),
        ])
        self._orders_by_id[order.id] = order
    else:
        self._orders_by_id[order.id] = dt_orders[order.id] = order
        # to preserve the order of the orders by modified date
        move_to_end(dt_orders, order.id, last=True)
        move_to_end(self._orders_by_id, order.id, last=True)
|
Keep track of an order that was placed.
Parameters
----------
order : zp.Order
The order to record.
|
def jwt_get_secret_key(payload=None):
    """
    For enhanced security you may use a secret key stored on the user
    itself. That gives the option to log out only this user when:
    - the token is compromised
    - the password is changed
    - etc.
    """
    User = get_user_model()  # noqa
    if not api_settings.JWT_GET_USER_SECRET_KEY:
        return api_settings.JWT_SECRET_KEY
    user = User.objects.get(pk=payload.get('user_id'))
    return str(api_settings.JWT_GET_USER_SECRET_KEY(user))
|
For enhanced security you may use a secret key stored on the user itself.
This way you have an option to logout only this user if:
- token is compromised
- password is changed
- etc.
|
def auto_directory(rel_name):
    """
    Ensure the directory for *rel_name* exists and return its path.

    if you're using py.path you may do that as:
        py.path.local(full_path).ensure_dir()
    """
    dir_name = rel_path(rel_name, check=False)
    # makedirs(..., exist_ok=True) already tolerates an existing directory,
    # so the former os.path.exists() pre-check was a redundant TOCTOU race.
    os.makedirs(dir_name, exist_ok=True)
    return dir_name
|
if you're using py.path you make do that as:
py.path.local(full_path).ensure_dir()
|
def create_participant(worker_id, hit_id, assignment_id, mode):
    """Create a participant.

    This route is hit early on. Any nodes the participant creates will be
    defined in reference to the participant object. You must specify the
    worker_id, hit_id, assignment_id, and mode in the url.

    Returns 403 when required values are 'undefined', when the browser
    fingerprint or worker has already participated (live mode), and
    replaces an older working participant when an assignment_id is reused.
    """
    # Lock the table, triggering multiple simultaneous accesses to fail
    try:
        session.connection().execute("LOCK TABLE participant IN EXCLUSIVE MODE NOWAIT")
    except exc.OperationalError as e:
        # Surface the lock conflict as a retryable transaction rollback.
        e.orig = TransactionRollbackError()
        raise e
    missing = [p for p in (worker_id, hit_id, assignment_id) if p == "undefined"]
    if missing:
        msg = "/participant POST: required values were 'undefined'"
        return error_response(error_type=msg, status=403)
    fingerprint_hash = request.args.get("fingerprint_hash")
    try:
        fingerprint_found = models.Participant.query.filter_by(
            fingerprint_hash=fingerprint_hash
        ).one_or_none()
    except MultipleResultsFound:
        fingerprint_found = True
    if fingerprint_hash and fingerprint_found:
        db.logger.warning("Same browser fingerprint detected.")
        # Only reject duplicate fingerprints in live mode.
        if mode == "live":
            return error_response(
                error_type="/participant POST: Same participant dectected.", status=403
            )
    already_participated = models.Participant.query.filter_by(
        worker_id=worker_id
    ).one_or_none()
    if already_participated:
        db.logger.warning("Worker has already participated.")
        return error_response(
            error_type="/participant POST: worker has already participated.", status=403
        )
    duplicate = models.Participant.query.filter_by(
        assignment_id=assignment_id, status="working"
    ).one_or_none()
    if duplicate:
        msg = """
            AWS has reused assignment_id while existing participant is
            working. Replacing older participant {}.
        """
        app.logger.warning(msg.format(duplicate.id))
        q.enqueue(worker_function, "AssignmentReassigned", None, duplicate.id)
    # Count working or beyond participants.
    nonfailed_count = (
        models.Participant.query.filter(
            (models.Participant.status == "working")
            | (models.Participant.status == "overrecruited")
            | (models.Participant.status == "submitted")
            | (models.Participant.status == "approved")
        ).count()
        + 1
    )
    recruiter_name = request.args.get("recruiter", "undefined")
    if not recruiter_name or recruiter_name == "undefined":
        # Fall back to the recruiter configured for this experiment.
        recruiter = recruiters.from_config(_config())
        if recruiter:
            recruiter_name = recruiter.nickname
    # Create the new participant.
    participant = models.Participant(
        recruiter_id=recruiter_name,
        worker_id=worker_id,
        assignment_id=assignment_id,
        hit_id=hit_id,
        mode=mode,
        fingerprint_hash=fingerprint_hash,
    )
    exp = Experiment(session)
    overrecruited = exp.is_overrecruited(nonfailed_count)
    if overrecruited:
        participant.status = "overrecruited"
    session.add(participant)
    session.flush()  # Make sure we know the id for the new row
    result = {"participant": participant.__json__()}
    # Queue notification to others in waiting room
    if exp.quorum:
        quorum = {"q": exp.quorum, "n": nonfailed_count, "overrecruited": overrecruited}
        db.queue_message(WAITING_ROOM_CHANNEL, dumps(quorum))
        result["quorum"] = quorum
    # return the data
    return success_response(**result)
|
Create a participant.
This route is hit early on. Any nodes the participant creates will be
defined in reference to the participant object. You must specify the
worker_id, hit_id, assignment_id, and mode in the url.
|
def dusk(self, date=None, local=True, use_elevation=True):
    """Calculates the dusk time (the time in the evening when the sun is a
    certain number of degrees below the horizon. By default this is 6
    degrees but can be changed by setting the
    :attr:`solar_depression` property.)

    :param date: The date for which to calculate the dusk time.
                 If no date is specified then the current date will be used.
    :type date: :class:`~datetime.date`

    :param local: True  = Time to be returned in location's time zone;
                  False = Time to be returned in UTC.
                  If not specified then the time will be returned in local time
    :type local: bool

    :param use_elevation: True  = Return times that allow for the location's elevation;
                          False = Return times that don't use elevation.
                          If not specified then times will take elevation into account.
    :type use_elevation: bool

    :returns: The date and time at which dusk occurs.
    :rtype: :class:`~datetime.datetime`
    """
    if local and self.timezone is None:
        raise ValueError("Local time requested but Location has no timezone set.")
    if self.astral is None:
        # Lazily create the Astral calculator on first use.
        self.astral = Astral()
    if date is None:
        date = datetime.date.today()
    # An elevation of 0 disables the elevation correction.
    elevation = self.elevation if use_elevation else 0
    dusk = self.astral.dusk_utc(date, self.latitude, self.longitude, observer_elevation=elevation)
    if local:
        return dusk.astimezone(self.tz)
    else:
        return dusk
|
Calculates the dusk time (the time in the evening when the sun is a
certain number of degrees below the horizon. By default this is 6
degrees but can be changed by setting the
:attr:`solar_depression` property.)
:param date: The date for which to calculate the dusk time.
If no date is specified then the current date will be used.
:type date: :class:`~datetime.date`
:param local: True = Time to be returned in location's time zone;
False = Time to be returned in UTC.
If not specified then the time will be returned in local time
:type local: bool
:param use_elevation: True = Return times that allow for the location's elevation;
False = Return times that don't use elevation.
If not specified then times will take elevation into account.
:type use_elevation: bool
:returns: The date and time at which dusk occurs.
:rtype: :class:`~datetime.datetime`
|
def _clear_dict(endpoint_props):
'''
Eliminates None entries from the features of the endpoint dict.
'''
return dict(
(prop_name, prop_val)
for prop_name, prop_val in six.iteritems(endpoint_props)
if prop_val is not None
)
|
Eliminates None entries from the features of the endpoint dict.
|
def allowed(self, method, _dict, allow):
"""
Only these items are allowed in the dictionary
"""
for key in _dict.keys():
if key not in allow:
raise LunrError("'%s' is not an argument for method '%s'"
% (key, method))
|
Only these items are allowed in the dictionary
|
def matrix_rank(a, tol=None, validate_args=False, name=None):
    """Compute the matrix rank; the number of non-zero SVD singular values.

    Arguments:
      a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be
        pseudo-inverted.
      tol: Threshold below which the singular value is counted as "zero".
        Default value: `None` (i.e., `eps * max(rows, cols) * max(singular_val)`).
      validate_args: When `True`, additional assertions might be embedded in the
        graph.
        Default value: `False` (i.e., no graph assertions are added).
      name: Python `str` prefixed to ops created by this function.
        Default value: "matrix_rank".

    Returns:
      matrix_rank: (Batch of) `int32` scalars representing the number of non-zero
        singular values.
    """
    with tf.compat.v1.name_scope(name, 'matrix_rank', [a, tol]):
        a = tf.convert_to_tensor(value=a, dtype_hint=tf.float32, name='a')
        assertions = _maybe_validate_matrix(a, validate_args)
        if assertions:
            with tf.control_dependencies(assertions):
                a = tf.identity(a)
        # Only the singular values are needed to count the rank.
        s = tf.linalg.svd(a, compute_uv=False)
        if tol is None:
            # Default tolerance: eps * max(rows, cols) * largest singular
            # value; use the static shape when fully known, else compute
            # the dimension dynamically.
            if a.shape[-2:].is_fully_defined():
                m = np.max(a.shape[-2:].as_list())
            else:
                m = tf.reduce_max(input_tensor=tf.shape(input=a)[-2:])
            eps = np.finfo(a.dtype.as_numpy_dtype).eps
            tol = (eps * tf.cast(m, a.dtype) *
                   tf.reduce_max(input_tensor=s, axis=-1, keepdims=True))
        return tf.reduce_sum(input_tensor=tf.cast(s > tol, tf.int32), axis=-1)
|
Compute the matrix rank; the number of non-zero SVD singular values.
Arguments:
a: (Batch of) `float`-like matrix-shaped `Tensor`(s) which are to be
pseudo-inverted.
tol: Threshold below which the singular value is counted as "zero".
Default value: `None` (i.e., `eps * max(rows, cols) * max(singular_val)`).
validate_args: When `True`, additional assertions might be embedded in the
graph.
Default value: `False` (i.e., no graph assertions are added).
name: Python `str` prefixed to ops created by this function.
Default value: "matrix_rank".
Returns:
matrix_rank: (Batch of) `int32` scalars representing the number of non-zero
singular values.
|
def get_aggs(self):
    """
    Compute the value of a single-valued aggregation from the last query.

    :returns: the single aggregation value (``None`` when ES reports
        ``'NaN'``), or the total hit count when no aggregation is present
    :raises RuntimeError: if the aggregation result is multi-valued
    """
    res = self.fetch_aggregation_results()
    agg_key = str(self.parent_agg_counter - 1)
    if 'aggregations' in res and 'values' in res['aggregations'][agg_key]:
        try:
            agg = res['aggregations'][agg_key]['values']["50.0"]
            if agg == 'NaN':
                # ES returns NaN. Convert to None for matplotlib graph
                agg = None
        except Exception as e:
            # FIX: chain the original exception so the cause is not lost.
            raise RuntimeError("Multivalue aggregation result not supported") from e
    elif 'aggregations' in res and 'value' in res['aggregations'][agg_key]:
        agg = res['aggregations'][agg_key]['value']
    else:
        agg = res['hits']['total']
    return agg
|
Compute the values for single valued aggregations
:returns: the single aggregation value
|
def run(self, options):
    """
    Check that required external tools (currently just ``git``) are
    installed, printing a colored OK/ERR line per tool and erroring out
    when any are missing.

    .. todo::
        check network connection

    :param Namespace options: parse result from argparse
    :return:
    """
    self.logger.debug("debug enabled...")
    depends = ['git']
    nil_tools = []
    self.logger.info("depends list: %s", depends)
    for v in depends:
        # shutil.which returns None when the tool is not on PATH.
        real_path = shutil.which(v)
        if real_path:
            self.print_message("Found {}:{}..."
                               " {}".format(v,
                                            real_path,
                                            termcolor.colored(
                                                '[OK]',
                                                color='blue')))
        else:
            nil_tools.append(v)
            self.error_message(
                'Missing tool:`{}`... {}'.format(v, '[ERR]'), prefix='',
                suffix='')
            pass
    if nil_tools:
        self.print_message('')
        self.error("please install missing tools...")
    else:
        self.print_message("\nNo error found,"
                           "you can use cliez in right way.")
        self.logger.debug("check finished...")
        pass
    pass
|
.. todo::
check network connection
:param Namespace options: parse result from argparse
:return:
|
def _set_ospf(self, v, load=False):
    """
    Setter method for ospf, mapped from YANG variable /rbridge_id/ipv6/router/ospf (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_ospf is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_ospf() directly.

    NOTE: pyangbind-generated code; do not hand-edit the YANGDynClass
    construction below.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    # Coerce the value into the generated YANG list type; incompatible
    # values surface as a structured ValueError below.
    try:
        t = YANGDynClass(v,base=YANGListType("vrf",ospf.ospf, yang_name="ospf", rest_name="ospf", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrf', extensions={u'tailf-common': {u'info': u'Open Shortest Path First (OSPF) version 3', u'cli-run-template-enter': u' ipv6 router ospf$($(vrf)==default-vrf?: vrf $(vrf))\n', u'callpoint': u'Ospfv3Config', u'cli-suppress-list-no': None, u'cli-full-command': None, u'cli-full-no': None, u'cli-mode-name': u'config-ipv6-router-ospf-vrf-$(vrf)'}}), is_container='list', yang_name="ospf", rest_name="ospf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Open Shortest Path First (OSPF) version 3', u'cli-run-template-enter': u' ipv6 router ospf$($(vrf)==default-vrf?: vrf $(vrf))\n', u'callpoint': u'Ospfv3Config', u'cli-suppress-list-no': None, u'cli-full-command': None, u'cli-full-no': None, u'cli-mode-name': u'config-ipv6-router-ospf-vrf-$(vrf)'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """ospf must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("vrf",ospf.ospf, yang_name="ospf", rest_name="ospf", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrf', extensions={u'tailf-common': {u'info': u'Open Shortest Path First (OSPF) version 3', u'cli-run-template-enter': u' ipv6 router ospf$($(vrf)==default-vrf?: vrf $(vrf))\n', u'callpoint': u'Ospfv3Config', u'cli-suppress-list-no': None, u'cli-full-command': None, u'cli-full-no': None, u'cli-mode-name': u'config-ipv6-router-ospf-vrf-$(vrf)'}}), is_container='list', yang_name="ospf", rest_name="ospf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Open Shortest Path First (OSPF) version 3', u'cli-run-template-enter': u' ipv6 router ospf$($(vrf)==default-vrf?: vrf $(vrf))\n', u'callpoint': u'Ospfv3Config', u'cli-suppress-list-no': None, u'cli-full-command': None, u'cli-full-no': None, u'cli-mode-name': u'config-ipv6-router-ospf-vrf-$(vrf)'}}, namespace='urn:brocade.com:mgmt:brocade-ospfv3', defining_module='brocade-ospfv3', yang_type='list', is_config=True)""",
        })
    self.__ospf = t
    if hasattr(self, '_set'):
        self._set()
|
Setter method for ospf, mapped from YANG variable /rbridge_id/ipv6/router/ospf (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_ospf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ospf() directly.
|
def join(self, timeout=None):
    """block until this thread terminates

    .. note::
        this method can block the calling coroutine if the thread has not
        yet completed.

    :param timeout:
        the maximum time to wait. with the default of ``None``, waits
        indefinitely

    :type timeout: int, float or None

    :raises:
        `RuntimeError` if called inside the thread, or it has not yet been
        started
    """
    # The started check must come first: _glet may not exist yet.
    if not self._started:
        raise RuntimeError("cannot join thread before it is started")
    if compat.getcurrent() is self._glet:
        # joining yourself would deadlock
        raise RuntimeError("cannot join current thread")
    self._finished.wait(timeout)
|
block until this thread terminates
.. note::
this method can block the calling coroutine if the thread has not
yet completed.
:param timeout:
the maximum time to wait. with the default of ``None``, waits
indefinitely
:type timeout: int, float or None
:raises:
`RuntimeError` if called inside the thread, or it has not yet been
started
|
def addComponentEditor(self):
    """Adds a new component to the model, and an editor for this component to this editor

    Creates the stack-editor page and its index button, inserts a leading
    Silence delay component, instantiates every explorable stimulus type
    for the new row, and wires the editor's signals.

    :returns: the new ExploreComponentEditor page
    """
    row = self._model.rowCount()
    comp_stack_editor = ExploreComponentEditor()
    self.ui.trackStack.addWidget(comp_stack_editor)
    # Button that raises this track's page in the stack.
    idx_button = IndexButton(row)
    idx_button.pickMe.connect(self.ui.trackStack.setCurrentIndex)
    self.trackBtnGroup.addButton(idx_button)
    self.ui.trackBtnLayout.addWidget(idx_button)
    self.ui.trackStack.setCurrentIndex(row)
    comp_stack_editor.closePlease.connect(self.removeComponentEditor)
    # Every track starts with a Silence delay component in column 0.
    delay = Silence()
    comp_stack_editor.delaySpnbx.setValue(delay.duration())
    self._model.insertComponent(delay, row,0)
    # One fresh instance of each explorable stimulus type for this row.
    self._allComponents.append([x() for x in self.stimuli_types if x.explore])
    for stim in self._allComponents[row]:
        editor = wrapComponent(stim).showEditor()
        comp_stack_editor.addWidget(editor, stim.name)
    exvocal = comp_stack_editor.widgetForName("Vocalization")
    if exvocal is not None:
        exvocal.filelistView.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
    # The first stimulus type is the initially selected component.
    initcomp = self._allComponents[row][0]
    self._model.insertComponent(initcomp, row, 1)
    self.buttons.append(idx_button)
    comp_stack_editor.exploreStimTypeCmbbx.currentIndexChanged.connect(lambda x : self.setStimIndex(row, x))
    comp_stack_editor.delaySpnbx.valueChanged.connect(lambda x : self.setDelay(row, x))
    comp_stack_editor.valueChanged.connect(self.valueChanged.emit)
    return comp_stack_editor
|
Adds a new component to the model, and an editor for this component to this editor
|
def get_queryset(self, request):
    """
    Return a QuerySet of every model instance that the admin site is
    allowed to edit, ordered according to ``get_ordering(request)``.
    """
    queryset = self.model._default_manager.get_queryset()
    order_fields = self.get_ordering(request)
    # Only apply an ordering when one is actually configured.
    return queryset.order_by(*order_fields) if order_fields else queryset
|
Returns a QuerySet of all model instances that can be edited by the
admin site.
|
def get_sequence(self):
    """Fetch the current sequence number of this account via Horizon.

    :return: The current sequence number for a given account
    :rtype: int
    :raises StellarAddressInvalidError: when no address is configured
    """
    if not self.address:
        raise StellarAddressInvalidError('No address provided.')
    account_record = self.horizon.account(self.address)
    return int(account_record.get('sequence'))
|
Get the sequence number for a given account via Horizon.
:return: The current sequence number for a given account
:rtype: int
|
def run(self, lines):
    """Filter method: replace every inline link to a ``.md`` file with its title.

    Each ``[title](target.md)`` occurrence in each line is replaced by the
    bare ``title`` text, repeatedly until no such link remains in the line.

    :param lines: iterable of markdown source lines
    :returns: list of filtered lines
    """
    # One compiled pattern; the original searched with one regex and
    # substituted with a second, duplicated copy of it.
    link_pattern = re.compile(r'\[(.*?)\]\((.*?\.md)\)')
    ret = []
    for line in lines:
        while True:
            match = link_pattern.search(line)
            if match is None:
                break
            # Splice the title over exactly the matched span, avoiding a
            # second regex scan per replacement.
            line = line[:match.start()] + match.group(1) + line[match.end():]
        ret.append(line)
    return ret
|
Filter method
|
def unpack_results(
        data: bytes,
        repetitions: int,
        key_sizes: Sequence[Tuple[str, int]]
) -> Dict[str, np.ndarray]:
    """Unpack data from a bitstring into individual measurement results.

    Args:
        data: Packed measurement results, in the form <rep0><rep1>...
            where each repetition is <key0_0>..<key0_{size0-1}><key1_0>...
            with bits packed in little-endian order in each byte.
        repetitions: number of repetitions.
        key_sizes: Keys and sizes of the measurements in the data.

    Returns:
        Dict mapping measurement key to a 2D array of boolean results. Each
        array has shape (repetitions, size) with size for that measurement.
    """
    rep_width = sum(width for _, width in key_sizes)
    needed_bits = repetitions * rep_width
    packed = np.frombuffer(data, dtype='uint8').reshape((len(data), 1))
    # unpackbits emits MSB-first; reversing each byte's bits restores the
    # little-endian order, after which we flatten and drop the padding.
    flat_bits = np.unpackbits(packed, axis=1)[:, ::-1].reshape(-1).astype(bool)
    table = flat_bits[:needed_bits].reshape((repetitions, rep_width))
    unpacked = {}
    start = 0
    for name, width in key_sizes:
        unpacked[name] = table[:, start:start + width]
        start += width
    return unpacked
|
Unpack data from a bitstring into individual measurement results.
Args:
data: Packed measurement results, in the form <rep0><rep1>...
where each repetition is <key0_0>..<key0_{size0-1}><key1_0>...
with bits packed in little-endian order in each byte.
repetitions: number of repetitions.
key_sizes: Keys and sizes of the measurements in the data.
Returns:
Dict mapping measurement key to a 2D array of boolean results. Each
array has shape (repetitions, size) with size for that measurement.
|
def print_struct(struct, ident=0):
    """
    >>> from ctypes import *
    >>> class Test(Structure):
    ...     _fields_ = [('foo', c_int)]
    ...
    >>> class Test2(Structure):
    ...     _fields_ = [('foo', Test), ('bar', c_int)]
    ...
    >>> t = Test2()
    >>> t.foo.foo = 2
    >>> t.bar = 1
    >>> print_struct(t)
    foo:
     foo: 2
    bar: 1
    """
    # Indexable non-string/non-sequence objects are treated as ctypes arrays.
    is_array = (not isinstance(struct, (str, bytes, list, tuple))
                and hasattr(struct, '__getitem__'))
    if is_array:
        print('[')
        for element in struct:
            print(" " * ident, end=' ')
            print_struct(element, ident + 1)
        print(" " * ident + "]")
        return
    if not hasattr(struct, '_fields_'):
        # Plain scalar value.
        print(struct)
        return
    # Nested structures start on a fresh line; the top level does not.
    if ident:
        print()
    for field_name, _ in struct._fields_:
        print(" " * ident + "{}:".format(field_name), end=' ')
        print_struct(getattr(struct, field_name), ident + 1)
|
>>> from ctypes import *
>>> class Test(Structure):
... _fields_ = [('foo', c_int)]
...
>>> class Test2(Structure):
... _fields_ = [('foo', Test), ('bar', c_int)]
...
>>> t = Test2()
>>> t.foo.foo = 2
>>> t.bar = 1
>>> print_struct(t)
foo:
foo: 2
bar: 1
|
def height(self):
    """Terminal height.

    In interactive mode the height is read from the terminal once and
    memoized; subsequent calls return the cached value.
    """
    if not self.interactive:
        return self._height
    if self._height is None:
        # Query the terminal only on first access.
        self._height = self.term.height
    return self._height
|
Terminal height.
|
def move_user_data(primary, secondary):
    '''
    Moves all submissions and other data linked to the secondary user into the primary user.
    Nothing is deleted here, we just modify foreign user keys.

    :param primary: the user that receives all data
    :param secondary: the user whose data is re-linked to ``primary``
    '''
    # Update all submission authorships of the secondary to the primary
    submissions = Submission.objects.filter(authors__id=secondary.pk)
    for subm in submissions:
        if subm.submitter == secondary:
            subm.submitter = primary
        # Swap authorship on the many-to-many relation as well.
        subm.authors.remove(secondary)
        subm.authors.add(primary)
        subm.save()
    # Transfer course registrations
    try:
        for course in secondary.profile.courses.all():
            primary.profile.courses.add(course)
        primary.profile.save()
    except UserProfile.DoesNotExist:
        # That's a database consistency problem, but he will go away anyway
        pass
|
Moves all submissions and other data linked to the secondary user into the primary user.
Nothing is deleted here, we just modify foreign user keys.
|
def convert_bool(string):
    """Check whether string is boolean.

    Returns a ``(recognized, value)`` pair: ``recognized`` is True only when
    the input is exactly ``'True'`` or ``'False'``; ``value`` is the parsed
    boolean (False whenever the input is unrecognized).
    """
    is_true = string == 'True'
    is_false = string == 'False'
    return (is_true or is_false, is_true)
|
Check whether string is boolean.
|
def compose_suffix(num_docs=0, num_topics=0, suffix=None):
    """Create a short, informative, but not-so-unique identifying string for a trained model

    If a str suffix is provided then just pass it through.

    >>> compose_suffix(num_docs=100, num_topics=20)
    '_100X20'
    >>> compose_suffix(suffix='_sfx')
    '_sfx'
    >>> compose_suffix(suffix='')
    ''
    >>> compose_suffix(suffix=None)
    '_0X0'
    """
    # `basestring` only exists on Python 2; checking against `str` keeps the
    # same behaviour (non-string suffixes fall through to the generated
    # default) while working on Python 3 as well.
    if not isinstance(suffix, str):
        suffix = '_{}X{}'.format(num_docs, num_topics)
    return suffix
|
Create a short, informative, but not-so-unique identifying string for a trained model
If a str suffix is provided then just pass it through.
>>> compose_suffix(num_docs=100, num_topics=20)
'_100X20'
>>> compose_suffix(suffix='_sfx')
'_sfx'
>>> compose_suffix(suffix='')
''
>>> compose_suffix(suffix=None)
'_0X0'
|
def _write_coco_results(self, _coco, detections):
    """Collect per-class detections and dump them as a COCO results JSON file.

    example results
    [{"image_id": 42,
      "category_id": 18,
      "bbox": [258.15,41.29,348.26,243.78],
      "score": 0.236}, ...]

    :param _coco: a COCO API object used to map class names to category ids
    :param detections: per-class detections, indexed by this dataset's class index
    """
    # Map this dataset's class names onto COCO category ids.
    cats = [cat['name'] for cat in _coco.loadCats(_coco.getCatIds())]
    class_to_coco_ind = dict(zip(cats, _coco.getCatIds()))
    results = []
    for cls_ind, cls in enumerate(self.classes):
        if cls == '__background__':
            continue
        logger.info('collecting %s results (%d/%d)' % (cls, cls_ind, self.num_classes - 1))
        coco_cat_id = class_to_coco_ind[cls]
        results.extend(self._coco_results_one_category(detections[cls_ind], coco_cat_id))
    logger.info('writing results json to %s' % self._result_file)
    with open(self._result_file, 'w') as f:
        json.dump(results, f, sort_keys=True, indent=4)
|
example results
[{"image_id": 42,
"category_id": 18,
"bbox": [258.15,41.29,348.26,243.78],
"score": 0.236}, ...]
|
def fshdev(k):
    """
    Generate a random draw from a Fisher distribution with mean declination
    of 0 and inclination of 90 with a specified kappa.

    Parameters
    ----------
    k : kappa (precision parameter) of the distribution
        k can be a single number or an array of values

    Returns
    ----------
    dec, inc : declination and inclination of random Fisher distribution draw
        if k is an array, dec, inc are returned as arrays, otherwise, single values
    """
    kappa = np.array(k)
    # Scalar kappa produces a single draw; an array produces one draw each.
    n = kappa.shape[0] if kappa.ndim else 1
    u1 = random.random(size=n)
    u2 = random.random(size=n)
    lam = np.exp(-2 * kappa)
    a = u1 * (1 - lam) + lam
    fac = np.sqrt(-np.log(a) / (2 * kappa))
    inc = 90. - np.degrees(2 * np.arcsin(fac))
    dec = np.degrees(2 * np.pi * u2)
    if n == 1:
        # Preserve scalar-in, scalar-out behaviour.
        return dec[0], inc[0]
    return dec, inc
|
Generate a random draw from a Fisher distribution with mean declination
of 0 and inclination of 90 with a specified kappa.
Parameters
----------
k : kappa (precision parameter) of the distribution
k can be a single number or an array of values
Returns
----------
dec, inc : declination and inclination of random Fisher distribution draw
if k is an array, dec, inc are returned as arrays, otherwise, single values
|
def export_to(self, appliance, location):
    """Exports the machine to an OVF appliance. See :py:class:`IAppliance` for the
    steps required to export VirtualBox machines to OVF.

    in appliance of type :class:`IAppliance`
        Appliance to export this machine to.

    in location of type str
        The target location.

    return description of type :class:`IVirtualSystemDescription`
        VirtualSystemDescription object which is created for this machine.

    :raises TypeError: when either argument has the wrong type
    """
    # Validate argument types up front; the underlying COM call gives
    # far less helpful errors on type mismatches.
    if not isinstance(appliance, IAppliance):
        raise TypeError("appliance can only be an instance of type IAppliance")
    if not isinstance(location, basestring):
        raise TypeError("location can only be an instance of type basestring")
    description = self._call("exportTo",
                 in_p=[appliance, location])
    # Wrap the raw COM object in its Python interface class.
    description = IVirtualSystemDescription(description)
    return description
|
Exports the machine to an OVF appliance. See :py:class:`IAppliance` for the
steps required to export VirtualBox machines to OVF.
in appliance of type :class:`IAppliance`
Appliance to export this machine to.
in location of type str
The target location.
return description of type :class:`IVirtualSystemDescription`
VirtualSystemDescription object which is created for this machine.
|
def cat(
    self,
    source,
    buffersize=None,
    memsize=2 ** 24,
    compressed=False,
    encoding='UTF-8',
    raw=False,
):
    """
    Returns an iterator for the data in the key or nothing if the key
    doesn't exist. Decompresses data on the fly (if compressed is True
    or key ends with .gz) unless raw is True. Pass None for encoding to
    skip encoding.

    :param source: s3 path string or a boto Key object
    :param buffersize: read chunk size in bytes (defaults to 1 MiB)
    :param memsize: maximum bytes of undecodable tail to buffer before
        re-raising a UnicodeDecodeError (defaults to 16 MiB)
    :param compressed: force gzip decompression regardless of key name
    :param encoding: text encoding for decoding chunks; None to skip
    :param raw: yield raw bytes, disabling decompression and decoding
    """
    assert self._is_s3(source) or isinstance(source, Key), 'source must be a valid s3 path'
    key = self._get_key(source) if not isinstance(source, Key) else source
    compressed = (compressed or key.name.endswith('.gz')) and not raw
    if compressed:
        # 16 + MAX_WBITS tells zlib to expect a gzip header.
        decompress = zlib.decompressobj(16 + zlib.MAX_WBITS)
    size = 0
    bytes_read = 0
    err = None
    # Holds a chunk tail that failed to decode (e.g. a multi-byte
    # character split across chunk boundaries) until more data arrives.
    # NOTE(review): initialized as str but concatenated with bytes chunks
    # below (`undecoded += s`) and later `.decode()`d — this looks like
    # Python-2-only code; verify before running under Python 3.
    undecoded = ''
    if key:
        # try to read the file multiple times
        for i in range(100):
            obj = self.s3.Object(key.bucket.name, key.name)
            buffersize = buffersize if buffersize is not None else 2 ** 20
            if not size:
                size = obj.content_length
            elif size != obj.content_length:
                raise AwsError('key size unexpectedly changed while reading')
            # Resume from where the previous attempt stopped.
            r = obj.get(Range="bytes={}-".format(bytes_read))
            try:
                while bytes_read < size:
                    # this making this weird check because this call is
                    # about 100 times slower if the amt is too high
                    if size - bytes_read > buffersize:
                        bytes = r['Body'].read(amt=buffersize)
                    else:
                        bytes = r['Body'].read()
                    if compressed:
                        s = decompress.decompress(bytes)
                    else:
                        s = bytes
                    if encoding and not raw:
                        try:
                            decoded = undecoded + s.decode(encoding)
                            undecoded = ''
                            yield decoded
                        except UnicodeDecodeError:
                            # Buffer the undecodable tail and retry once
                            # more data has been read.
                            undecoded += s
                            if len(undecoded) > memsize:
                                raise
                    else:
                        yield s
                    bytes_read += len(bytes)
            except zlib.error:
                logger.error("Error while decompressing [%s]", key.name)
                raise
            except UnicodeDecodeError:
                raise
            except Exception:
                # Any other (probably transient network) error: remember it
                # and retry the ranged read in the next loop iteration.
                err = True
                pass
            if size <= bytes_read:
                break
        if size != bytes_read:
            if err:
                raise Exception
            else:
                raise AwsError('Failed to fully read [%s]' % source.name)
        if undecoded:
            assert encoding is not None  # only time undecoded is set
            # allow exception to be raised if one is thrown
            decoded = undecoded.decode(encoding)
            yield decoded
|
Returns an iterator for the data in the key or nothing if the key
doesn't exist. Decompresses data on the fly (if compressed is True
or key ends with .gz) unless raw is True. Pass None for encoding to
skip encoding.
|
def edit(self, entry, name, mark=False):
    """
    Edit an entry (file or directory)

    :param entry: :class:`.BaseFile` object
    :param str name: new name for the entry
    :param bool mark: whether to bookmark the entry
    :raises APIError: for an unsupported entry type or a failed edit
    """
    # Files and directories carry their server-side id under different
    # attribute names.
    if isinstance(entry, File):
        fcid = entry.fid
    elif isinstance(entry, Directory):
        fcid = entry.cid
    else:
        raise APIError('Invalid BaseFile instance for an entry.')
    is_mark = 1 if mark is True else 0
    if not self._req_files_edit(fcid, name, is_mark):
        raise APIError('Error editing the entry.')
    # Refresh the entry so it reflects the server-side change.
    entry.reload()
    return True
|
Edit an entry (file or directory)
:param entry: :class:`.BaseFile` object
:param str name: new name for the entry
:param bool mark: whether to bookmark the entry
|
def create_default_users_and_perms():
    """
    Adds the roles and perms to the DB. It adds only roles, perms and links
    between them that are not already inside the db. New roles or perms can
    be added (and connected) just by modifying the lists below.
    """
    # perms = db.DBSession.query(Perm).all()
    # if len(perms) > 0:
    #     return
    # (code, human-readable name) pairs seeded into the Perm table.
    default_perms = ( ("add_user", "Add User"),
                      ("edit_user", "Edit User"),
                      ("add_role", "Add Role"),
                      ("edit_role", "Edit Role"),
                      ("add_perm", "Add Permission"),
                      ("edit_perm", "Edit Permission"),
                      ("add_network", "Add network"),
                      ("edit_network", "Edit network"),
                      ("delete_network", "Delete network"),
                      ("share_network", "Share network"),
                      ("edit_topology", "Edit network topology"),
                      ("add_project", "Add Project"),
                      ("edit_project", "Edit Project"),
                      ("delete_project", "Delete Project"),
                      ("share_project", "Share Project"),
                      ("edit_data", "Edit network data"),
                      ("view_data", "View network data"),
                      ("add_template", "Add Template"),
                      ("edit_template", "Edit Template"),
                      ("add_dimension", "Add Dimension"),
                      ("update_dimension", "Update Dimension"),
                      ("delete_dimension", "Delete Dimension"),
                      ("add_unit", "Add Unit"),
                      ("update_unit", "Update Unit"),
                      ("delete_unit", "Delete Unit")
                      )
    # (code, human-readable name) pairs seeded into the Role table.
    # NOTE(review): both "dev" and "developer" exist with the same display
    # name, but only "developer" is granted perms below — confirm whether
    # "dev" is intentional.
    default_roles = (
        ("admin", "Administrator"),
        ("dev", "Developer"),
        ("modeller", "Modeller / Analyst"),
        ("manager", "Manager"),
        ("grad", "Graduate"),
        ("developer", "Developer"),
        ("decision", "Decision Maker"),
    )
    # (role code, perm code) links to be materialized as RolePerm rows.
    roleperms = (
        # Admin permissions
        ('admin', "add_user"),
        ('admin', "edit_user"),
        ('admin', "add_role"),
        ('admin', "edit_role"),
        ('admin', "add_perm"),
        ('admin', "edit_perm"),
        ('admin', "add_network"),
        ('admin', "edit_network"),
        ('admin', "delete_network"),
        ('admin', "share_network"),
        ('admin', "add_project"),
        ('admin', "edit_project"),
        ('admin', "delete_project"),
        ('admin', "share_project"),
        ('admin', "edit_topology"),
        ('admin', "edit_data"),
        ('admin', "view_data"),
        ('admin', "add_template"),
        ('admin', "edit_template"),
        ('admin', "add_dimension"),
        ('admin', "update_dimension"),
        ('admin', "delete_dimension"),
        ('admin', "add_unit"),
        ('admin', "update_unit"),
        ('admin', "delete_unit"),
        # Developer permissions
        ("developer", "add_network"),
        ("developer", "edit_network"),
        ("developer", "delete_network"),
        ("developer", "share_network"),
        ("developer", "add_project"),
        ("developer", "edit_project"),
        ("developer", "delete_project"),
        ("developer", "share_project"),
        ("developer", "edit_topology"),
        ("developer", "edit_data"),
        ("developer", "view_data"),
        ("developer", "add_template"),
        ("developer", "edit_template"),
        ('developer', "add_dimension"),
        ('developer', "update_dimension"),
        ('developer', "delete_dimension"),
        ('developer', "add_unit"),
        ('developer', "update_unit"),
        ('developer', "delete_unit"),
        # modeller permissions
        ("modeller", "add_network"),
        ("modeller", "edit_network"),
        ("modeller", "delete_network"),
        ("modeller", "share_network"),
        ("modeller", "edit_topology"),
        ("modeller", "add_project"),
        ("modeller", "edit_project"),
        ("modeller", "delete_project"),
        ("modeller", "share_project"),
        ("modeller", "edit_data"),
        ("modeller", "view_data"),
        # Manager permissions
        ("manager", "edit_data"),
        ("manager", "view_data"),
    )
    # Map for code to ID
    id_maps_dict = {
        "perm": {},
        "role": {}
    }
    # Adding perms
    perm_dict = {}
    for code, name in default_perms:
        perm = Perm(code=code, name=name)
        perm_dict[code] = perm
        perms_by_name = db.DBSession.query(Perm).filter(Perm.code==code).all()
        if len(perms_by_name)==0:
            # Adding perm
            log.debug("# Adding PERM {}".format(code))
            db.DBSession.add(perm)
            db.DBSession.flush()
        # Record the (possibly pre-existing) perm's id for linking below.
        perm_by_name = db.DBSession.query(Perm).filter(Perm.code==code).one()
        id_maps_dict["perm"][code] = perm_by_name.id
    # Adding roles
    role_dict = {}
    for code, name in default_roles:
        role = Role(code=code, name=name)
        role_dict[code] = role
        roles_by_name = db.DBSession.query(Role).filter(Role.code==code).all()
        if len(roles_by_name)==0:
            # Adding perm
            log.debug("# Adding ROLE {}".format(code))
            db.DBSession.add(role)
            db.DBSession.flush()
        # Record the (possibly pre-existing) role's id for linking below.
        role_by_name = db.DBSession.query(Role).filter(Role.code==code).one()
        id_maps_dict["role"][code] = role_by_name.id
    # Adding connections
    for role_code, perm_code in roleperms:
        #log.info("Link Role:{}({}) <---> Perm:{}({})".format(role_code, id_maps_dict["role"][role_code], perm_code, id_maps_dict["perm"][perm_code]))
        links_found = db.DBSession.query(RolePerm).filter(RolePerm.role_id==id_maps_dict["role"][role_code]).filter(RolePerm.perm_id==id_maps_dict["perm"][perm_code]).all()
        if len(links_found)==0:
            # Adding link
            log.debug("# Adding link")
            roleperm = RolePerm()
            # roleperm.role = role_dict[role_code]
            # roleperm.perm = perm_dict[perm_code]
            roleperm.role_id = id_maps_dict["role"][role_code]
            roleperm.perm_id = id_maps_dict["perm"][perm_code]
            db.DBSession.add(roleperm)
            db.DBSession.flush()
    db.DBSession.flush()
|
Adds the roles and perm to the DB. It adds only roles, perms and links between them that are not inside the db
It is possible adding new role or perm and connecting them just modifiying the following lists
|
def version(self):
    """
    Return the version number of the Lending Club Investor tool

    Returns
    -------
    string
        The version number string
    """
    this_path = os.path.dirname(os.path.realpath(__file__))
    version_file = os.path.join(this_path, 'VERSION')
    # Use a context manager so the file handle is closed deterministically;
    # the previous bare open().read() relied on garbage collection.
    with open(version_file) as f:
        return f.read().strip()
|
Return the version number of the Lending Club Investor tool
Returns
-------
string
The version number string
|
def _decompress_dicom(dicom_file, output_file):
    """
    Convert a (possibly JPEG-)compressed DICOM file into an uncompressed
    copy for further conversion, using the external ``gdcmconv`` tool.

    :param dicom_file: path of the single DICOM file to decompress
    :param output_file: path the decompressed copy is written to
    """
    converter = _get_gdcmconv()
    # -w writes the output as an uncompressed ("raw") DICOM file.
    subprocess.check_output([converter, '-w', dicom_file, output_file])
|
This function can be used to convert a jpeg compressed image to an uncompressed one for further conversion
:param input_file: single dicom file to decompress
|
def create_segments(self, segments):
    """Enqueue segment creates.

    Wraps each segment id in a CREATE :class:`MechResource` and puts it
    on the provisioning queue.
    """
    for segment in segments:
        resource = MechResource(segment['id'], a_const.SEGMENT_RESOURCE,
                                a_const.CREATE)
        self.provision_queue.put(resource)
|
Enqueue segment creates
|
def pyephem_earthsun_distance(time):
    """
    Calculates the distance from the earth to the sun using pyephem.

    Parameters
    ----------
    time : pd.DatetimeIndex

    Returns
    -------
    pd.Series. Earth-sun distance in AU.
    """
    import ephem

    sun = ephem.Sun()

    def _distance_at(timestamp):
        # ephem mutates `sun` in place; read earth_distance after compute().
        sun.compute(ephem.Date(timestamp))
        return sun.earth_distance

    return pd.Series([_distance_at(t) for t in time], index=time)
|
Calculates the distance from the earth to the sun using pyephem.
Parameters
----------
time : pd.DatetimeIndex
Returns
-------
pd.Series. Earth-sun distance in AU.
|
def get_max(array):
    """Get the maximum value of an array, automatically ignoring invalid
    (non-comparable) data such as None or strings mixed with numbers.

    :param array: iterable of candidate values
    :returns: the largest comparable value
    :raises ValueError: if the iterable holds no comparable value

    **中文文档** (original): 获得最大值。
    """
    largest = float('-inf')
    found = False
    for value in array:
        try:
            if value > largest:
                largest = value
                found = True
        except TypeError:
            # value is not comparable with numbers; skip it, preserving
            # the original best-effort behaviour (previously a bare
            # `except: pass`, which also hid unrelated errors).
            continue
    # Tracking `found` explicitly fixes the old sentinel check
    # (np.isinf(largest)), which wrongly raised when the true maximum
    # was +inf.
    if not found:
        raise ValueError("there's no numeric value in array!")
    return largest
|
Get maximum value of an array. Automatically ignore invalid data.
**中文文档**
获得最大值。
|
def _check_convergence(current_position,
                       next_position,
                       current_objective,
                       next_objective,
                       next_gradient,
                       grad_tolerance,
                       f_relative_tolerance,
                       x_tolerance):
    """Checks if the algorithm satisfies the convergence criteria.

    Convergence is declared when any one of three criteria holds: the
    gradient is small, the step taken is small, or the objective changed
    by a small amount relative to its current value.
    """
    small_gradient = norm(next_gradient, dims=1) <= grad_tolerance
    small_step = norm(next_position - current_position, dims=1) <= x_tolerance
    small_decrease = (norm(next_objective - current_objective, dims=0) <=
                      f_relative_tolerance * current_objective)
    return small_gradient | small_step | small_decrease
|
Checks if the algorithm satisfies the convergence criteria.
|
def _decode_image(fobj, session, filename):
    """Reads and decodes an image from a file object as a Numpy array.

    The SUN dataset contains images in several formats (despite the fact that
    all of them have .jpg extension). Some of them are:
    - BMP (RGB)
    - PNG (grayscale, RGBA, RGB interlaced)
    - JPEG (RGB)
    - GIF (1-frame RGB)
    Since TFDS assumes that all images have the same number of channels, we
    convert all of them to RGB.

    Args:
        fobj: File object to read from.
        session: TF session used to decode the images.
        filename: Filename of the original image in the archive.

    Returns:
        Numpy array with shape (height, width, channels).
    """
    buf = fobj.read()
    # First attempt: OpenCV, which handles most of the formats directly.
    image = tfds.core.lazy_imports.cv2.imdecode(
        np.fromstring(buf, dtype=np.uint8), flags=3)  # Note: Converts to RGB.
    if image is None:
        # Fallback: TensorFlow's decoder for images OpenCV rejects.
        logging.warning(
            "Image %s could not be decoded by OpenCV, falling back to TF", filename)
        try:
            image = tf.image.decode_image(buf, channels=3)
            image = session.run(image)
        except tf.errors.InvalidArgumentError:
            logging.fatal("Image %s could not be decoded by Tensorflow", filename)
    # The GIF images contain a single frame.
    if len(image.shape) == 4:  # rank=4 -> rank=3
        image = image.reshape(image.shape[1:])
    return image
|
Reads and decodes an image from a file object as a Numpy array.
The SUN dataset contains images in several formats (despite the fact that
all of them have .jpg extension). Some of them are:
- BMP (RGB)
- PNG (grayscale, RGBA, RGB interlaced)
- JPEG (RGB)
- GIF (1-frame RGB)
Since TFDS assumes that all images have the same number of channels, we
convert all of them to RGB.
Args:
fobj: File object to read from.
session: TF session used to decode the images.
filename: Filename of the original image in the archive.
Returns:
Numpy array with shape (height, width, channels).
|
def get_simulated_data(nmr_problems):
    """Simulate some data.

    This returns the simulated tank observations and the corresponding
    ground truth maximum number of tanks.

    Args:
        nmr_problems (int): the number of problems

    Returns:
        tuple: (observations, nmr_tanks_ground_truth)
    """
    # The number of tanks we observe per problem
    observed_per_problem = 10
    # Ground truth of the estimation problem: the true maximum tank count.
    nmr_tanks_ground_truth = normal(nmr_problems, 1, mean=250, std=30,
                                    ctype='uint')
    # Random tank observations, bounded above by each problem's ground truth.
    observations = uniform(nmr_problems, observed_per_problem, low=0,
                           high=nmr_tanks_ground_truth, ctype='uint')
    return observations, nmr_tanks_ground_truth
|
Simulate some data.
This returns the simulated tank observations and the corresponding ground truth maximum number of tanks.
Args:
nmr_problems (int): the number of problems
Returns:
tuple: (observations, nmr_tanks_ground_truth)
|
def get_instance(page_to_consume):
    """Return an instance of ConsumePage.

    Accepts either a URI string or a parker.Page; instances are cached
    per URI, but the page is fetched and parsed on every call.
    """
    global _instances
    if isinstance(page_to_consume, basestring):
        uri = page_to_consume
        page_to_consume = page.get_instance(uri)
    elif isinstance(page_to_consume, page.Page):
        uri = page_to_consume.uri
    else:
        raise TypeError(
            "get_instance() expects a parker.Page or basestring derivative."
        )
    # Fetch/parse happens even on a cache hit, matching prior behaviour.
    page_to_consume.fetch()
    parsed_page = parser.parse(page_to_consume)
    if uri not in _instances:
        _instances[uri] = ConsumePage(parsed_page)
    return _instances[uri]
|
Return an instance of ConsumePage.
|
def _map_update_posterior(self):
    """Maximum A Posterior (MAP) update of HTFA parameters

    Combines the global prior with the per-subject posteriors gathered in
    ``self.gather_posterior`` to update the global posterior for every
    factor: its center (mean and covariance) and its width (mean and
    variance).

    Returns
    -------
    HTFA
        Returns the instance itself.
    """
    self.global_posterior_ = self.global_prior_.copy()
    prior_centers = self.get_centers(self.global_prior_)
    prior_widths = self.get_widths(self.global_prior_)
    prior_centers_mean_cov = self.get_centers_mean_cov(self.global_prior_)
    prior_widths_mean_var = self.get_widths_mean_var(self.global_prior_)
    # Per-subject layout in gather_posterior: K*n_dim center values
    # followed by K width values.
    center_size = self.K * self.n_dim
    posterior_size = center_size + self.K
    for k in np.arange(self.K):
        # Collect factor k's center and width from every subject.
        next_centers = np.zeros((self.n_dim, self.n_subj))
        next_widths = np.zeros(self.n_subj)
        for s in np.arange(self.n_subj):
            center_start = s * posterior_size
            width_start = center_start + center_size
            start_idx = center_start + k * self.n_dim
            end_idx = center_start + (k + 1) * self.n_dim
            next_centers[:, s] = self.gather_posterior[start_idx:end_idx]\
                .copy()
            next_widths[s] = self.gather_posterior[width_start + k].copy()
        # centers
        posterior_mean, posterior_cov = self._map_update(
            prior_centers[k].T.copy(),
            from_tri_2_sym(prior_centers_mean_cov[k], self.n_dim),
            self.global_centers_cov_scaled,
            next_centers)
        self.global_posterior_[k * self.n_dim:(k + 1) * self.n_dim] =\
            posterior_mean.T
        # Covariances are stored in packed (triangular) form; map_offset[2]
        # is the start of the centers' covariance section.
        start_idx = self.map_offset[2] + k * self.cov_vec_size
        end_idx = self.map_offset[2] + (k + 1) * self.cov_vec_size
        self.global_posterior_[start_idx:end_idx] =\
            from_sym_2_tri(posterior_cov)
        # widths
        # Closed-form Gaussian posterior update of the width's mean
        # (section at map_offset[1]) and variance (at map_offset[3]).
        common = 1.0 /\
            (prior_widths_mean_var[k] + self.global_widths_var_scaled)
        observation_mean = np.mean(next_widths)
        tmp = common * self.global_widths_var_scaled
        self.global_posterior_[self.map_offset[1] + k] = \
            prior_widths_mean_var[k] * common * observation_mean +\
            tmp * prior_widths[k]
        self.global_posterior_[self.map_offset[3] + k] = \
            prior_widths_mean_var[k] * tmp
    return self
|
Maximum A Posterior (MAP) update of HTFA parameters
Returns
-------
HTFA
Returns the instance itself.
|
def delete_all(self):
    '''Deletes all feature collections.

    This does not destroy the ES index, but instead only
    deletes all FCs with the configured document type
    (defaults to ``fc``).
    '''
    try:
        self.conn.indices.delete_mapping(
            index=self.index, doc_type=self.type)
    except TransportError:
        # Mapping (and thus all FCs of this type) is already gone; treat
        # as a no-op but record it. `Logger.warning` replaces the
        # deprecated `Logger.warn` alias.
        logger.warning('type %r in index %r already deleted',
                       self.index, self.type, exc_info=True)
|
Deletes all feature collections.
This does not destroy the ES index, but instead only
deletes all FCs with the configured document type
(defaults to ``fc``).
|
def highpass(cutoff):
    """
    This strategy uses an exponential approximation for cut-off frequency
    calculation, found by matching the one-pole Laplace lowpass filter
    and mirroring the resulting filter to get a highpass.
    """
    # Pole radius from the exponential cutoff approximation; thub shares
    # the value between the two uses below.
    pole = thub(exp(cutoff - pi), 2)
    return (1 - pole) / (1 + pole * z ** -1)
|
This strategy uses an exponential approximation for cut-off frequency
calculation, found by matching the one-pole Laplace lowpass filter
and mirroring the resulting filter to get a highpass.
|
def fit_df(self, labels, dfs, pstate_col=PSTATE_COL):
    """
    Fit the classifier with labels y and DataFrames dfs.

    One POHMM is trained per distinct label, on the DataFrames that carry
    that label; the fitted models are stored in ``self.pohmms``.
    """
    assert len(labels) == len(dfs)
    for unique_label in set(labels):
        matching_dfs = [df for lbl, df in zip(labels, dfs)
                        if lbl == unique_label]
        model = self.pohmm_factory()
        model.fit_df(matching_dfs, pstate_col=pstate_col)
        self.pohmms[unique_label] = model
    return self
|
Fit the classifier with labels y and DataFrames dfs
|
def _request(self, url, params=None):
    """Makes a request using the currently open session.

    :param url: A url fragment to use in the creation of the master url
    :param params: optional dict of query parameters (defaults to none)
    :returns: the response object from the session's GET call
    """
    # Avoid a mutable default argument: a shared dict literal in the
    # signature could leak state between calls if ever mutated.
    if params is None:
        params = {}
    r = self._session.get(url=url, params=params, headers=DEFAULT_ORIGIN)
    return r
|
Makes a request using the currently open session.
:param url: A url fragment to use in the creation of the master url
|
def cross_entropy_error(self, input_data, targets, average=True,
                        cache=None, prediction=False,
                        sum_errors=True):
    """ Computes the cross-entropy error for all tasks.

    :param input_data: input shared by all tasks
    :param targets: per-task target sequences (one entry per task)
    :param average: passed through to each task's error computation
    :param cache: optional per-task cache list; defaults to one None per task
    :param prediction: passed through to each task's error computation
    :param sum_errors: when True return the summed loss, otherwise the
        per-task list of losses
    """
    loss = []
    if cache is None:
        cache = self.n_tasks * [None]
    # Walk tasks together with their targets and cache entries.
    for targets_task, cache_task, task in \
            izip(targets, cache, self.tasks):
        loss.append(task.cross_entropy_error(
            input_data, targets_task, average=average,
            cache=cache_task,
            prediction=prediction))
    if sum_errors:
        return sum(loss)
    else:
        return loss
|
Computes the cross-entropy error for all tasks.
|
def install(cls, uninstallable, prefix, path_items, root=None, warning=None):
    """Install an importer for modules found under ``path_items`` at the given import ``prefix``.

    :param bool uninstallable: ``True`` if the installed importer should be uninstalled and any
                               imports it performed be un-imported when ``uninstall`` is called.
    :param str prefix: The import prefix the installed importer will be responsible for.
    :param path_items: The paths relative to ``root`` containing modules to expose for import under
                       ``prefix``.
    :param str root: The root path of the distribution containing the vendored code. NB: This is the
                     the path to the pex code, which serves as the root under which code is vendored
                     at ``pex/vendor/_vendored``.
    :param str warning: An optional warning to emit if any imports are made through the installed
                        importer.
    :return: the installed importer instance
    """
    root = cls._abs_root(root)
    importables = tuple(cls._iter_importables(root=root, path_items=path_items, prefix=prefix))
    vendor_importer = cls(root=root,
                          importables=importables,
                          uninstallable=uninstallable,
                          warning=warning)
    # Front of meta_path so this importer wins over the default finders.
    sys.meta_path.insert(0, vendor_importer)
    _tracer().log('Installed {}'.format(vendor_importer), V=3)
    return vendor_importer
|
Install an importer for modules found under ``path_items`` at the given import ``prefix``.
:param bool uninstallable: ``True`` if the installed importer should be uninstalled and any
imports it performed be un-imported when ``uninstall`` is called.
:param str prefix: The import prefix the installed importer will be responsible for.
:param path_items: The paths relative to ``root`` containing modules to expose for import under
``prefix``.
:param str root: The root path of the distribution containing the vendored code. NB: This is the
the path to the pex code, which serves as the root under which code is vendored
at ``pex/vendor/_vendored``.
:param str warning: An optional warning to emit if any imports are made through the installed
importer.
:return:
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.