| code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k) |
|---|---|
def stop_step(self, step_name):
""" Stop a step. """
if self.finished is not None:
raise AlreadyFinished()
steps = copy.deepcopy(self.steps)
step_data = self._get_step(step_name, steps=steps)
if step_data is None:
raise StepNotStarted()
elif 'stop' in step_data:
raise StepAlreadyFinished()
step_data['stop'] = datetime.utcnow()
step_data['duration'] = util.timedelta_total_seconds(step_data['stop'] - step_data['start'])
self._save(steps=steps)
|
Stop a step.
|
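A minimal standalone sketch of the timing flow stop_step implements, using only the standard library (the surrounding class, util helper and custom exceptions above are assumed to exist elsewhere):
import datetime

start = datetime.datetime.utcnow()            # recorded earlier by the matching start_step call
# ... the step does its work ...
stop = datetime.datetime.utcnow()
duration = (stop - start).total_seconds()     # what util.timedelta_total_seconds computes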
def get_html(self,
url,
params=None,
cache_cb=None,
decoder_encoding=None,
decoder_errors=url_specified_decoder.ErrorsHandle.strict,
**kwargs):
"""
Get the HTML of a URL.
"""
response = self.get(
url=url,
params=params,
cache_cb=cache_cb,
**kwargs
)
return url_specified_decoder.decode(
binary=response.content,
url=response.url,
encoding=decoder_encoding,
errors=decoder_errors,
)
|
Get the HTML of a URL.
|
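A rough standalone equivalent of the fetch-then-decode flow, sketched with requests (the real method delegates decoding to url_specified_decoder, which is not shown here):
import requests

response = requests.get("https://example.com", params=None)
# stand-in for url_specified_decoder.decode(binary=..., url=..., encoding=..., errors=...)
html = response.content.decode("utf-8", errors="strict")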
def create_binary_descriptor(streamer):
"""Create a packed binary descriptor of a DataStreamer object.
Args:
streamer (DataStreamer): The streamer to create a packed descriptor for
Returns:
bytes: A packed 14-byte streamer descriptor.
"""
trigger = 0
if streamer.automatic:
trigger = 1
elif streamer.with_other is not None:
trigger = (1 << 7) | streamer.with_other
return struct.pack("<8sHBBBx", streamer.dest.encode(), streamer.selector.encode(), trigger, streamer.KnownFormats[streamer.format], streamer.KnownTypes[streamer.report_type])
|
Create a packed binary descriptor of a DataStreamer object.
Args:
streamer (DataStreamer): The streamer to create a packed descriptor for
Returns:
bytes: A packed 14-byte streamer descriptor.
|
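A quick check that the "<8sHBBBx" layout really packs into the documented 14 bytes (the field values below are made up; only the sizes matter):
import struct

# 8-byte dest name, 2-byte selector, trigger byte, format byte, type byte, 1 pad byte
packed = struct.pack("<8sHBBBx", b"output 1", 0xFFFF, 1, 0, 0)
assert len(packed) == struct.calcsize("<8sHBBBx") == 14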
def common_wire_version(self):
"""Minimum of all servers' max wire versions, or None."""
servers = self.known_servers
if servers:
return min(s.max_wire_version for s in self.known_servers)
return None
|
Minimum of all servers' max wire versions, or None.
|
def _read_composites(self, compositor_nodes):
"""Read (generate) composites."""
keepables = set()
for item in compositor_nodes:
self._generate_composite(item, keepables)
return keepables
|
Read (generate) composites.
|
def _calculateBasalLearning(self,
activeColumns,
burstingColumns,
correctPredictedCells,
activeBasalSegments,
matchingBasalSegments,
basalPotentialOverlaps):
"""
Basic Temporal Memory learning. Correctly predicted cells always have
active basal segments, and we learn on these segments. In bursting
columns, we either learn on an existing basal segment, or we grow a new one.
The only influence apical dendrites have on basal learning is: the apical
dendrites influence which cells are considered "predicted". So an active
apical dendrite can prevent some basal segments in active columns from
learning.
@param activeColumns (numpy array)
@param correctPredictedCells (numpy array)
@param burstingColumns (numpy array)
@param activeBasalSegments (numpy array)
@param matchingBasalSegments (numpy array)
@param basalPotentialOverlaps (numpy array)
@return (tuple)
- learningActiveBasalSegments (numpy array)
Active basal segments on correct predicted cells
- learningMatchingBasalSegments (numpy array)
Matching basal segments selected for learning in bursting columns
- basalSegmentsToPunish (numpy array)
Basal segments that should be punished for predicting an inactive column
- newBasalSegmentCells (numpy array)
Cells in bursting columns that were selected to grow new basal segments
- learningCells (numpy array)
Cells that have learning basal segments or are selected to grow a basal
segment
"""
# Correctly predicted columns
learningActiveBasalSegments = self.basalConnections.filterSegmentsByCell(
activeBasalSegments, correctPredictedCells)
cellsForMatchingBasal = self.basalConnections.mapSegmentsToCells(
matchingBasalSegments)
matchingCells = np.unique(cellsForMatchingBasal)
(matchingCellsInBurstingColumns,
burstingColumnsWithNoMatch) = np2.setCompare(
matchingCells, burstingColumns, matchingCells / self.cellsPerColumn,
rightMinusLeft=True)
learningMatchingBasalSegments = self._chooseBestSegmentPerColumn(
self.basalConnections, matchingCellsInBurstingColumns,
matchingBasalSegments, basalPotentialOverlaps, self.cellsPerColumn)
newBasalSegmentCells = self._getCellsWithFewestSegments(
self.basalConnections, self.rng, burstingColumnsWithNoMatch,
self.cellsPerColumn)
learningCells = np.concatenate(
(correctPredictedCells,
self.basalConnections.mapSegmentsToCells(learningMatchingBasalSegments),
newBasalSegmentCells))
# Incorrectly predicted columns
correctMatchingBasalMask = np.in1d(
cellsForMatchingBasal / self.cellsPerColumn, activeColumns)
basalSegmentsToPunish = matchingBasalSegments[~correctMatchingBasalMask]
return (learningActiveBasalSegments,
learningMatchingBasalSegments,
basalSegmentsToPunish,
newBasalSegmentCells,
learningCells)
|
Basic Temporal Memory learning. Correctly predicted cells always have
active basal segments, and we learn on these segments. In bursting
columns, we either learn on an existing basal segment, or we grow a new one.
The only influence apical dendrites have on basal learning is: the apical
dendrites influence which cells are considered "predicted". So an active
apical dendrite can prevent some basal segments in active columns from
learning.
@param activeColumns (numpy array)
@param correctPredictedCells (numpy array)
@param burstingColumns (numpy array)
@param activeBasalSegments (numpy array)
@param matchingBasalSegments (numpy array)
@param basalPotentialOverlaps (numpy array)
@return (tuple)
- learningActiveBasalSegments (numpy array)
Active basal segments on correct predicted cells
- learningMatchingBasalSegments (numpy array)
Matching basal segments selected for learning in bursting columns
- basalSegmentsToPunish (numpy array)
Basal segments that should be punished for predicting an inactive column
- newBasalSegmentCells (numpy array)
Cells in bursting columns that were selected to grow new basal segments
- learningCells (numpy array)
Cells that have learning basal segments or are selected to grow a basal
segment
|
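A small numpy-only sketch of the punishment mask built near the end of the method: matching segments whose cell's column is not among the active columns are selected for punishment (toy values; // replaces the Python 2 integer division used above):
import numpy as np

cellsPerColumn = 4
cellsForMatchingBasal = np.array([0, 5, 9, 13])       # cells owning the matching segments
activeColumns = np.array([1, 2])
correctMask = np.in1d(cellsForMatchingBasal // cellsPerColumn, activeColumns)
matchingBasalSegments = np.arange(len(cellsForMatchingBasal))
basalSegmentsToPunish = matchingBasalSegments[~correctMask]
print(basalSegmentsToPunish)                           # segments that predicted an inactive column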
def fmt_routes(bottle_app):
"""Return a pretty formatted string of the list of routes."""
routes = [(r.method, r.rule) for r in bottle_app.routes]
if not routes:
return
string = 'Routes:\n'
string += fmt_pairs(routes, sort_key=operator.itemgetter(1))
return string
|
Return a pretty formatted string of the list of routes.
|
async def start_component_in_thread(executor, workload: CoroutineFunction[T], *args: Any, loop=None, **kwargs: Any) -> Component[T]:
"""\
Starts the passed `workload` with additional `commands` and `events` pipes.
The workload will be executed on an event loop in a new thread; the thread is provided by `executor`.
This function is not compatible with `ProcessPoolExecutor`,
as references between the workload and component are necessary.
Be careful when using an executor with a maximum number of threads,
as long running workloads may starve other tasks.
Consider using a dedicated executor that can spawn at least as many threads
as concurrent long-running tasks are expected.
"""
loop = loop or asyncio.get_event_loop()
commands_a, commands_b = pipe(loop=loop)
events_a, events_b = pipe(loop=loop)
commands_b = ConcurrentPipeEnd(commands_b, loop=loop)
events_b = ConcurrentPipeEnd(events_b, loop=loop)
_workload = workload(*args, commands=commands_b, events=events_b, **kwargs)
future = cast(_Future[T], loop.run_in_executor(executor, asyncio.run, _workload))
component = Component[T](commands_a, events_a, future)
await component.wait_for_start()
return component
|
\
Starts the passed `workload` with additional `commands` and `events` pipes.
The workload will be executed on an event loop in a new thread; the thread is provided by `executor`.
This function is not compatible with `ProcessPoolExecutor`,
as references between the workload and component are necessary.
Be careful when using an executor with a maximum number of threads,
as long running workloads may starve other tasks.
Consider using a dedicated executor that can spawn at least as many threads
as concurrent long-running tasks are expected.
|
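The core trick above is handing asyncio.run to an executor so the workload gets its own event loop on another thread; a minimal sketch of just that part (pipes and the Component wrapper omitted):
import asyncio
from concurrent.futures import ThreadPoolExecutor

async def workload():
    await asyncio.sleep(0.1)
    return "done"

async def main():
    loop = asyncio.get_running_loop()
    with ThreadPoolExecutor(max_workers=1) as executor:
        # runs workload() on a fresh event loop inside the executor thread
        result = await loop.run_in_executor(executor, asyncio.run, workload())
    print(result)

asyncio.run(main())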
def match_uriinfo(cls, info):
"""
:param info: an :py:class:`~httpretty.core.URIInfo`
:returns: a 2-item tuple: (:py:class:`~httpretty.core.URLMatcher`, :py:class:`~httpretty.core.URIInfo`) or ``(None, [])``
"""
items = sorted(
cls._entries.items(),
key=lambda matcher_entries: matcher_entries[0].priority,
reverse=True,
)
for matcher, value in items:
if matcher.matches(info):
return (matcher, info)
return (None, [])
|
:param info: an :py:class:`~httpretty.core.URIInfo`
:returns: a 2-item tuple: (:py:class:`~httpretty.core.URLMatcher`, :py:class:`~httpretty.core.URIInfo`) or ``(None, [])``
|
def blend(self, clr, factor=0.5):
"""
Returns a mix of two colors.
"""
r = self.r * (1 - factor) + clr.r * factor
g = self.g * (1 - factor) + clr.g * factor
b = self.b * (1 - factor) + clr.b * factor
a = self.a * (1 - factor) + clr.a * factor
return Color(r, g, b, a, mode="rgb")
|
Returns a mix of two colors.
|
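The blend is a plain linear interpolation per channel; for a single channel:
r_self, r_other, factor = 0.2, 0.8, 0.5
blended = r_self * (1 - factor) + r_other * factor   # 0.5; factor=0 keeps self, factor=1 yields clr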
def make_spiral_texture(spirals=6.0, ccw=False, offset=0.0, resolution=1000):
"""Makes a texture consisting of a spiral from the origin.
Args:
spirals (float): the number of rotations to make
ccw (bool): make spirals counter-clockwise (default is clockwise)
offset (float): if non-zero, spirals start offset by this amount
resolution (int): number of midpoints along the spiral
Returns:
A texture.
"""
dist = np.sqrt(np.linspace(0., 1., resolution))
if ccw:
direction = 1.
else:
direction = -1.
angle = dist * spirals * np.pi * 2. * direction
spiral_texture = (
(np.cos(angle) * dist / 2.) + 0.5,
(np.sin(angle) * dist / 2.) + 0.5
)
return spiral_texture
|
Makes a texture consisting of a spiral from the origin.
Args:
spirals (float): the number of rotations to make
ccw (bool): make spirals counter-clockwise (default is clockwise)
offset (float): if non-zero, spirals start offset by this amount
resolution (int): number of midpoints along the spiral
Returns:
A texture.
|
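A quick usage check of the function above: both coordinate arrays stay inside the unit square because the radius is scaled to [0, 0.5] and re-centered at 0.5.
import numpy as np

x, y = make_spiral_texture(spirals=3.0, resolution=200)
assert np.all((x >= 0.0) & (x <= 1.0))
assert np.all((y >= 0.0) & (y <= 1.0))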
def get(self, oid):
"""
Get a single OID value.
"""
snmpsecurity = self._get_snmp_security()
try:
engine_error, pdu_error, pdu_error_index, objects = self._cmdgen.getCmd(
snmpsecurity,
cmdgen.UdpTransportTarget((self.host, self.port), timeout=self.timeout,
retries=self.retries),
oid,
)
except Exception as e:
raise SNMPError(e)
if engine_error:
raise SNMPError(engine_error)
if pdu_error:
raise SNMPError(pdu_error.prettyPrint())
_, value = objects[0]
value = _convert_value_to_native(value)
return value
|
Get a single OID value.
|
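A hedged standalone sketch using the legacy pysnmp oneliner API that this method appears to wrap (host, community string and OID are placeholders):
from pysnmp.entity.rfc3413.oneliner import cmdgen

cmd_gen = cmdgen.CommandGenerator()
error_indication, error_status, error_index, var_binds = cmd_gen.getCmd(
    cmdgen.CommunityData('public'),
    cmdgen.UdpTransportTarget(('198.51.100.1', 161), timeout=2, retries=1),
    '1.3.6.1.2.1.1.1.0',            # sysDescr.0
)
if not error_indication and not error_status:
    oid, value = var_binds[0]
    print(value.prettyPrint())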
def validate(self,
_portfolio,
account,
algo_datetime,
_algo_current_data):
"""
Make validation checks if we are after the deadline.
Fail if the leverage is less than the min leverage.
"""
if (algo_datetime > self.deadline and
account.leverage < self.min_leverage):
self.fail()
|
Make validation checks if we are after the deadline.
Fail if the leverage is less than the min leverage.
|
def get_race_card(self, market_ids, data_entries=None, session=None, lightweight=None):
"""
Returns a list of race cards based on market ids provided.
:param list market_ids: The filter to select desired markets
:param str data_entries: Data to be returned
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.RaceCard]
"""
if not self.app_key:
raise RaceCardError("You need to login before requesting a race_card\n"
"APIClient.race_card.login()")
params = self.create_race_card_req(market_ids, data_entries)
(response, elapsed_time) = self.request(params=params, session=session)
return self.process_response(response, resources.RaceCard, elapsed_time, lightweight)
|
Returns a list of race cards based on market ids provided.
:param list market_ids: The filter to select desired markets
:param str data_entries: Data to be returned
:param requests.session session: Requests session object
:param bool lightweight: If True will return dict not a resource
:rtype: list[resources.RaceCard]
|
def native(s):
"""
Convert :py:class:`bytes` or :py:class:`unicode` to the native
:py:class:`str` type, using UTF-8 encoding if conversion is necessary.
:raise UnicodeError: The input string is not UTF-8 decodeable.
:raise TypeError: The input is neither :py:class:`bytes` nor
:py:class:`unicode`.
"""
if not isinstance(s, (binary_type, text_type)):
raise TypeError("%r is neither bytes nor unicode" % s)
if PY3:
if isinstance(s, binary_type):
return s.decode("utf-8")
else:
if isinstance(s, text_type):
return s.encode("utf-8")
return s
|
Convert :py:class:`bytes` or :py:class:`unicode` to the native
:py:class:`str` type, using UTF-8 encoding if conversion is necessary.
:raise UnicodeError: The input string is not UTF-8 decodeable.
:raise TypeError: The input is neither :py:class:`bytes` nor
:py:class:`unicode`.
|
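Usage of the helper above under Python 3 semantics (binary_type and text_type come from six in the original module):
assert native(b"caf\xc3\xa9") == "café"   # bytes are UTF-8 decoded
assert native("café") == "café"           # text passes through unchanged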
def register(self, key, value, type_info):
""" Registers a callable with the specified key and type info.
`key`
String key to identify a callable.
`value`
Callable object.
`type_info`
Dictionary with type information about the value provided.
"""
# check for existing action
old_action = self._actions.get(key)
# update existing type info if value hasn't changed
if old_action == value and key in self._type_info:
self._type_info[key].update(type_info)
else:
self._type_info[key] = dict(type_info)
super(ExtRegistry, self).register(key, value)
|
Registers a callable with the specified key and type info.
`key`
String key to identify a callable.
`value`
Callable object.
`type_info`
Dictionary with type information about the value provided.
|
def _get_chart(self, chart_type, x=None, y=None, style=None, opts=None,
label=None, options={}, **kwargs):
"""
Get a full chart object
"""
sbcharts = ["density", "distribution", "dlinear"]
acharts = ["tick", "circle", "text", "line_num", "bar_num"]
if chart_type in sbcharts:
self._set_seaborn_engine()
if chart_type in acharts:
self._set_altair_engine()
if chart_type != "sline":
x, y = self._check_fields(x, y)
if opts is None:
opts = self.chart_opts
if style is None:
style = self.chart_style
if self.engine == "bokeh":
func = self._get_bokeh_chart
elif self.engine == "altair":
func = self._get_altair_chart
elif self.engine == "chartjs":
func = self._get_chartjs_chart
elif self.engine == "seaborn":
func = self._get_seaborn_chart
else:
self.err("Engine " + self.engine + " unknown")
return
try:
chart = func(
x, y, chart_type, label, opts, style,
options=options, **kwargs)
return chart
except Exception as e:
self.err(e)
|
Get a full chart object
|
def entry_at(cls, filepath, index):
""":return: RefLogEntry at the given index
:param filepath: full path to the index file from which to read the entry
:param index: python list compatible index, i.e. it may be negative to
specify an entry counted from the end of the list
:raise IndexError: If the entry didn't exist
.. note:: This method is faster as it only parses the entry at index, skipping
all other lines. Nonetheless, the whole file has to be read if
the index is negative
"""
fp = open(filepath, 'rb')
if index < 0:
return RefLogEntry.from_line(fp.readlines()[index].strip())
else:
# read until index is reached
for i in xrange(index + 1):
line = fp.readline()
if not line:
break
# END abort on eof
# END handle runup
if i != index or not line:
raise IndexError
# END handle exception
return RefLogEntry.from_line(line.strip())
|
:return: RefLogEntry at the given index
:param filepath: full path to the index file from which to read the entry
:param index: python list compatible index, i.e. it may be negative to
specify an entry counted from the end of the list
:raise IndexError: If the entry didn't exist
.. note:: This method is faster as it only parses the entry at index, skipping
all other lines. Nonetheless, the whole file has to be read if
the index is negative
|
def set_max_attempts(self, value):
"""stub"""
if value is None:
raise InvalidArgument('value must be an integer')
if value is not None and not isinstance(value, int):
raise InvalidArgument('value is not an integer')
if not self.my_osid_object_form._is_valid_integer(value,
self.get_max_attempts_metadata()):
raise InvalidArgument('value must be an integer')
self.my_osid_object_form._my_map['maxAttempts'] = value
|
stub
|
def set_vibration(self, left_motor, right_motor, duration):
"""Control the speed of both motors seperately or together.
left_motor and right_motor arguments require a number between
0 (off) and 1 (full).
duration is miliseconds, e.g. 1000 for a second."""
if WIN:
self._set_vibration_win(left_motor, right_motor, duration)
elif NIX:
self._set_vibration_nix(left_motor, right_motor, duration)
else:
raise NotImplementedError
|
Control the speed of both motors separately or together.
left_motor and right_motor arguments require a number between
0 (off) and 1 (full).
duration is milliseconds, e.g. 1000 for a second.
|
def is_in_schedule_mode(self):
"""Returns True if base_station is currently on a scheduled mode."""
resource = "schedule"
mode_event = self.publish_and_get_event(resource)
if mode_event and mode_event.get("resource", None) == "schedule":
properties = mode_event.get('properties')
return properties.get("active", False)
return False
|
Returns True if base_station is currently on a scheduled mode.
|
def _changes(name,
uid=None,
gid=None,
groups=None,
optional_groups=None,
remove_groups=True,
home=None,
createhome=True,
password=None,
enforce_password=True,
empty_password=False,
shell=None,
fullname='',
roomnumber='',
workphone='',
homephone='',
other='',
loginclass=None,
date=None,
mindays=0,
maxdays=999999,
inactdays=0,
warndays=7,
expire=None,
win_homedrive=None,
win_profile=None,
win_logonscript=None,
win_description=None,
allow_uid_change=False,
allow_gid_change=False):
'''
Return a dict of the changes required for a user if the user is present,
otherwise return False.
Updated in 2015.8.0 to include support for windows homedrive, profile,
logonscript, and description fields.
Updated in 2014.7.0 to include support for shadow attributes, all
attributes supported as integers only.
'''
if 'shadow.info' in __salt__:
lshad = __salt__['shadow.info'](name)
lusr = __salt__['user.info'](name)
if not lusr:
return False
change = {}
if groups is None:
groups = lusr['groups']
wanted_groups = sorted(set((groups or []) + (optional_groups or [])))
if uid and lusr['uid'] != uid:
change['uid'] = uid
if gid is not None and lusr['gid'] not in (gid, __salt__['file.group_to_gid'](gid)):
change['gid'] = gid
default_grp = __salt__['file.gid_to_group'](
gid if gid is not None else lusr['gid']
)
# remove the default group from the list for comparison purposes
if default_grp in lusr['groups']:
lusr['groups'].remove(default_grp)
if name in lusr['groups'] and name not in wanted_groups:
lusr['groups'].remove(name)
# remove default group from wanted_groups, as this requirement is
# already met
if default_grp in wanted_groups:
wanted_groups.remove(default_grp)
if _group_changes(lusr['groups'], wanted_groups, remove_groups):
change['groups'] = wanted_groups
if home and lusr['home'] != home:
change['home'] = home
if createhome:
newhome = home if home else lusr['home']
if newhome is not None and not os.path.isdir(newhome):
change['homeDoesNotExist'] = newhome
if shell and lusr['shell'] != shell:
change['shell'] = shell
if 'shadow.info' in __salt__ and 'shadow.default_hash' in __salt__:
if password and not empty_password:
default_hash = __salt__['shadow.default_hash']()
if lshad['passwd'] == default_hash \
or lshad['passwd'] != default_hash and enforce_password:
if lshad['passwd'] != password:
change['passwd'] = password
if empty_password and lshad['passwd'] != '':
change['empty_password'] = True
if date is not None and lshad['lstchg'] != date:
change['date'] = date
if mindays is not None and lshad['min'] != mindays:
change['mindays'] = mindays
if maxdays is not None and lshad['max'] != maxdays:
change['maxdays'] = maxdays
if inactdays is not None and lshad['inact'] != inactdays:
change['inactdays'] = inactdays
if warndays is not None and lshad['warn'] != warndays:
change['warndays'] = warndays
if expire and lshad['expire'] != expire:
change['expire'] = expire
elif 'shadow.info' in __salt__ and salt.utils.platform.is_windows():
if expire and expire != -1 and salt.utils.dateutils.strftime(lshad['expire']) != salt.utils.dateutils.strftime(expire):
change['expire'] = expire
# GECOS fields
fullname = salt.utils.data.decode(fullname)
lusr['fullname'] = salt.utils.data.decode(lusr['fullname'])
if fullname is not None and lusr['fullname'] != fullname:
change['fullname'] = fullname
if win_homedrive and lusr['homedrive'] != win_homedrive:
change['homedrive'] = win_homedrive
if win_profile and lusr['profile'] != win_profile:
change['profile'] = win_profile
if win_logonscript and lusr['logonscript'] != win_logonscript:
change['logonscript'] = win_logonscript
if win_description and lusr['description'] != win_description:
change['description'] = win_description
# MacOS doesn't have full GECOS support, so check for the "ch" functions
# and ignore these parameters if these functions do not exist.
if 'user.chroomnumber' in __salt__ \
and roomnumber is not None:
roomnumber = salt.utils.data.decode(roomnumber)
lusr['roomnumber'] = salt.utils.data.decode(lusr['roomnumber'])
if lusr['roomnumber'] != roomnumber:
change['roomnumber'] = roomnumber
if 'user.chworkphone' in __salt__ \
and workphone is not None:
workphone = salt.utils.data.decode(workphone)
lusr['workphone'] = salt.utils.data.decode(lusr['workphone'])
if lusr['workphone'] != workphone:
change['workphone'] = workphone
if 'user.chhomephone' in __salt__ \
and homephone is not None:
homephone = salt.utils.data.decode(homephone)
lusr['homephone'] = salt.utils.data.decode(lusr['homephone'])
if lusr['homephone'] != homephone:
change['homephone'] = homephone
if 'user.chother' in __salt__ and other is not None:
other = salt.utils.data.decode(other)
lusr['other'] = salt.utils.data.decode(lusr['other'])
if lusr['other'] != other:
change['other'] = other
# OpenBSD/FreeBSD login class
if __grains__['kernel'] in ('OpenBSD', 'FreeBSD'):
if loginclass:
if __salt__['user.get_loginclass'](name) != loginclass:
change['loginclass'] = loginclass
errors = []
if not allow_uid_change and 'uid' in change:
errors.append(
'Changing uid ({0} -> {1}) not permitted, set allow_uid_change to '
'True to force this change. Note that this will not change file '
'ownership.'.format(lusr['uid'], uid)
)
if not allow_gid_change and 'gid' in change:
errors.append(
'Changing gid ({0} -> {1}) not permitted, set allow_gid_change to '
'True to force this change. Note that this will not change file '
'ownership.'.format(lusr['gid'], gid)
)
if errors:
raise CommandExecutionError(
'Encountered error checking for needed changes',
info=errors
)
return change
|
Return a dict of the changes required for a user if the user is present,
otherwise return False.
Updated in 2015.8.0 to include support for windows homedrive, profile,
logonscript, and description fields.
Updated in 2014.7.0 to include support for shadow attributes, all
attributes supported as integers only.
|
def backward_committor(T, A, B, mu=None):
r"""Backward committor between given sets.
The backward committor u(x) between sets A and B is the
probability for the chain starting in x to have come from A last
rather than from B.
Parameters
----------
T : (M, M) ndarray
Transition matrix
A : array_like
List of integer state labels for set A
B : array_like
List of integer state labels for set B
mu : (M, ) ndarray (optional)
Stationary vector
Returns
-------
u : (M, ) ndarray
Vector of backward committor probabilities
Notes
-----
The backward committor is a solution to the following
boundary-value problem
.. math::
    \sum_j K_{ij} \pi_j u_j = 0 \quad \text{for } i \in X \setminus (A \cup B) \quad (I)
    u_i = 1 \quad \text{for } i \in A \quad (II)
    u_i = 0 \quad \text{for } i \in B \quad (III)
with adjoint of the generator matrix K=(D_pi(P-I))'.
"""
X = set(range(T.shape[0]))
A = set(A)
B = set(B)
AB = A.intersection(B)
notAB = X.difference(A).difference(B)
if len(AB) > 0:
raise ValueError("Sets A and B have to be disjoint")
if mu is None:
mu = stationary_distribution(T)
K = np.transpose(mu[:, np.newaxis] * (T - np.eye(T.shape[0])))
"""Assemble left-hand side W for linear system"""
"""Equation (I)"""
W = 1.0 * K
"""Equation (II)"""
W[list(A), :] = 0.0
W[list(A), list(A)] = 1.0
"""Equation (III)"""
W[list(B), :] = 0.0
W[list(B), list(B)] = 1.0
"""Assemble right-hand side r for linear system"""
"""Equation (I)+(III)"""
r = np.zeros(T.shape[0])
"""Equation (II)"""
r[list(A)] = 1.0
u = solve(W, r)
return u
|
r"""Backward committor between given sets.
The backward committor u(x) between sets A and B is the
probability for the chain starting in x to have come from A last
rather than from B.
Parameters
----------
T : (M, M) ndarray
Transition matrix
A : array_like
List of integer state labels for set A
B : array_like
List of integer state labels for set B
mu : (M, ) ndarray (optional)
Stationary vector
Returns
-------
u : (M, ) ndarray
Vector of backward committor probabilities
Notes
-----
The backward committor is a solution to the following
boundary-value problem
.. math::
    \sum_j K_{ij} \pi_j u_j = 0 \quad \text{for } i \in X \setminus (A \cup B) \quad (I)
    u_i = 1 \quad \text{for } i \in A \quad (II)
    u_i = 0 \quad \text{for } i \in B \quad (III)
with adjoint of the generator matrix K=(D_pi(P-I))'.
|
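A self-contained numeric sketch of the boundary-value problem described in the Notes, for a toy 3-state chain (numpy only; stationary_distribution and solve from the original module are replaced by numpy equivalents):
import numpy as np

T = np.array([[0.8, 0.2, 0.0],
              [0.1, 0.8, 0.1],
              [0.0, 0.2, 0.8]])
# stationary distribution: left eigenvector of T for eigenvalue 1
w, v = np.linalg.eig(T.T)
mu = np.real(v[:, np.argmax(np.real(w))])
mu /= mu.sum()
K = (mu[:, None] * (T - np.eye(3))).T      # adjoint generator
A, B = [0], [2]
W = K.copy()
W[A, :] = 0.0; W[A, A] = 1.0               # equation (II)
W[B, :] = 0.0; W[B, B] = 1.0               # equation (III)
r = np.zeros(3); r[A] = 1.0
u = np.linalg.solve(W, r)
print(u)                                   # u[0]=1, u[2]=0, u[1] in between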
def cache(self, checkvalidity=True, staleonly=False, allowraise=True):
""" Re-caches the Symbol's datatable by querying each Feed.
Parameters
----------
checkvalidity : bool, optional
Optionally, check validity post-cache. Improve speed by
turning to False.
staleonly : bool, default False
Set to True, for speed up, by looking at staleness
allowraise : bool, default True
AND with the Symbol.handle and Feed.handle's 'raise',
set to False, to do a list of symbols. Note, this
won't silence bugs in Trump, eg. unhandled edge cases.
So, those still need to be handled by the application.
Returns
-------
SymbolReport
"""
note = "staleonly = {}".format(staleonly)
self._log_an_event('CACHE','START',note)
docache = True
if staleonly:
lc = self.last_cache()
if lc:
freshthresh = self.freshthresh
nw = dt.datetime.now()
freshness = (nw - lc).total_seconds() / 60.0
if freshness <= freshthresh:
docache = False
smrp = SymbolReport(self.name)
if docache:
data = []
cols = ['final', 'override_feed000', 'failsafe_feed999']
if len(self.feeds) == 0:
err_msg = "Symbol has no Feeds. Can't cache a feed-less Symbol."
raise Exception(err_msg)
try:
datt = datadefs[self.dtype.datadef]
indtt = indexingtypes[self.index.indimp]
indkwargs = self.index.getkwargs()
indt = indtt(self.index.case, **indkwargs)
rp = ReportPoint('datadef', 'class', datt)
smrp.add_reportpoint(rp)
for afeed in self.feeds:
fdrp = afeed.cache(allowraise)
smrp.add_feedreport(fdrp)
tmp = datt(afeed.data).converted
tmp = indt.process_post_feed_cache(tmp)
data.append(tmp)
cols.append(afeed.data.name)
except:
point = "caching"
smrp = self._generic_exception(point, smrp, allowraise)
try:
data = pd.concat(data, axis=1)
except:
point = "concatenation"
smrp = self._generic_exception(point, smrp, allowraise)
# We shouldn't need to do anything here, as the concatenation
# should be smooth...
# preindlen = len(data)
#
#
# if preindlen > 0 :
# #indt = indtt(data, self.index.case, indkwargs)
# #data = indt.final_dataframe()
# data = indt.process_post_concat(data)
#
# postindlen = len(data)
# if postindlen == 0 and preindlen > 0:
# raise Exception("Indexing Implementer likely poorly designed")
# else:
# postindlen = 0
def build_hi_df(which, colname):
objs = object_session(self)
qry = objs.query(which.ind,
func.max(which.dt_log).label('max_dt_log'))
qry = qry.filter_by(symname = self.name)
grb = qry.group_by(which.ind).subquery()
qry = objs.query(which)
ords = qry.join((grb, and_(which.ind == grb.c.ind,
which.dt_log == grb.c.max_dt_log))).all()
if len(ords):
orind = [row.ind for row in ords]
orval = [row.val for row in ords]
ordf = indt.build_ordf(orind, orval, colname)
else:
ordf = pd.DataFrame(columns=[colname])
return ordf
ordf = build_hi_df(Override, 'override_feed000')
fsdf = build_hi_df(FailSafe, 'failsafe_feed999')
orfsdf = pd.merge(ordf, fsdf, how='outer', left_index=True, right_index=True)
data = pd.merge(orfsdf, data, how='outer', left_index=True, right_index=True)
data = indt.process_post_orfs(data)
try:
data = data.fillna(value=pd.np.nan)
data = data[sorted_feed_cols(data)]
data['final'] = FeedAggregator(self.agg_method).aggregate(data)
except:
point = "aggregation"
smrp = self._generic_exception(point, smrp, allowraise)
# SQLAQ There are several states to deal with at this point
# A) the datatable exists but a feed has been added
# B) the datatable doesn't exist and needs to be created
# C) the datatable needs to be updated for more or less feeds
# D) the datatable_exists flag is incorrect because all edge cases
# haven't been handled yet.
#
# My logic is that once Trump is more functional, I'll be able to
# eliminate this hacky solution. But, SQLAlchemy might have
# a more elegant answer. A check, of some kind, prior to deletion?
# if not self.datatable_exists:
# self._init_datatable() #older version of _init_datatable
# delete(self.datatable).execute()
# self._init_datatable() #older version of _init_datatable
# Is this the best way to check?
# if engine.dialect.has_table(session.connection(), self.name):
# delete(self.datatable).execute()
self._refresh_datatable_schema()
if len(data) > 0:
data.index.name = 'indx'
data = data.reset_index()
datarecords = data.to_dict(orient='records')
objs = object_session(self)
objs.execute(self.datatable.insert(), datarecords)
objs.commit()
if checkvalidity:
try:
isvalid, reports = self.check_validity(report=True)
for rep in reports:
smrp.add_reportpoint(rep)
if not isvalid:
raise Exception('{} is not valid'.format(self.name))
except:
point = "validity_check"
smrp = self._generic_exception(point, smrp, allowraise)
self._log_an_event('CACHE','COMPLETE', "Fresh!")
else:
self._log_an_event('CACHE','FRESH', "Was still fresh")
return smrp
|
Re-caches the Symbol's datatable by querying each Feed.
Parameters
----------
checkvalidity : bool, optional
Optionally, check validity post-cache. Improve speed by
turning to False.
staleonly : bool, default False
Set to True, for speed up, by looking at staleness
allowraise : bool, default True
AND with the Symbol.handle and Feed.handle's 'raise',
set to False, to do a list of symbols. Note, this
won't silence bugs in Trump, eg. unhandled edge cases.
So, those still need to be handled by the application.
Returns
-------
SymbolReport
|
def create(self, key, value):
"""
Create a new VariableInstance
:param unicode key: The key
:param unicode value: The value
:returns: Newly created VariableInstance
:rtype: twilio.rest.serverless.v1.service.environment.variable.VariableInstance
"""
data = values.of({'Key': key, 'Value': value, })
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return VariableInstance(
self._version,
payload,
service_sid=self._solution['service_sid'],
environment_sid=self._solution['environment_sid'],
)
|
Create a new VariableInstance
:param unicode key: The key
:param unicode value: The value
:returns: Newly created VariableInstance
:rtype: twilio.rest.serverless.v1.service.environment.variable.VariableInstance
|
def _get_team_results(self, team_result_html):
"""
Extract the winning or losing team's name and abbreviation.
Depending on which team's data field is passed (either the winner or
loser), return the name and abbreviation of that team to denote which
team won and which lost the game.
Parameters
----------
team_result_html : PyQuery object
A PyQuery object representing either the winning or losing team's
data field within the boxscore.
Returns
-------
tuple
Returns a tuple of the team's name followed by the abbreviation.
"""
link = [i for i in team_result_html('td a').items()]
# If there are no links, the boxscore is likely misformed and can't be
# parsed. In this case, the boxscore should be skipped.
if len(link) < 1:
return None
name, abbreviation = self._get_name(link[0])
return name, abbreviation
|
Extract the winning or losing team's name and abbreviation.
Depending on which team's data field is passed (either the winner or
loser), return the name and abbreviation of that team to denote which
team won and which lost the game.
Parameters
----------
team_result_html : PyQuery object
A PyQuery object representing either the winning or losing team's
data field within the boxscore.
Returns
-------
tuple
Returns a tuple of the team's name followed by the abbreviation.
|
def _check_label_or_level_ambiguity(self, key, axis=0):
"""
Check whether `key` is ambiguous.
By ambiguous, we mean that it matches both a level of the input
`axis` and a label of the other axis.
Parameters
----------
key: str or object
label or level name
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Raises
------
ValueError: `key` is ambiguous
"""
if self.ndim > 2:
raise NotImplementedError(
"_check_label_or_level_ambiguity is not implemented for {type}"
.format(type=type(self)))
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
if (key is not None and
is_hashable(key) and
key in self.axes[axis].names and
any(key in self.axes[ax] for ax in other_axes)):
# Build an informative and grammatical warning
level_article, level_type = (('an', 'index')
if axis == 0 else
('a', 'column'))
label_article, label_type = (('a', 'column')
if axis == 0 else
('an', 'index'))
msg = ("'{key}' is both {level_article} {level_type} level and "
"{label_article} {label_type} label, which is ambiguous."
).format(key=key,
level_article=level_article,
level_type=level_type,
label_article=label_article,
label_type=label_type)
raise ValueError(msg)
|
Check whether `key` is ambiguous.
By ambiguous, we mean that it matches both a level of the input
`axis` and a label of the other axis.
Parameters
----------
key: str or object
label or level name
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Raises
------
ValueError: `key` is ambiguous
|
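An illustration of the ambiguity this guard catches, assuming a recent pandas: 'x' ends up as both an index level name and a column label, so operations that accept either will raise.
import pandas as pd

df = pd.DataFrame({"x": [1, 2], "y": [3, 4]}).set_index("x")
df["x"] = [10, 20]          # 'x' is now an index level AND a column label
# df.groupby("x")           # would raise ValueError: "'x' is both an index level and a column label ..."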
def rename_columns(self, col):
""" Rename columns of dataframe.
Parameters
----------
col : list(str)
List of columns to rename.
"""
try:
self.cleaned_data.columns = col
except Exception as e:
raise e
|
Rename columns of dataframe.
Parameters
----------
col : list(str)
List of columns to rename.
|
def get_account_from_name(self, name):
"""
Returns the account with the given name.
:type name: string
:param name: The name of the account.
"""
for account in self.accounts:
if account.get_name() == name:
return account
return None
|
Returns the account with the given name.
:type name: string
:param name: The name of the account.
|
def add_default_initial_conditions(self, value=None):
"""Set default initial conditions in the PySB model.
Parameters
----------
value : Optional[float]
Optionally a value can be supplied which will be the initial
amount applied. Otherwise a built-in default is used.
"""
if value is not None:
try:
value_num = float(value)
except ValueError:
logger.error('Invalid initial condition value.')
return
else:
value_num = self.default_initial_amount
if self.model is None:
return
for m in self.model.monomers:
set_base_initial_condition(self.model, m, value_num)
|
Set default initial conditions in the PySB model.
Parameters
----------
value : Optional[float]
Optionally a value can be supplied which will be the initial
amount applied. Otherwise a built-in default is used.
|
def do_fuzzyindex(self, word):
"""Compute fuzzy extensions of word that exist in index.
FUZZYINDEX lilas"""
word = list(preprocess_query(word))[0]
token = Token(word)
neighbors = make_fuzzy(token)
neighbors = [(n, DB.zcard(dbkeys.token_key(n))) for n in neighbors]
neighbors.sort(key=lambda n: n[1], reverse=True)
for token, freq in neighbors:
if freq == 0:
break
print(white(token), blue(freq))
|
Compute fuzzy extensions of word that exist in index.
FUZZYINDEX lilas
|
def random_word(tokens, tokenizer):
"""
Masking some random tokens for Language Model task with probabilities as in the original BERT paper.
:param tokens: list of str, tokenized sentence.
:param tokenizer: Tokenizer, object used for tokenization (we need its vocab here)
:return: (list of str, list of int), masked tokens and related labels for LM prediction
"""
output_label = []
for i, token in enumerate(tokens):
prob = random.random()
# mask token with 15% probability
if prob < 0.15:
prob /= 0.15
# 80% randomly change token to mask token
if prob < 0.8:
tokens[i] = "[MASK]"
# 10% randomly change token to random token
elif prob < 0.9:
tokens[i] = random.choice(list(tokenizer.vocab.items()))[0]
# -> rest 10% randomly keep current token
# append current token to output (we will predict these later)
try:
output_label.append(tokenizer.vocab[token])
except KeyError:
# For unknown words (should not occur with BPE vocab)
output_label.append(tokenizer.vocab["[UNK]"])
logger.warning("Cannot find token '{}' in vocab. Using [UNK] insetad".format(token))
else:
# no masking token (will be ignored by loss function later)
output_label.append(-1)
return tokens, output_label
|
Masking some random tokens for Language Model task with probabilities as in the original BERT paper.
:param tokens: list of str, tokenized sentence.
:param tokenizer: Tokenizer, object used for tokenization (we need its vocab here)
:return: (list of str, list of int), masked tokens and related labels for LM prediction
|
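A toy, self-contained rerun of the 80/10/10 masking rule above with a tiny vocabulary (no tokenizer object; labels of unmasked tokens stay -1 so the loss ignores them):
import random

vocab = {"[UNK]": 0, "[MASK]": 1, "the": 2, "cat": 3, "sat": 4}
tokens = ["the", "cat", "sat"]
labels = []
for i, token in enumerate(tokens):
    prob = random.random()
    if prob < 0.15:                                   # mask this position
        prob /= 0.15
        if prob < 0.8:
            tokens[i] = "[MASK]"                      # 80%: mask token
        elif prob < 0.9:
            tokens[i] = random.choice(list(vocab))    # 10%: random token
        labels.append(vocab.get(token, vocab["[UNK]"]))
    else:
        labels.append(-1)                             # not masked: ignored by the loss
print(tokens, labels)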
def makephy(data, samples, longname):
""" builds phy output. If large files writes 50000 loci at a time to tmp
files and rebuilds at the end"""
## order names
names = [i.name for i in samples]
names.sort()
## read in loci file
locifile = os.path.join(data.dirs.outfiles, data.name+".loci")
locus = iter(open(locifile, 'rb'))
## dict for saving the full matrix
fdict = {name:[] for name in names}
## list for saving locus number and locus range for partitions
partitions = []
initial_pos = 1
## remove empty column sites and append edited seqs to dict F
done = 0
nloci = 0
nbases = 0
## TODO: This should be fixed. it cycles through reading each locus
## until nloci is less than this large number. It should really just
## read to the end of the file, so it'll do all loci no matter how
## many there are.
while nloci < 5000000:
seqs = []
#arrayed = np.array([])
anames = []
while 1:
## get next locus
try:
samp = locus.next()
except StopIteration:
done = 1
break
if "//" in samp:
nloci += 1
break
else:
try:
name, seq = samp.split()
except ValueError:
print samp
anames.append(name[1:])
seqs.append(seq.strip())
## reset
arrayed = np.array([list(i) for i in seqs])
if done:
break
## create mask for columns that are empty or
## that are paired-end separators (compatible w/ pyrad v2 and v3)
#mask = [i for i in range(len(arrayed.T)) if np.any([
## still surely a better way to vectorize this...
mask = [i for i in arrayed.T if any([j not in list("-Nn") for j in i])]
masked = np.dstack(mask)[0]
## partition information
loc_name = "p"+str(nloci)
loc_range = str(initial_pos) + "-" +\
str(len(masked[0]) + initial_pos -1)
initial_pos += len(masked[0])
partitions.append(loc_name+"="+loc_range)
## uncomment to print block info (used to partition by locus)
#blockend += minray
#print blockend,
#print loc
#print arrayed
## append data to dict
for name in names:
if name in anames:
#fdict[name].append(arrayed[anames.index(name), mask].tostring())
fdict[name].append(masked[anames.index(name),:].tostring())
else:
fdict[name].append("N"*masked.shape[1])
#fdict[name].append("N"*len(arrayed[0, mask]))
## add len to total length
nbases += len(fdict[name][-1])
## after x iterations tmp pickle fdict?
if not nloci % 1e4:
## concat strings
for name in fdict:
with open(os.path.join(data.dirs.outfiles, "tmp",
"{}_{}.phy.tmp".format(name, nloci)), 'wb') as wout:
wout.write("".join(fdict[name]))
del fdict
fdict = {name:[] for name in names}
## print out .PHY file; if really big, pull from multiple tmp pickles
superout = open(os.path.join(data.dirs.outfiles, data.name+".phy"), 'wb')
print >>superout, len(names), nbases
if nloci < 1e4:
for name in names:
print >>superout, name+(" "*((longname+3)-\
len(name)))+"".join(fdict[name])
else:
for name in names:
superout.write("{}{}{}".format(
name,
" "*((longname+3)-len(name)),
"".join(fdict[name])))
tmpfiles = glob.glob(os.path.join(data.dirs.outfiles, "tmp", name+"*.phy.tmp"))
tmpfiles.sort()
for tmpf in tmpfiles:
with open(tmpf, 'rb') as tmpin:
superout.write(tmpin.read())
os.remove(tmpf)
superout.write("\n")
superout.close()
raxml_part_out = open(os.path.join(data.dirs.outfiles, data.name+".phy.partitions"), 'w')
for partition in partitions:
print >>raxml_part_out, "DNA, %s" % (partition)
raxml_part_out.close()
return partitions
|
Builds phy output. For large files, writes 50000 loci at a time to tmp
files and rebuilds at the end.
|
def get(self):
"""
*do the frankenstein magic!*
"""
self.log.info('starting the ``get`` method')
self._copy_folder_and_get_directory_listings()
self._join_all_filenames_and_text()
self._collect_placeholders_required()
self._populate_dynamic_placeholders()
self._fill_placeholders_from_settings()
self._request_remaining_placeholders()
self._populate_placeholders_in_files()
self._move_template_to_destination(ignoreExisting=self.ignoreExisting)
self.log.info('completed the ``get`` method')
return None
|
*do the frankenstein magic!*
|
def hexstr(x, onlyasc=0, onlyhex=0, color=False):
"""Build a fancy tcpdump like hex from bytes."""
x = bytes_encode(x)
_sane_func = sane_color if color else sane
s = []
if not onlyasc:
s.append(" ".join("%02X" % orb(b) for b in x))
if not onlyhex:
s.append(_sane_func(x))
return " ".join(s)
|
Build a fancy tcpdump like hex from bytes.
|
def _get_stream(filename, openfunction=open, mode='r'):
"""Return open stream if *filename* can be opened with *openfunction* or else ``None``."""
try:
stream = openfunction(filename, mode=mode)
except (IOError, OSError) as err:
# An exception might be raised due to two reasons, first the openfunction is unable to open the file, in this
# case we have to ignore the error and return None. Second is when openfunction can't open the file because
# either the file isn't there or the permissions don't allow access.
if errno.errorcode[err.errno] in ['ENOENT', 'EACCES']:
six.reraise(*sys.exc_info())
return None
if mode.startswith('r'):
# additional check for reading (eg can we uncompress) --- is this needed?
try:
stream.readline()
except IOError:
stream.close()
stream = None
except:
stream.close()
raise
else:
stream.close()
stream = openfunction(filename, mode=mode)
return stream
|
Return open stream if *filename* can be opened with *openfunction* or else ``None``.
|
def get_canonical_key_id(self, key_id):
"""
get_canonical_key_id is used by get_canonical_key, see the comment
for that method for more explanation.
Keyword arguments:
key_id -- the key id (e.g. '12345')
returns the canonical key id (e.g. '12')
"""
shard_num = self.get_shard_num_by_key_id(key_id)
return self._canonical_keys[shard_num]
|
get_canonical_key_id is used by get_canonical_key, see the comment
for that method for more explanation.
Keyword arguments:
key_id -- the key id (e.g. '12345')
returns the canonical key id (e.g. '12')
|
def x(self):
"""Block the main thead until future finish, return the future.result()."""
with self._condition:
result = None
if not self.done():
self._condition.wait(self._timeout)
if not self.done():
# timeout
self.set_exception(TimeoutError())
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
# cancelled
result = CancelledError()
elif self._state == FINISHED:
# finished
if self._exception:
result = self._exception
else:
result = self._result
if isinstance(result, Exception):
if self.catch_exception:
result = FailureException(result)
return result
else:
raise result
return result
|
Block the main thread until the future finishes, then return the future.result().
|
def next_frame_basic_stochastic_discrete():
"""Basic 2-frame conv model with stochastic discrete latent."""
hparams = basic_deterministic_params.next_frame_sampling()
hparams.batch_size = 4
hparams.video_num_target_frames = 6
hparams.scheduled_sampling_mode = "prob_inverse_lin"
hparams.scheduled_sampling_decay_steps = 40000
hparams.scheduled_sampling_max_prob = 1.0
hparams.dropout = 0.15
hparams.filter_double_steps = 3
hparams.hidden_size = 96
hparams.learning_rate_constant = 0.002
hparams.learning_rate_warmup_steps = 2000
hparams.learning_rate_schedule = "linear_warmup * constant"
hparams.concat_internal_states = True
hparams.video_modality_loss_cutoff = 0.03
hparams.add_hparam("bottleneck_bits", 128)
hparams.add_hparam("bottleneck_noise", 0.1)
hparams.add_hparam("discretize_warmup_steps", 40000)
hparams.add_hparam("latent_rnn_warmup_steps", 40000)
hparams.add_hparam("latent_rnn_max_sampling", 0.5)
hparams.add_hparam("latent_use_max_probability", 0.8)
hparams.add_hparam("full_latent_tower", False)
hparams.add_hparam("latent_predictor_state_size", 128)
hparams.add_hparam("latent_predictor_temperature", 1.0)
hparams.add_hparam("complex_addn", True)
hparams.add_hparam("recurrent_state_size", 64)
return hparams
|
Basic 2-frame conv model with stochastic discrete latent.
|
def unregister(self, svc_ref):
# type: (ServiceReference) -> Any
"""
Unregisters a service
:param svc_ref: A service reference
:return: The unregistered service instance
:raise BundleException: Unknown service reference
"""
with self.__svc_lock:
try:
# Try in pending services
return self.__pending_services.pop(svc_ref)
except KeyError:
# Not pending: continue
pass
if svc_ref not in self.__svc_registry:
raise BundleException("Unknown service: {0}".format(svc_ref))
# Get the owner
bundle = svc_ref.get_bundle()
# Get the service instance
service = self.__svc_registry.pop(svc_ref)
for spec in svc_ref.get_property(OBJECTCLASS):
spec_services = self.__svc_specs[spec]
# Use bisect to remove the reference (faster)
idx = bisect.bisect_left(spec_services, svc_ref)
del spec_services[idx]
if not spec_services:
del self.__svc_specs[spec]
# Remove the service factory
if svc_ref.is_factory():
# Call unget_service for all client bundles
factory, svc_reg = self.__svc_factories.pop(svc_ref)
for counter in self.__factory_usage.values():
counter.cleanup_service(factory, svc_reg)
else:
# Delete bundle association
bundle_services = self.__bundle_svc[bundle]
bundle_services.remove(svc_ref)
if not bundle_services:
# Don't keep empty lists
del self.__bundle_svc[bundle]
return service
|
Unregisters a service
:param svc_ref: A service reference
:return: The unregistered service instance
:raise BundleException: Unknown service reference
|
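The per-specification lists are kept sorted, so removal uses bisect; the pattern in isolation:
import bisect

spec_services = [10, 20, 30, 40]              # sorted list standing in for ServiceReferences
idx = bisect.bisect_left(spec_services, 30)   # O(log n) lookup of the reference's slot
del spec_services[idx]
print(spec_services)                          # [10, 20, 40]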
def date_range_builder(self, start='2013-02-11', end=None):
"""
Builds date range query.
:param start:
Date string. format: YYYY-MM-DD
:type start:
String
:param end:
date string. format: YYYY-MM-DD
:type end:
String
:returns:
String
"""
if not end:
end = time.strftime('%Y-%m-%d')
return 'acquisitionDate:[%s+TO+%s]' % (start, end)
|
Builds date range query.
:param start:
Date string. format: YYYY-MM-DD
:type start:
String
:param end:
date string. format: YYYY-MM-DD
:type end:
String
:returns:
String
|
def remove_node(self, p_id, remove_unconnected_nodes=True):
""" Removes a node from the graph. """
if self.has_node(p_id):
for neighbor in self.incoming_neighbors(p_id):
self._edges[neighbor].remove(p_id)
neighbors = set()
if remove_unconnected_nodes:
neighbors = self.outgoing_neighbors(p_id)
del self._edges[p_id]
for neighbor in neighbors:
if self.is_isolated(neighbor):
self.remove_node(neighbor)
|
Removes a node from the graph.
|
def _get_lineage(self, tax_id, merge_obsolete=True):
"""Return a list of [(rank, tax_id)] describing the lineage of
tax_id. If ``merge_obsolete`` is True and ``tax_id`` has been
replaced, use the corresponding value in table merged.
"""
# Be sure we aren't working with an obsolete tax_id
if merge_obsolete:
tax_id = self._get_merged(tax_id)
# Note: joining with ranks seems like a no-op, but for some
# reason it results in a faster query using sqlite, as well as
# an ordering from leaf --> root. Might be a better idea to
# sort explicitly if this is the expected behavior, but it
# seems like for the most part, the lineage is converted to a
# dict and the order is irrelevant.
cmd = """
WITH RECURSIVE a AS (
SELECT tax_id, parent_id, rank
FROM {nodes}
WHERE tax_id = {}
UNION ALL
SELECT p.tax_id, p.parent_id, p.rank
FROM a JOIN {nodes} p ON a.parent_id = p.tax_id
)
SELECT a.rank, a.tax_id FROM a
JOIN {ranks} using(rank)
""".format(self.placeholder, nodes=self.nodes, ranks=self.ranks_table)
# with some versions of sqlite3, an error is raised when no
# rows are returned; with others, an empty list is returned.
try:
with self.engine.connect() as con:
result = con.execute(cmd, (tax_id,))
# reorder so that root is first
lineage = result.fetchall()[::-1]
except sqlalchemy.exc.ResourceClosedError:
lineage = []
if not lineage:
raise ValueError('tax id "{}" not found'.format(tax_id))
return lineage
|
Return a list of [(rank, tax_id)] describing the lineage of
tax_id. If ``merge_obsolete`` is True and ``tax_id`` has been
replaced, use the corresponding value in table merged.
|
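A self-contained sqlite3 sketch of the recursive CTE used above (toy schema and ids; a tax_id != parent_id guard is added so the self-parented root row does not recurse forever):
import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE nodes (tax_id INT, parent_id INT, rank TEXT)")
con.executemany("INSERT INTO nodes VALUES (?, ?, ?)",
                [(1, 1, "root"), (10, 1, "phylum"), (100, 10, "genus")])
rows = con.execute("""
    WITH RECURSIVE a AS (
        SELECT tax_id, parent_id, rank FROM nodes WHERE tax_id = 100
        UNION ALL
        SELECT p.tax_id, p.parent_id, p.rank
        FROM a JOIN nodes p ON a.parent_id = p.tax_id AND a.tax_id != a.parent_id
    )
    SELECT rank, tax_id FROM a
""").fetchall()
print(rows[::-1])     # reversed so root comes first, as in the method above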
def do_ls(self, nothing = ''):
"""list files in current remote directory"""
for d in self.dirs:
self.stdout.write("\033[0;34m" + ('%s\n' % d) + "\033[0m")
for f in self.files:
self.stdout.write('%s\n' % f)
|
list files in current remote directory
|
def run_benchmarks(dir, models, wav, alphabet, lm_binary=None, trie=None, iters=-1):
r'''
Core of the running of the benchmarks. We will run on all of models, against
the WAV file provided as wav, and the provided alphabet.
'''
assert_valid_dir(dir)
inference_times = [ ]
for model in models:
model_filename = model
current_model = {
'name': model,
'iters': [ ],
'mean': numpy.infty,
'stddev': numpy.infty
}
if lm_binary and trie:
cmdline = './deepspeech --model "%s" --alphabet "%s" --lm "%s" --trie "%s" --audio "%s" -t' % (model_filename, alphabet, lm_binary, trie, wav)
else:
cmdline = './deepspeech --model "%s" --alphabet "%s" --audio "%s" -t' % (model_filename, alphabet, wav)
for it in range(iters):
sys.stdout.write('\rRunning %s: %d/%d' % (os.path.basename(model), (it+1), iters))
sys.stdout.flush()
rc, stdout, stderr = exec_command(cmdline, cwd=dir)
if rc == 0:
inference_time = float(stdout.split('\n')[1].split('=')[-1])
# print("[%d] model=%s inference=%f" % (it, model, inference_time))
current_model['iters'].append(inference_time)
else:
print('exec_command("%s") failed with rc=%d' % (cmdline, rc))
print('stdout: %s' % stdout)
print('stderr: %s' % stderr)
raise AssertionError('Execution failure: rc=%d' % (rc))
sys.stdout.write('\n')
sys.stdout.flush()
current_model['mean'] = numpy.mean(current_model['iters'])
current_model['stddev'] = numpy.std(current_model['iters'])
inference_times.append(current_model)
return inference_times
|
r'''
Core of the running of the benchmarks. We will run on all of models, against
the WAV file provided as wav, and the provided alphabet.
|
def http2time(text):
"""Returns time in seconds since epoch of time represented by a string.
Return value is an integer.
None is returned if the format of str is unrecognized, the time is outside
the representable range, or the timezone string is not recognized. If the
string contains no timezone, UTC is assumed.
The timezone in the string may be numerical (like "-0800" or "+0100") or a
string timezone (like "UTC", "GMT", "BST" or "EST"). Currently, only the
timezone strings equivalent to UTC (zero offset) are known to the function.
The function loosely parses the following formats:
Wed, 09 Feb 1994 22:23:32 GMT -- HTTP format
Tuesday, 08-Feb-94 14:15:29 GMT -- old rfc850 HTTP format
Tuesday, 08-Feb-1994 14:15:29 GMT -- broken rfc850 HTTP format
09 Feb 1994 22:23:32 GMT -- HTTP format (no weekday)
08-Feb-94 14:15:29 GMT -- rfc850 format (no weekday)
08-Feb-1994 14:15:29 GMT -- broken rfc850 format (no weekday)
The parser ignores leading and trailing whitespace. The time may be
absent.
If the year is given with only 2 digits, the function will select the
century that makes the year closest to the current date.
"""
# fast exit for strictly conforming string
m = STRICT_DATE_RE.search(text)
if m:
g = m.groups()
mon = MONTHS_LOWER.index(g[1].lower()) + 1
tt = (int(g[2]), mon, int(g[0]),
int(g[3]), int(g[4]), float(g[5]))
return _timegm(tt)
# No, we need some messy parsing...
# clean up
text = text.lstrip()
text = WEEKDAY_RE.sub("", text, 1) # Useless weekday
# tz is time zone specifier string
day, mon, yr, hr, min, sec, tz = [None]*7
# loose regexp parse
m = LOOSE_HTTP_DATE_RE.search(text)
if m is not None:
day, mon, yr, hr, min, sec, tz = m.groups()
else:
return None # bad format
return _str2time(day, mon, yr, hr, min, sec, tz)
|
Returns time in seconds since epoch of time represented by a string.
Return value is an integer.
None is returned if the format of str is unrecognized, the time is outside
the representable range, or the timezone string is not recognized. If the
string contains no timezone, UTC is assumed.
The timezone in the string may be numerical (like "-0800" or "+0100") or a
string timezone (like "UTC", "GMT", "BST" or "EST"). Currently, only the
timezone strings equivalent to UTC (zero offset) are known to the function.
The function loosely parses the following formats:
Wed, 09 Feb 1994 22:23:32 GMT -- HTTP format
Tuesday, 08-Feb-94 14:15:29 GMT -- old rfc850 HTTP format
Tuesday, 08-Feb-1994 14:15:29 GMT -- broken rfc850 HTTP format
09 Feb 1994 22:23:32 GMT -- HTTP format (no weekday)
08-Feb-94 14:15:29 GMT -- rfc850 format (no weekday)
08-Feb-1994 14:15:29 GMT -- broken rfc850 format (no weekday)
The parser ignores leading and trailing whitespace. The time may be
absent.
If the year is given with only 2 digits, the function will select the
century that makes the year closest to the current date.
|
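For the strict fast path, converting the parsed tuple to epoch seconds is essentially what calendar.timegm does; a standalone sketch:
import calendar

tt = (1994, 2, 9, 22, 23, 32)        # Wed, 09 Feb 1994 22:23:32 GMT
print(calendar.timegm(tt))           # seconds since the epoch, UTC assumed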
def compare_config(self):
"""
Netmiko is being used to obtain config diffs because pan-python
doesn't support the needed command.
"""
if self.ssh_connection is False:
self._open_ssh()
self.ssh_device.exit_config_mode()
diff = self.ssh_device.send_command("show config diff")
return diff.strip()
|
Netmiko is being used to obtain config diffs because pan-python
doesn't support the needed command.
|
def snippetWithLink(self, url):
""" This method will try to return the first
<p> or <div> that contains an <a> tag linking to
the given URL.
"""
link = self.soup.find("a", attrs={'href': url})
if link:
for p in link.parents:
if p.name in ('p', 'div'):
return ' '.join(p.text.split()[0:30])
return None
|
This method will try to return the first
<p> or <div> that contains an <a> tag linking to
the given URL.
|
def log_info(self, data):
''' Logs successful responses '''
info = 'label={label}, id={id}, ilx={ilx}, superclass_tid={super_id}'
info_filled = info.format(label = data['label'],
id = data['id'],
ilx = data['ilx'],
super_id = data['superclasses'][0]['id'])
logging.info(info_filled)
return info_filled
|
Logs successful responses
|
def _is_intrinsic_dict(self, input):
"""
Can the input represent an intrinsic function in it?
:param input: Object to be checked
:return: True, if the input contains a supported intrinsic function. False otherwise
"""
# All intrinsic functions are dictionaries with just one key
return isinstance(input, dict) \
and len(input) == 1 \
and list(input.keys())[0] in self.supported_intrinsics
|
Can the input represent an intrinsic function in it?
:param input: Object to be checked
:return: True, if the input contains a supported intrinsic function. False otherwise
|
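The check reduces to "a dict with exactly one key, and that key is a supported intrinsic name"; a toy version:
supported_intrinsics = {"Ref", "Fn::GetAtt", "Fn::Sub"}   # illustrative, not the class's actual table

def looks_intrinsic(value):
    return isinstance(value, dict) and len(value) == 1 and list(value)[0] in supported_intrinsics

assert looks_intrinsic({"Ref": "MyBucket"})
assert not looks_intrinsic({"Ref": "A", "Fn::Sub": "B"})   # more than one key
assert not looks_intrinsic(["Ref"])                        # not a dict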
def compress_dir(path, compression="gz"):
"""
Recursively compresses all files in a directory. Note that this
compresses all files singly, i.e., it does not create a tar archive. For
that, just use Python tarfile class.
Args:
path (str): Path to parent directory.
compression (str): A compression mode. Valid options are "gz" or
"bz2". Defaults to gz.
"""
for parent, subdirs, files in os.walk(path):
for f in files:
compress_file(os.path.join(parent, f), compression=compression)
|
Recursively compresses all files in a directory. Note that this
compresses all files singly, i.e., it does not create a tar archive. For
that, just use Python tarfile class.
Args:
path (str): Path to parent directory.
compression (str): A compression mode. Valid options are "gz" or
"bz2". Defaults to gz.
|
def make_filename(s, space=None, language='msdos', strict=False, max_len=None, repeats=1024):
r"""Process string to remove any characters not allowed by the language specified (default: MSDOS)
In addition, optionally replace spaces with the indicated "space" character
(to make the path useful in a copy-paste without quoting).
Uses the following regular expression to substitute spaces for invalid characters:
re.sub(r'[ :\\/?*&"<>|~`!]{1}', space, s)
>>> make_filename(r'Whatever crazy &s $h!7 n*m3 ~\/ou/ can come up. with.`txt`!', strict=False)
'Whatever-crazy-s-$h-7-n-m3-ou-can-come-up.-with.-txt-'
>>> make_filename(r'Whatever crazy &s $h!7 n*m3 ~\/ou/ can come up. with.`txt`!', strict=False, repeats=1)
'Whatever-crazy--s-$h-7-n-m3----ou--can-come-up.-with.-txt--'
>>> make_filename(r'Whatever crazy &s $h!7 n*m3 ~\/ou/ can come up. with.`txt`!', repeats=1)
'Whatever-crazy--s-$h-7-n-m3----ou--can-come-up.-with.-txt--'
>>> make_filename(r'Whatever crazy &s $h!7 n*m3 ~\/ou/ can come up. with.`txt`!')
'Whatever-crazy-s-$h-7-n-m3-ou-can-come-up.-with.-txt-'
>>> make_filename(r'Whatever crazy &s $h!7 n*m3 ~\/ou/ can come up. with.`txt`!', strict=True, repeats=1)
'Whatever_crazy_s_h_7_n_m3_ou_can_come_up_with_txt_'
>>> make_filename(r'Whatever crazy &s $h!7 n*m3 ~\/ou/ can come up. with.`txt`!', strict=True, repeats=1, max_len=14)
'Whatever_crazy'
>>> make_filename(r'Whatever crazy &s $h!7 n*m3 ~\/ou/ can come up. with.`txt`!', max_len=14)
'Whatever-crazy'
"""
filename = None
if strict or language.lower().strip() in ('strict', 'variable', 'expression', 'python'):
if space is None:
space = '_'
elif not space:
space = ''
filename = make_name(s, space=space, lower=False)
else:
if space is None:
space = '-'
elif not space:
space = ''
if not filename:
if language.lower().strip() in ('posix', 'unix', 'linux', 'centos', 'ubuntu', 'fedora',
'redhat', 'rhel', 'debian', 'deb'):
filename = re.sub(r'[^0-9A-Za-z._-]{' + ('1,{0}'.format(repeats)) + r'}', space, s)
else:
filename = re.sub(r'[ :\\/?*&"<>|~`!]{' + ('1,{0}'.format(repeats)) + r'}', space, s)
if max_len and int(max_len) > 0 and filename:
return filename[:int(max_len)]
else:
return filename
|
r"""Process string to remove any characters not allowed by the language specified (default: MSDOS)
In addition, optionally replace spaces with the indicated "space" character
(to make the path useful in a copy-paste without quoting).
Uses the following regular expression to substitute spaces for invalid characters:
re.sub(r'[ :\\/?*&"<>|~`!]{1}', space, s)
>>> make_filename(r'Whatever crazy &s $h!7 n*m3 ~\/ou/ can come up. with.`txt`!', strict=False)
'Whatever-crazy-s-$h-7-n-m3-ou-can-come-up.-with.-txt-'
>>> make_filename(r'Whatever crazy &s $h!7 n*m3 ~\/ou/ can come up. with.`txt`!', strict=False, repeats=1)
'Whatever-crazy--s-$h-7-n-m3----ou--can-come-up.-with.-txt--'
>>> make_filename(r'Whatever crazy &s $h!7 n*m3 ~\/ou/ can come up. with.`txt`!', repeats=1)
'Whatever-crazy--s-$h-7-n-m3----ou--can-come-up.-with.-txt--'
>>> make_filename(r'Whatever crazy &s $h!7 n*m3 ~\/ou/ can come up. with.`txt`!')
'Whatever-crazy-s-$h-7-n-m3-ou-can-come-up.-with.-txt-'
>>> make_filename(r'Whatever crazy &s $h!7 n*m3 ~\/ou/ can come up. with.`txt`!', strict=True, repeats=1)
'Whatever_crazy_s_h_7_n_m3_ou_can_come_up_with_txt_'
>>> make_filename(r'Whatever crazy &s $h!7 n*m3 ~\/ou/ can come up. with.`txt`!', strict=True, repeats=1, max_len=14)
'Whatever_crazy'
>>> make_filename(r'Whatever crazy &s $h!7 n*m3 ~\/ou/ can come up. with.`txt`!', max_len=14)
'Whatever-crazy'
|
def melt(self, plot=False):
"""
Find and merge groups of polygons in the surface that meet the
following criteria:
* Are coplanar.
* Are contiguous.
* The result is convex.
This method is very useful for reducing the number of items and,
therefore, the shadow-computation time. Before this instance is
overridden, it is saved and can be restored with ``.restore()``.
:param plot: If True, generates the before and after
visualizations for the surface. Use it to check the results.
:type plot: bool
:returns: None
.. warning:: This method does not check whether the merged polygons
are actually convex. The convex hull of the union is computed
directly. For this reason, it is very important to visually
check the solution.
"""
from pyny3d.utils import bool2index
from scipy.spatial import ConvexHull
# First, coplanarity
## Normalize parametric equations
para = [poly.get_parametric() for poly in self]
para = np.array([p/np.linalg.norm(p) for p in para])
n = para.shape[0]
## Coincidences
cop = []
for i, plane in enumerate(para[:-1]):
indexes = np.zeros((n-i-1, 4))
for c in range(4):
indexes[:, c] = np.isclose(para[i+1:, c], plane[c])
pos = bool2index(indexes.sum(axis=1)==4)+i+1
if pos.shape[0] > 0:
cop.append(np.hstack((i, pos)))
para[pos, :] = np.nan
# Second, contiguity
substituted = []
cop_cont = []
for i, group in enumerate(cop):
polygons = [self[i] for i in group]
if Surface.contiguous(polygons):
cop_cont.append(polygons)
substituted.append(group)
if len(substituted) != 0:
self.save()
if plot: self.plot()
substituted = np.concatenate(substituted)  # flatten the index groups into one array
# Hull
merged = []
for polygons in cop_cont:
points = np.concatenate([polygon.points
for polygon in polygons])
hull = ConvexHull(points[:, :2])
merged.append(Polygon(points[hull.vertices]))
# Final substitution
new_surface = [self[i] for i in range(len(self.polygons))
if i not in substituted]
new_surface += merged
self.polygons = new_surface
self.sorted_areas = None
if plot: self.plot()
|
Find and merge groups of polygons in the surface that meet the
following criteria:
* Are coplanar.
* Are contiguous.
* The result is convex.
This method is very useful for reducing the number of items and,
therefore, the shadow-computation time. Before this instance is
overridden, it is saved and can be restored with ``.restore()``.
:param plot: If True, generates the before and after
visualizations for the surface. Use it to check the results.
:type plot: bool
:returns: None
.. warning:: This method does not check whether the merged polygons
are actually convex. The convex hull of the union is computed
directly. For this reason, it is very important to visually
check the solution.
|
def list(self, pattern='*'):
"""Returns a list of resource descriptors that match the filters.
Args:
pattern: An optional pattern to further filter the descriptors. This can
include Unix shell-style wildcards. E.g. ``"aws*"``, ``"*cluster*"``.
Returns:
A list of ResourceDescriptor objects that match the filters.
"""
if self._descriptors is None:
self._descriptors = self._client.list_resource_descriptors(
filter_string=self._filter_string)
return [resource for resource in self._descriptors
if fnmatch.fnmatch(resource.type, pattern)]
|
Returns a list of resource descriptors that match the filters.
Args:
pattern: An optional pattern to further filter the descriptors. This can
include Unix shell-style wildcards. E.g. ``"aws*"``, ``"*cluster*"``.
Returns:
A list of ResourceDescriptor objects that match the filters.
|
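The filtering above relies on `fnmatch`; a small standalone example of the wildcard matching it performs (the resource type names are invented):
import fnmatch

types = ["aws_ec2_instance", "gce_instance", "k8s_cluster", "gke_cluster"]
print([t for t in types if fnmatch.fnmatch(t, "aws*")])       # ['aws_ec2_instance']
print([t for t in types if fnmatch.fnmatch(t, "*cluster*")])  # ['k8s_cluster', 'gke_cluster']
|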
def run_action(self, unit_sentry, action,
_check_output=subprocess.check_output,
params=None):
"""Translate to amulet's built in run_action(). Deprecated.
Run the named action on a given unit sentry.
params a dict of parameters to use
_check_output parameter is no longer used
@return action_id.
"""
self.log.warn('charmhelpers.contrib.amulet.utils.run_action has been '
'deprecated for amulet.run_action')
return unit_sentry.run_action(action, action_args=params)
|
Translate to amulet's built in run_action(). Deprecated.
Run the named action on a given unit sentry.
params a dict of parameters to use
_check_output parameter is no longer used
@return action_id.
|
def _GetGsScopes(self):
"""Return all Google Storage scopes available on this VM."""
service_accounts = self.watcher.GetMetadata(metadata_key=self.metadata_key)
try:
scopes = service_accounts[self.service_account]['scopes']
return list(GS_SCOPES.intersection(set(scopes))) if scopes else None
except KeyError:
return None
|
Return all Google Storage scopes available on this VM.
|
def precompute_optimzation_Y(laplacian_matrix, n_samples, relaxation_kwds):
"""compute Lk, neighbors and subset to index map for projected == False"""
relaxation_kwds.setdefault('presave',False)
relaxation_kwds.setdefault('presave_name','pre_comp_current.npy')
relaxation_kwds.setdefault('verbose',False)
if relaxation_kwds['verbose']:
print ('Making Lk and nbhds')
Lk_tensor, nbk, si_map = \
compute_Lk(laplacian_matrix, n_samples, relaxation_kwds['subset'])
if relaxation_kwds['presave']:
raise NotImplementedError('Not yet implemented presave')
return { 'Lk': Lk_tensor, 'nbk': nbk, 'si_map': si_map }
|
compute Lk, neighbors and subset to index map for projected == False
|
def op(scalars_layout, collections=None):
"""Creates a summary that contains a layout.
When users navigate to the custom scalars dashboard, they will see a layout
based on the proto provided to this function.
Args:
scalars_layout: The scalars_layout_pb2.Layout proto that specifies the
layout.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
`[GraphKeys.SUMMARIES]`.
Returns:
A tensor summary op that writes the layout to disk.
"""
# TODO(nickfelt): remove on-demand imports once dep situation is fixed.
import tensorflow.compat.v1 as tf
assert isinstance(scalars_layout, layout_pb2.Layout)
summary_metadata = metadata.create_summary_metadata()
return tf.summary.tensor_summary(name=metadata.CONFIG_SUMMARY_TAG,
tensor=tf.constant(
scalars_layout.SerializeToString(),
dtype=tf.string),
collections=collections,
summary_metadata=summary_metadata)
|
Creates a summary that contains a layout.
When users navigate to the custom scalars dashboard, they will see a layout
based on the proto provided to this function.
Args:
scalars_layout: The scalars_layout_pb2.Layout proto that specifies the
layout.
collections: Optional list of graph collections keys. The new
summary op is added to these collections. Defaults to
`[GraphKeys.SUMMARIES]`.
Returns:
A tensor summary op that writes the layout to disk.
|
def set_idle_params(self, timeout=None, exit=None):
"""Activate idle mode - put uWSGI in cheap mode after inactivity timeout.
:param int timeout: Inactivity timeout in seconds.
:param bool exit: Shutdown uWSGI when idle.
"""
self._set('idle', timeout)
self._set('die-on-idle', exit, cast=bool)
return self._section
|
Activate idle mode - put uWSGI in cheap mode after inactivity timeout.
:param int timeout: Inactivity timeout in seconds.
:param bool exit: Shutdown uWSGI when idle.
|
def upload_sequence_fileobj(file_obj, file_name, fields, retry_fields, session, samples_resource):
"""Uploads a single file-like object to the One Codex server via either fastx-proxy or directly
to S3.
Parameters
----------
file_obj : `FASTXInterleave`, `FilePassthru`, or a file-like object
A wrapper around a pair of fastx files (`FASTXInterleave`) or a single fastx file. In the
case of paired files, they will be interleaved and uploaded uncompressed. In the case of a
single file, it will simply be passed through (`FilePassthru`) to One Codex, compressed
or otherwise. If a file-like object is given, its mime-type will be sent as 'text/plain'.
file_name : `string`
The file_name you wish to associate this fastx file with at One Codex.
fields : `dict`
Additional data fields to include as JSON in the POST. Must include 'sample_id' and
'upload_url' at a minimum.
retry_fields : `dict`
Metadata sent to `init_multipart_upload` in the case that the upload via fastx-proxy fails.
session : `requests.Session`
Connection to One Codex API.
samples_resource : `onecodex.models.Samples`
Wrapped potion-client object exposing `init_upload` and `confirm_upload` routes to mainline.
Raises
------
UploadException
In the case of a fatal exception during an upload.
Returns
-------
`string` containing sample ID of newly uploaded file.
"""
# First attempt to upload via our validating proxy
try:
_direct_upload(file_obj, file_name, fields, session, samples_resource)
sample_id = fields["sample_id"]
except RetryableUploadException:
# upload failed--retry direct upload to S3 intermediate
logging.error("{}: Connectivity issue, trying direct upload...".format(file_name))
file_obj.seek(0) # reset file_obj back to start
try:
retry_fields = samples_resource.init_multipart_upload(retry_fields)
except requests.exceptions.HTTPError as e:
raise_api_error(e.response, state="init")
except requests.exceptions.ConnectionError:
raise_connectivity_error(file_name)
s3_upload = _s3_intermediate_upload(
file_obj,
file_name,
retry_fields,
session,
samples_resource._client._root_url + retry_fields["callback_url"], # full callback url
)
sample_id = s3_upload.get("sample_id", "<UUID not yet assigned>")
logging.info("{}: finished as sample {}".format(file_name, sample_id))
return sample_id
|
Uploads a single file-like object to the One Codex server via either fastx-proxy or directly
to S3.
Parameters
----------
file_obj : `FASTXInterleave`, `FilePassthru`, or a file-like object
A wrapper around a pair of fastx files (`FASTXInterleave`) or a single fastx file. In the
case of paired files, they will be interleaved and uploaded uncompressed. In the case of a
single file, it will simply be passed through (`FilePassthru`) to One Codex, compressed
or otherwise. If a file-like object is given, its mime-type will be sent as 'text/plain'.
file_name : `string`
The file_name you wish to associate this fastx file with at One Codex.
fields : `dict`
Additional data fields to include as JSON in the POST. Must include 'sample_id' and
'upload_url' at a minimum.
retry_fields : `dict`
Metadata sent to `init_multipart_upload` in the case that the upload via fastx-proxy fails.
session : `requests.Session`
Connection to One Codex API.
samples_resource : `onecodex.models.Samples`
Wrapped potion-client object exposing `init_upload` and `confirm_upload` routes to mainline.
Raises
------
UploadException
In the case of a fatal exception during an upload.
Returns
-------
`string` containing sample ID of newly uploaded file.
|
def __create_author_investigator_str(self):
"""
When investigators is empty, try to get authors from the first publication instead.
:return str author: Author names
"""
_author = ""
try:
for pub in self.noaa_data_sorted["Publication"]:
if "author" in pub:
if pub["author"]:
_author_src = pub["author"]
if isinstance(_author_src, str):
try:
if " and " in _author_src:
_author = _author_src.replace(" and ", "; ")
elif ";" in _author_src:
# If there is a semi-colon, add a space after it, just in case it didn't have one
_author = _author_src.replace(";", "; ")
break
except Exception as e:
_author = ""
elif isinstance(_author_src, list):
try:
for _entry in _author_src:
_author += _entry["name"].split(",")[0] + ", "
except Exception as e:
_author = ""
except Exception:
_author = ""
return _author
|
When investigators is empty, try to get authors from the first publication instead.
:return str author: Author names
|
def get_results_msg(self, results, study):
"""Return summary for GOEA results."""
# To convert msg list to string: "\n".join(msg)
msg = []
if results:
fmt = "{M:6,} GO terms are associated with {N:6,} of {NT:6,}"
stu_items, num_gos_stu = self.get_item_cnt(results, "study_items")
pop_items, num_gos_pop = self.get_item_cnt(results, "pop_items")
stu_txt = fmt.format(N=len(stu_items), M=num_gos_stu, NT=len(set(study)))
pop_txt = fmt.format(N=len(pop_items), M=num_gos_pop, NT=self.pop_n)
msg.append("{POP} population items".format(POP=pop_txt))
msg.append("{STU} study items".format(STU=stu_txt))
return msg
|
Return summary for GOEA results.
|
def decrement_display_ref_count(self, amount: int=1):
"""Decrement display reference count to indicate this library item is no longer displayed."""
assert not self._closed
self.__display_ref_count -= amount
if self.__display_ref_count == 0:
self.__is_master = False
if self.__data_item:
for _ in range(amount):
self.__data_item.decrement_data_ref_count()
|
Decrement display reference count to indicate this library item is no longer displayed.
|
def render_targets_weighted_spans(
targets, # type: List[TargetExplanation]
preserve_density, # type: Optional[bool]
):
# type: (...) -> List[Optional[str]]
""" Return a list of rendered weighted spans for targets.
Function must accept a list in order to select consistent weight
ranges across all targets.
"""
prepared_weighted_spans = prepare_weighted_spans(
targets, preserve_density)
def _fmt_pws(pws):
# type: (PreparedWeightedSpans) -> str
name = ('<b>{}:</b> '.format(pws.doc_weighted_spans.vec_name)
if pws.doc_weighted_spans.vec_name else '')
return '{}{}'.format(name, render_weighted_spans(pws))
def _fmt_pws_list(pws_lst):
# type: (List[PreparedWeightedSpans]) -> str
return '<br/>'.join(_fmt_pws(pws) for pws in pws_lst)
return [_fmt_pws_list(pws_lst) if pws_lst else None
for pws_lst in prepared_weighted_spans]
|
Return a list of rendered weighted spans for targets.
Function must accept a list in order to select consistent weight
ranges across all targets.
|
def copyMakeBorder(src, top, bot, left, right, *args, **kwargs):
"""Pad image border with OpenCV.
Parameters
----------
src : NDArray
source image
top : int, required
Top margin.
bot : int, required
Bottom margin.
left : int, required
Left margin.
right : int, required
Right margin.
type : int, optional, default='0'
Filling type (default=cv2.BORDER_CONSTANT).
0 - cv2.BORDER_CONSTANT - Adds a constant colored border.
1 - cv2.BORDER_REFLECT - Border will be mirror reflection of the
border elements, like this : fedcba|abcdefgh|hgfedcb
2 - cv2.BORDER_REFLECT_101 or cv.BORDER_DEFAULT - Same as above,
but with a slight change, like this : gfedcb|abcdefgh|gfedcba
3 - cv2.BORDER_REPLICATE - Last element is replicated throughout,
like this: aaaaaa|abcdefgh|hhhhhhh
4 - cv2.BORDER_WRAP - it will look like this : cdefgh|abcdefgh|abcdefg
value : double, optional, default=0
(Deprecated! Use ``values`` instead.) Fill with single value.
values : tuple of <double>, optional, default=[]
Fill with value(RGB[A] or gray), up to 4 channels.
out : NDArray, optional
The output NDArray to hold the result.
Returns
-------
out : NDArray or list of NDArrays
The output of this function.
Example
--------
>>> with open("flower.jpeg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image)
>>> image
<NDArray 2321x3482x3 @cpu(0)>
>>> new_image = mx.image.copyMakeBorder(image, 1, 2, 3, 4, type=0)
>>> new_image
<NDArray 2324x3489x3 @cpu(0)>
"""
return _internal._cvcopyMakeBorder(src, top, bot, left, right, *args, **kwargs)
|
Pad image border with OpenCV.
Parameters
----------
src : NDArray
source image
top : int, required
Top margin.
bot : int, required
Bottom margin.
left : int, required
Left margin.
right : int, required
Right margin.
type : int, optional, default='0'
Filling type (default=cv2.BORDER_CONSTANT).
0 - cv2.BORDER_CONSTANT - Adds a constant colored border.
1 - cv2.BORDER_REFLECT - Border will be mirror reflection of the
border elements, like this : fedcba|abcdefgh|hgfedcb
2 - cv2.BORDER_REFLECT_101 or cv.BORDER_DEFAULT - Same as above,
but with a slight change, like this : gfedcb|abcdefgh|gfedcba
3 - cv2.BORDER_REPLICATE - Last element is replicated throughout,
like this: aaaaaa|abcdefgh|hhhhhhh
4 - cv2.BORDER_WRAP - it will look like this : cdefgh|abcdefgh|abcdefg
value : double, optional, default=0
(Deprecated! Use ``values`` instead.) Fill with single value.
values : tuple of <double>, optional, default=[]
Fill with value(RGB[A] or gray), up to 4 channels.
out : NDArray, optional
The output NDArray to hold the result.
Returns
-------
out : NDArray or list of NDArrays
The output of this function.
Example
--------
>>> with open("flower.jpeg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image)
>>> image
<NDArray 2321x3482x3 @cpu(0)>
>>> new_image = mx.image.copyMakeBorder(image, 1, 2, 3, 4, type=0)
>>> new_image
<NDArray 2324x3489x3 @cpu(0)>
|
def get_raw_input(description, default=False):
"""Get user input from the command line via raw_input / input.
description (unicode): Text to display before prompt.
default (unicode or False/None): Default value to display with prompt.
RETURNS (unicode): User input.
"""
additional = ' (default: %s)' % default if default else ''
prompt = ' %s%s: ' % (description, additional)
user_input = input_(prompt)
return user_input
|
Get user input from the command line via raw_input / input.
description (unicode): Text to display before prompt.
default (unicode or False/None): Default value to display with prompt.
RETURNS (unicode): User input.
|
def set_default_names(data):
"""Sets index names to 'index' for regular, or 'level_x' for Multi"""
if all(name is not None for name in data.index.names):
return data
data = data.copy()
if data.index.nlevels > 1:
names = [name if name is not None else 'level_{}'.format(i)
for i, name in enumerate(data.index.names)]
data.index.names = names
else:
data.index.name = data.index.name or 'index'
return data
|
Sets index names to 'index' for regular, or 'level_x' for Multi
|
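A quick usage example of the helper above, assuming pandas is available:
import pandas as pd

df = pd.DataFrame({"a": [1, 2]})                 # RangeIndex with no name
print(set_default_names(df).index.name)          # 'index'

mi = pd.MultiIndex.from_tuples([(1, "x"), (2, "y")])
df2 = pd.DataFrame({"a": [1, 2]}, index=mi)
print(set_default_names(df2).index.names)        # names become 'level_0', 'level_1'
|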
def _tag_net_direction(data):
"""Create a tag based on the direction of the traffic"""
# IP or IPv6
src = data['packet']['src_domain']
dst = data['packet']['dst_domain']
if src == 'internal':
if dst == 'internal' or 'multicast' in dst or 'broadcast' in dst:
return 'internal'
else:
return 'outgoing'
elif dst == 'internal':
return 'incoming'
else:
return None
|
Create a tag based on the direction of the traffic
|
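Illustrative calls to the tagger above; the packet dicts are assumed shapes, and only `src_domain`/`dst_domain` are consulted:
print(_tag_net_direction({'packet': {'src_domain': 'internal', 'dst_domain': 'example.com'}}))   # 'outgoing'
print(_tag_net_direction({'packet': {'src_domain': 'example.com', 'dst_domain': 'internal'}}))   # 'incoming'
print(_tag_net_direction({'packet': {'src_domain': 'internal', 'dst_domain': 'multicast_v4'}}))  # 'internal'
print(_tag_net_direction({'packet': {'src_domain': 'external', 'dst_domain': 'external'}}))      # None
|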
def extend(self, clauses, weights=None):
"""
Add several clauses to WCNF formula. The clauses should be given in
the form of list. For every clause in the list, method
:meth:`append` is invoked.
The clauses can be hard or soft depending on the ``weights``
argument. If no weights are set, the clauses are considered to be
hard.
:param clauses: a list of new clauses to add.
:param weights: a list of integer weights.
:type clauses: list(list(int))
:type weights: list(int)
Example:
.. code-block:: python
>>> from pysat.formula import WCNF
>>> cnf = WCNF()
>>> cnf.extend([[-3, 4], [5, 6]])
>>> cnf.extend([[3], [-4], [-5], [-6]], weights=[1, 5, 3, 4])
>>> print cnf.hard
[[-3, 4], [5, 6]]
>>> print cnf.soft
[[3], [-4], [-5], [-6]]
>>> print cnf.wght
[1, 5, 3, 4]
"""
if weights:
# clauses are soft
for i, cl in enumerate(clauses):
self.append(cl, weight=weights[i])
else:
# clauses are hard
for cl in clauses:
self.append(cl)
|
Add several clauses to WCNF formula. The clauses should be given in
the form of list. For every clause in the list, method
:meth:`append` is invoked.
The clauses can be hard or soft depending on the ``weights``
argument. If no weights are set, the clauses are considered to be
hard.
:param clauses: a list of new clauses to add.
:param weights: a list of integer weights.
:type clauses: list(list(int))
:type weights: list(int)
Example:
.. code-block:: python
>>> from pysat.formula import WCNF
>>> cnf = WCNF()
>>> cnf.extend([[-3, 4], [5, 6]])
>>> cnf.extend([[3], [-4], [-5], [-6]], weights=[1, 5, 3, 4])
>>> print cnf.hard
[[-3, 4], [5, 6]]
>>> print cnf.soft
[[3], [-4], [-5], [-6]]
>>> print cnf.wght
[1, 5, 3, 4]
|
def ssn(self):
"""
Returns an 11-digit Belgian SSN, called "rijksregisternummer", as a string.
The first 6 digits represent the birthdate with (in order) year, month and day.
The second group of 3 digits represents a sequence number (order of birth).
It is even for women and odd for men.
For men the range starts at 1 and ends at 997, for women 2 until 998.
The third group of 2 digits is a checksum based on the previous 9 digits (modulo 97).
Divide those 9 digits by 97, subtract the remainder from 97 and that's the result.
For persons born in or after 2000, the 9-digit number needs to be preceded by a 2
(add 2000000000) before the division by 97.
"""
# see http://nl.wikipedia.org/wiki/Burgerservicenummer (in Dutch)
def _checksum(digits):
res = 97 - (digits % 97)
return res
# Generate a date (random)
mydate = self.generator.date()
# Convert it to an int
elms = mydate.split("-")
# Adjust for year 2000 if necessary
if elms[0][0] == '2':
above = True
else:
above = False
# Only keep the last 2 digits of the year
elms[0] = elms[0][2:4]
# Simulate the gender/sequence - should be 3 digits
seq = self.generator.random_int(1, 998)
# Right justify sequence and append to list
seq_str = "{:0>3}".format(seq)
elms.append(seq_str)
# Now convert list to an integer so the checksum can be calculated
date_as_int = int("".join(elms))
if above:
date_as_int += 2000000000
# Generate checksum
s = _checksum(date_as_int)
s_rjust = "{:0>2}".format(s)
# return result as a string
elms.append(s_rjust)
return "".join(elms)
|
Returns an 11-digit Belgian SSN, called "rijksregisternummer", as a string.
The first 6 digits represent the birthdate with (in order) year, month and day.
The second group of 3 digits represents a sequence number (order of birth).
It is even for women and odd for men.
For men the range starts at 1 and ends at 997, for women 2 until 998.
The third group of 2 digits is a checksum based on the previous 9 digits (modulo 97).
Divide those 9 digits by 97, subtract the remainder from 97 and that's the result.
For persons born in or after 2000, the 9-digit number needs to be preceded by a 2
(add 2000000000) before the division by 97.
|
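A worked example of the modulo-97 checksum described above, with a made-up birthdate and sequence number:
# Hypothetical values: born 1994-02-08, sequence number 123 -> 9-digit base "940208123".
base = int("94" + "02" + "08" + "123")
checksum = 97 - (base % 97)
print("{:0>2}".format(checksum))  # two-digit checksum appended to form the 11-digit number

# For someone born in or after 2000, 2000000000 is added before the division:
base_2000s = int("01" + "03" + "15" + "042") + 2000000000
print("{:0>2}".format(97 - (base_2000s % 97)))
|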
def parse_config(file_name='/usr/local/etc/pkg.conf'):
'''
Return dict of uncommented global variables.
CLI Example:
.. code-block:: bash
salt '*' pkg.parse_config
``NOTE:`` not working properly right now
'''
ret = {}
if not os.path.isfile(file_name):
return 'Unable to find {0} on file system'.format(file_name)
with salt.utils.files.fopen(file_name) as ifile:
for line in ifile:
line = salt.utils.stringutils.to_unicode(line)
if line.startswith('#') or line.startswith('\n'):
pass
else:
key, value = line.split('\t')
ret[key] = value
ret['config_file'] = file_name
return ret
|
Return dict of uncommented global variables.
CLI Example:
.. code-block:: bash
salt '*' pkg.parse_config
``NOTE:`` not working properly right now
|
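A rough sketch of the tab-separated parsing performed above, run on made-up pkg.conf content (the real helper also records the file path under 'config_file'):
sample = "PACKAGESITE\thttp://pkg.example.org/latest\nASSUME_ALWAYS_YES\ttrue\n# a comment\n"

parsed = {}
for line in sample.splitlines(True):
    if line.startswith('#') or line.startswith('\n'):
        continue
    key, value = line.split('\t')
    parsed[key] = value
print(parsed)  # values keep their trailing newline, exactly as in the function above
|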
def bisect(func, a, b, xtol=1e-12, maxiter=100):
"""
Finds the root of `func` using the bisection method.
Requirements
------------
- func must be a continuous function that accepts a single number input
and returns a single number
- `func(a)` and `func(b)` must have opposite sign
Parameters
----------
func : function
the function that we want to find the root of
a : number
one of the bounds on the input
b : number
the other bound on the input
xtol : number, optional
the solution tolerance of the input value. The algorithm is
considered converged if `abs(b-a)/2. < xtol`
maxiter : number, optional
the maximum number of iterations allowed for convergence
"""
fa = func(a)
if fa == 0.:
return a
fb = func(b)
if fb == 0.:
return b
assert sign(fa) != sign(fb)
for i in xrange(maxiter):
c = (a + b) / 2.
fc = func(c)
if fc == 0. or abs(b - a) / 2. < xtol:
return c
if sign(fc) == sign(func(a)):
a = c
else:
b = c
else:
raise RuntimeError('Failed to converge after %d iterations.' % maxiter)
|
Finds the root of `func` using the bisection method.
Requirements
------------
- func must be a continuous function that accepts a single number input
and returns a single number
- `func(a)` and `func(b)` must have opposite sign
Parameters
----------
func : function
the function that we want to find the root of
a : number
one of the bounds on the input
b : number
the other bound on the input
xtol : number, optional
the solution tolerance of the input value. The algorithm is
considered converged if `abs(b-a)/2. < xtol`
maxiter : number, optional
the maximum number of iterations allowed for convergence
|
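A quick usage example of the bisection routine above (it relies on `sign` and `xrange` from its own module), here computing the square root of 2 as the root of x**2 - 2:
root = bisect(lambda x: x * x - 2.0, 0.0, 2.0, xtol=1e-9)
print(root)  # approximately 1.41421356, accurate to roughly xtol
|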
def from_text(text):
"""Convert text into an rcode.
@param text: the textual rcode
@type text: string
@raises UnknownRcode: the rcode is unknown
@rtype: int
"""
if text.isdigit():
v = int(text)
if v >= 0 and v <= 4095:
return v
v = _by_text.get(text.upper())
if v is None:
raise UnknownRcode
return v
|
Convert text into an rcode.
@param text: the textual rcode
@type text: string
@raises UnknownRcode: the rcode is unknown
@rtype: int
|
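Illustrative calls, assuming `_by_text` holds the standard DNS rcode mnemonics:
print(from_text('3'))         # numeric strings in 0..4095 pass straight through -> 3
print(from_text('NXDOMAIN'))  # looked up in _by_text; 3 under the standard DNS mapping
|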
def _draw(self, color):
"""Draw the gradient and the selection cross on the canvas."""
width = self.winfo_width()
height = self.winfo_height()
self.delete("bg")
self.delete("cross_h")
self.delete("cross_v")
del self.bg
self.bg = tk.PhotoImage(width=width, height=height, master=self)
self._fill()
self.create_image(0, 0, image=self.bg, anchor="nw", tags="bg")
self.tag_lower("bg")
h, s, v = color
x = v / 100.
y = (1 - s / 100.)
self.create_line(0, y * height, width, y * height, tags="cross_h",
fill="#C2C2C2")
self.create_line(x * width, 0, x * width, height, tags="cross_v",
fill="#C2C2C2")
|
Draw the gradient and the selection cross on the canvas.
|
def revoke(self, paths: Union[str, Iterable[str]], users: Union[str, Iterable[str], User, Iterable[User]],
recursive: bool=False):
"""
See `AccessControlMapper.revoke`.
:param paths: see `AccessControlMapper.revoke`
:param users: see `AccessControlMapper.revoke`
:param recursive: whether the access control list should be changed recursively for all nested collections
"""
|
See `AccessControlMapper.revoke`.
:param paths: see `AccessControlMapper.revoke`
:param users: see `AccessControlMapper.revoke`
:param recursive: whether the access control list should be changed recursively for all nested collections
|
def main():
'''main routine'''
# Load Azure app defaults
try:
with open('azurermconfig.json') as config_file:
config_data = json.load(config_file)
except FileNotFoundError:
sys.exit('Error: Expecting azurermconfig.json in current folder')
tenant_id = config_data['tenantId']
app_id = config_data['appId']
app_secret = config_data['appSecret']
subscription_id = config_data['subscriptionId']
access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
vmsslist = azurerm.list_vmss_sub(access_token, subscription_id)
for vmss in vmsslist['value']:
name = vmss['name']
location = vmss['location']
capacity = vmss['sku']['capacity']
print(''.join(['Name: ', name, ', location: ',
location, ', Capacity: ', str(capacity)]))
print('VMSS NICs...')
rgname = get_rg_from_id(vmss['id'])
vmss_nics = azurerm.get_vmss_nics(
access_token, subscription_id, rgname, name)
print(json.dumps(vmss_nics, sort_keys=False,
indent=2, separators=(',', ': ')))
print('VMSS Virtual machines...')
vms = azurerm.list_vmss_vms(
access_token, subscription_id, rgname, name)
#print(json.dumps(vms, sort_keys=False, indent=2, separators=(',', ': ')))
for vmssvm in vms['value']:
vm_id = vmssvm['instanceId']
print(vm_id + ', ' + vmssvm['name'] + '\n')
print('VMSS VM NICs...')
vmnics = azurerm.get_vmss_vm_nics(access_token, subscription_id, rgname, name,
vm_id)
print(json.dumps(vmnics, sort_keys=False,
indent=2, separators=(',', ': ')))
|
main routine
|
def bundle_dir():
"""Handle resource management within an executable file."""
if frozen():
directory = sys._MEIPASS
else:
directory = os.path.dirname(os.path.abspath(stack()[1][1]))
if os.path.exists(directory):
return directory
|
Handle resource management within an executable file.
|
def tenant_token(self):
"""The cached token of the current tenant."""
rv = getattr(self, '_tenant_token', None)
if rv is None:
rv = self._tenant_token = self.tenant.get_token()
return rv
|
The cached token of the current tenant.
|
def resolve(self, file_path, follow_symlinks=True, allow_fd=False):
"""Search for the specified filesystem object, resolving all links.
Args:
file_path: Specifies the target FakeFile object to retrieve.
follow_symlinks: If `False`, the link itself is resolved,
otherwise the object linked to.
allow_fd: If `True`, `file_path` may be an open file descriptor
Returns:
The FakeFile object corresponding to `file_path`.
Raises:
IOError: if the object is not found.
"""
if isinstance(file_path, int):
if allow_fd and sys.version_info >= (3, 3):
return self.get_open_file(file_path).get_object()
raise TypeError('path should be string, bytes or '
'os.PathLike (if supported), not int')
if follow_symlinks:
file_path = make_string_path(file_path)
return self.get_object_from_normpath(self.resolve_path(file_path))
return self.lresolve(file_path)
|
Search for the specified filesystem object, resolving all links.
Args:
file_path: Specifies the target FakeFile object to retrieve.
follow_symlinks: If `False`, the link itself is resolved,
otherwise the object linked to.
allow_fd: If `True`, `file_path` may be an open file descriptor
Returns:
The FakeFile object corresponding to `file_path`.
Raises:
IOError: if the object is not found.
|
def ping(self):
"""
Ping the LDBD Server and return any message received back as a string.
@return: message received (may be empty) from LDBD Server as a string
"""
msg = "PING\0"
self.sfile.write(msg)
ret, output = self.__response__()
reply = str(output[0])
if ret:
msg = "Error pinging server %d:%s" % (ret, reply)
raise LDBDClientException, msg
return reply
|
Ping the LDBD Server and return any message received back as a string.
@return: message received (may be empty) from LDBD Server as a string
|
def catalog(self, categories=None):
"""
Return information about documents in corpora:
a list of tuples (doc_id, doc_title).
"""
ids = self._filter_ids(None, categories)
doc_meta = self._get_meta()
return [(doc_id, doc_meta[doc_id].title) for doc_id in ids]
|
Return information about documents in corpora:
a list of tuples (doc_id, doc_title).
|
def _tls_pad(self, s):
"""
Provided with the concatenation of the TLSCompressed.fragment and the
HMAC, append the right padding and return it as a whole.
This is the TLS-style padding: while SSL allowed for random padding,
TLS (misguidedly) specifies the repetition of the same byte all over,
and this byte must be equal to len(<entire padding>) - 1.
Meant to be used with a block cipher only.
"""
padding = b""
block_size = self.tls_session.wcs.cipher.block_size
padlen = block_size - ((len(s) + 1) % block_size)
if padlen == block_size:
padlen = 0
pad_pattern = chb(padlen)
padding = pad_pattern * (padlen + 1)
return s + padding
|
Provided with the concatenation of the TLSCompressed.fragment and the
HMAC, append the right padding and return it as a whole.
This is the TLS-style padding: while SSL allowed for random padding,
TLS (misguidedly) specifies the repetition of the same byte all over,
and this byte must be equal to len(<entire padding>) - 1.
Meant to be used with a block cipher only.
|
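A standalone sketch of the TLS-style padding arithmetic described above, assuming a 16-byte block cipher and treating `chb` as `bytes([...])`:
block_size = 16
payload = b"x" * 21                 # fragment + HMAC, 21 bytes for illustration
padlen = block_size - ((len(payload) + 1) % block_size)
if padlen == block_size:
    padlen = 0
padded = payload + bytes([padlen]) * (padlen + 1)
print(padlen, len(padded))          # 10 32 -> padded length is a multiple of 16
|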
def raise_sigint():
"""
Raising the SIGINT signal in the current process and all sub-processes.
os.kill() only issues a signal in the current process (without subprocesses).
CTRL+C on the console sends the signal to the process group (which we need).
"""
if hasattr(signal, 'CTRL_C_EVENT'):
# windows. Need CTRL_C_EVENT to raise the signal in the whole process group
os.kill(os.getpid(), signal.CTRL_C_EVENT)
else:
# unix.
pgid = os.getpgid(os.getpid())
if pgid == 1:
os.kill(os.getpid(), signal.SIGINT)
else:
os.killpg(os.getpgid(os.getpid()), signal.SIGINT)
|
Raising the SIGINT signal in the current process and all sub-processes.
os.kill() only issues a signal in the current process (without subprocesses).
CTRL+C on the console sends the signal to the process group (which we need).
|
def touch(self, connection=None):
"""
Mark this update as complete.
IMPORTANT, If the marker table doesn't exist,
the connection transaction will be aborted and the connection reset.
Then the marker table will be created.
"""
self.create_marker_table()
if connection is None:
connection = self.connect()
connection.execute_non_query(
"""IF NOT EXISTS(SELECT 1
FROM {marker_table}
WHERE update_id = %(update_id)s)
INSERT INTO {marker_table} (update_id, target_table)
VALUES (%(update_id)s, %(table)s)
ELSE
UPDATE t
SET target_table = %(table)s
, inserted = GETDATE()
FROM {marker_table} t
WHERE update_id = %(update_id)s
""".format(marker_table=self.marker_table),
{"update_id": self.update_id, "table": self.table})
# make sure update is properly marked
assert self.exists(connection)
|
Mark this update as complete.
IMPORTANT, If the marker table doesn't exist,
the connection transaction will be aborted and the connection reset.
Then the marker table will be created.
|
def generatemouseevent(self, x, y, eventType="b1c",
drag_button_override='drag_default_button'):
"""
Generate mouse event on x, y co-ordinates.
@param x: X co-ordinate
@type x: int
@param y: Y co-ordinate
@type y: int
@param eventType: Mouse click type
@type eventType: str
@param drag_button_override: Any drag_xxx value
Only relevant for movements, i.e. |type| = "abs" or "rel"
Quartz is not fully compatible with windows, so for drags
the drag button must be explicitly defined. generatemouseevent
will remember the last button pressed by default, and drag
that button, use this argument to override that.
@type drag_button_override: str
@return: 1 on success.
@rtype: integer
"""
if drag_button_override not in mouse_click_override:
raise ValueError('Unsupported drag_button_override type: %s' % \
drag_button_override)
global drag_button_remembered
point = (x, y)
button = centre # Only matters for "other" buttons
click_type = None
if eventType == "abs" or eventType == "rel":
if drag_button_override != 'drag_default_button':
events = [mouse_click_override[drag_button_override]]
elif drag_button_remembered:
events = [drag_button_remembered]
else:
events = [move]
if eventType == "rel":
point = CGEventGetLocation(CGEventCreate(None))
point.x += x
point.y += y
elif eventType == "b1p":
events = [press_left]
drag_button_remembered = drag_left
elif eventType == "b1r":
events = [release_left]
drag_button_remembered = None
elif eventType == "b1c":
events = [press_left, release_left]
elif eventType == "b1d":
events = [press_left, release_left]
click_type = double_click
elif eventType == "b2p":
events = [press_other]
drag_button_remembered = drag_other
elif eventType == "b2r":
events = [release_other]
drag_button_remembered = None
elif eventType == "b2c":
events = [press_other, release_other]
elif eventType == "b2d":
events = [press_other, release_other]
click_type = double_click
elif eventType == "b3p":
events = [press_right]
drag_button_remembered = drag_right
elif eventType == "b3r":
events = [release_right]
drag_button_remembered = None
elif eventType == "b3c":
events = [press_right, release_right]
elif eventType == "b3d":
events = [press_right, release_right]
click_type = double_click
else:
raise LdtpServerException(u"Mouse event '%s' not implemented" % eventType)
for event in events:
CG_event = CGEventCreateMouseEvent(None, event, point, button)
if click_type:
CGEventSetIntegerValueField(
CG_event, kCGMouseEventClickState, click_type)
CGEventPost(kCGHIDEventTap, CG_event)
# Give the event time to happen
time.sleep(0.01)
return 1
|
Generate mouse event on x, y co-ordinates.
@param x: X co-ordinate
@type x: int
@param y: Y co-ordinate
@type y: int
@param eventType: Mouse click type
@type eventType: str
@param drag_button_override: Any drag_xxx value
Only relevant for movements, i.e. |type| = "abs" or "rel"
Quartz is not fully compatible with windows, so for drags
the drag button must be explicitly defined. generatemouseevent
will remember the last button pressed by default, and drag
that button, use this argument to override that.
@type drag_button_override: str
@return: 1 on success.
@rtype: integer
|
def sc_to_fc(spvec, nmax, mmax, nrows, ncols):
"""assume Ncols is even"""
fdata = np.zeros([int(nrows), ncols], dtype=np.complex128)
for k in xrange(0, int(ncols / 2)):
if k < mmax:
kk = k
ind = mindx(kk, nmax, mmax)
vec = spvec[ind:ind + nmax - np.abs(kk) + 1]
fdata[:, kk] = fcvec_m_sc(vec, kk, nmax, nrows)
kk = -(k + 1)
ind = mindx(kk, nmax, mmax)
vec = spvec[ind:ind + nmax - np.abs(kk) + 1]
fdata[:, kk] = fcvec_m_sc(vec, kk, nmax, nrows)
if k == mmax:
kk = k
ind = mindx(kk, nmax, mmax)
vec = spvec[ind:ind + nmax - np.abs(kk) + 1]
fdata[:, kk] = fcvec_m_sc(vec, kk, nmax, nrows)
return fdata
|
assume Ncols is even
|
def from_nds2(cls, nds2channel):
"""Generate a new channel using an existing nds2.channel object
"""
# extract metadata
name = nds2channel.name
sample_rate = nds2channel.sample_rate
unit = nds2channel.signal_units
if not unit:
unit = None
ctype = nds2channel.channel_type_to_string(nds2channel.channel_type)
# get dtype
dtype = {  # pylint: disable=no-member
nds2channel.DATA_TYPE_INT16: numpy.int16,
nds2channel.DATA_TYPE_INT32: numpy.int32,
nds2channel.DATA_TYPE_INT64: numpy.int64,
nds2channel.DATA_TYPE_FLOAT32: numpy.float32,
nds2channel.DATA_TYPE_FLOAT64: numpy.float64,
nds2channel.DATA_TYPE_COMPLEX32: numpy.complex64,
}.get(nds2channel.data_type)
return cls(name, sample_rate=sample_rate, unit=unit, dtype=dtype,
type=ctype)
|
Generate a new channel using an existing nds2.channel object
|
def delegate_method(other, method, name=None):
"""Add a method to the current class that delegates to another method.
The *other* argument must be a property that returns the instance to
delegate to. Due to an implementation detail, the property must be defined
in the current class. The *method* argument specifies a method to delegate
to. It can be any callable as long as it takes the instances as its first
argument.
It is a common paradigm in Gruvi to expose protocol methods onto clients.
This keeps most of the logic into the protocol, but prevents the user from
having to type ``'client.protocol.*methodname*'`` all the time.
For example::
class MyClient(Client):
protocol = Client.protocol
delegate_method(protocol, MyProtocol.method)
"""
frame = sys._getframe(1)
classdict = frame.f_locals
@functools.wraps(method)
def delegate(self, *args, **kwargs):
other_self = other.__get__(self)
return method(other_self, *args, **kwargs)
if getattr(method, '__switchpoint__', False):
delegate.__switchpoint__ = True
if name is None:
name = method.__name__
propname = None
for key in classdict:
if classdict[key] is other:
propname = key
break
# If we know the property name, replace the docstring with a small
# reference instead of copying the function docstring.
if propname:
qname = getattr(method, '__qualname__', method.__name__)
if '.' in qname:
delegate.__doc__ = 'A shorthand for ``self.{propname}.{name}()``.' \
.format(name=name, propname=propname)
else:
delegate.__doc__ = 'A shorthand for ``{name}({propname}, ...)``.' \
.format(name=name, propname=propname)
classdict[name] = delegate
|
Add a method to the current class that delegates to another method.
The *other* argument must be a property that returns the instance to
delegate to. Due to an implementation detail, the property must be defined
in the current class. The *method* argument specifies a method to delegate
to. It can be any callable as long as it takes the instances as its first
argument.
It is a common paradigm in Gruvi to expose protocol methods onto clients.
This keeps most of the logic into the protocol, but prevents the user from
having to type ``'client.protocol.*methodname*'`` all the time.
For example::
class MyClient(Client):
protocol = Client.protocol
delegate_method(protocol, MyProtocol.method)
|
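A minimal, self-contained sketch of the delegation idea (not the Gruvi implementation itself): a method of one class is re-exposed on a wrapper class, reached through a property defined in the wrapper's body.
import functools

class Engine:
    def start(self, speed):
        return "engine started at {}".format(speed)

def delegate(prop, method):
    @functools.wraps(method)
    def wrapper(self, *args, **kwargs):
        # Resolve the property against the wrapper instance, then call the target method.
        return method(prop.__get__(self), *args, **kwargs)
    return wrapper

class Car:
    def __init__(self):
        self._engine = Engine()
    @property
    def engine(self):
        return self._engine
    # Re-expose Engine.start as Car.start, routed through the `engine` property.
    start = delegate(engine, Engine.start)

print(Car().start(10))  # engine started at 10
|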
def delete_user(self, user_descriptor):
"""DeleteUser.
[Preview API] Disables a user.
:param str user_descriptor: The descriptor of the user to delete.
"""
route_values = {}
if user_descriptor is not None:
route_values['userDescriptor'] = self._serialize.url('user_descriptor', user_descriptor, 'str')
self._send(http_method='DELETE',
location_id='005e26ec-6b77-4e4f-a986-b3827bf241f5',
version='5.1-preview.1',
route_values=route_values)
|
DeleteUser.
[Preview API] Disables a user.
:param str user_descriptor: The descriptor of the user to delete.
|
def read_state_file(self, state_file):
"""Read the state file, if it exists.
If this script has been run previously, resource IDs will have been written to a
state file. On starting a run, a state file will be looked for before creating new
infrastructure. Information on VPCs, security groups, and subnets are saved, as
well as running instances and their states.
AWS has a maximum number of VPCs per region per account, so we do not want to
clutter users' AWS accounts with security groups and VPCs that will be used only
once.
"""
try:
fh = open(state_file, 'r')
state = json.load(fh)
self.vpc_id = state['vpcID']
self.sg_id = state['sgID']
self.sn_ids = state['snIDs']
self.instances = state['instances']
except Exception as e:
logger.debug("Caught exception while reading state file: {0}".format(e))
raise e
logger.debug("Done reading state from the local state file.")
|
Read the state file, if it exists.
If this script has been run previously, resource IDs will have been written to a
state file. On starting a run, a state file will be looked for before creating new
infrastructure. Information on VPCs, security groups, and subnets are saved, as
well as running instances and their states.
AWS has a maximum number of VPCs per region per account, so we do not want to
clutter users' AWS accounts with security groups and VPCs that will be used only
once.
|
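For reference, the state file read above is assumed to be plain JSON shaped roughly like this; the key names match the attribute assignments in the method, while the values and filename are invented:
import json

state = {
    "vpcID": "vpc-0abc1234",
    "sgID": "sg-0def5678",
    "snIDs": ["subnet-0aaa", "subnet-0bbb"],
    "instances": ["i-0123456789abcdef0"],
}
with open("state.json", "w") as fh:
    json.dump(state, fh, indent=2)
|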
def endline_repl(self, inputstring, reformatting=False, **kwargs):
"""Add end of line comments."""
out = []
ln = 1 # line number
for line in inputstring.splitlines():
add_one_to_ln = False
try:
if line.endswith(lnwrapper):
line, index = line[:-1].rsplit("#", 1)
new_ln = self.get_ref("ln", index)
if new_ln < ln:
raise CoconutInternalException("line number decreased", (ln, new_ln))
ln = new_ln
line = line.rstrip()
add_one_to_ln = True
if not reformatting or add_one_to_ln: # add_one_to_ln here is a proxy for whether there was a ln comment or not
line += self.comments.get(ln, "")
if not reformatting and line.rstrip() and not line.lstrip().startswith("#"):
line += self.ln_comment(ln)
except CoconutInternalException as err:
complain(err)
out.append(line)
if add_one_to_ln:
ln += 1
return "\n".join(out)
|
Add end of line comments.
|
def visit_Tuple(self, node):
'''
A tuple is abstracted as an ordered container of its values
>>> from pythran import passmanager
>>> pm = passmanager.PassManager('demo')
>>> module = ast.parse('def foo(a, b): return a, b')
>>> result = pm.gather(Aliases, module)
>>> Aliases.dump(result, filter=ast.Tuple)
(a, b) => ['|[0]=a|', '|[1]=b|']
where the |[i]=id| notation means something that
may contain ``id`` at index ``i``.
'''
if node.elts:
elts_aliases = set()
for i, elt in enumerate(node.elts):
elt_aliases = self.visit(elt)
elts_aliases.update(ContainerOf(alias, i)
for alias in elt_aliases)
else:
elts_aliases = None
return self.add(node, elts_aliases)
|
A tuple is abstracted as an ordered container of its values
>>> from pythran import passmanager
>>> pm = passmanager.PassManager('demo')
>>> module = ast.parse('def foo(a, b): return a, b')
>>> result = pm.gather(Aliases, module)
>>> Aliases.dump(result, filter=ast.Tuple)
(a, b) => ['|[0]=a|', '|[1]=b|']
where the |[i]=id| notation means something that
may contain ``id`` at index ``i``.
|
def update_user_trackers(sender, topic, user, request, response, **kwargs):
""" Receiver to mark a topic being viewed as read.
This can result in marking the related forum tracker as read.
"""
TrackingHandler = get_class('forum_tracking.handler', 'TrackingHandler') # noqa
track_handler = TrackingHandler()
track_handler.mark_topic_read(topic, user)
|
Receiver to mark a topic being viewed as read.
This can result in marking the related forum tracker as read.
|
def committer(self) -> Developer:
"""
Return the committer of the commit as a Developer object.
:return: committer
"""
return Developer(self._c_object.committer.name,
self._c_object.committer.email)
|
Return the committer of the commit as a Developer object.
:return: committer
|
def modules(self):
"""Iterates over the defined Modules."""
defmodule = lib.EnvGetNextDefmodule(self._env, ffi.NULL)
while defmodule != ffi.NULL:
yield Module(self._env, defmodule)
defmodule = lib.EnvGetNextDefmodule(self._env, defmodule)
|
Iterates over the defined Modules.
|
def _add_config_regions(nblock_regions, ref_regions, data):
"""Add additional nblock regions based on configured regions to call.
Identifies user defined regions which we should not be analyzing.
"""
input_regions_bed = dd.get_variant_regions(data)
if input_regions_bed:
input_regions = pybedtools.BedTool(input_regions_bed)
# work around problem with single region not subtracted correctly.
if len(input_regions) == 1:
str_regions = str(input_regions[0]).strip()
input_regions = pybedtools.BedTool("%s\n%s" % (str_regions, str_regions),
from_string=True)
input_nblock = ref_regions.subtract(input_regions, nonamecheck=True)
if input_nblock == ref_regions:
raise ValueError("Input variant_region file (%s) "
"excludes all genomic regions. Do the chromosome names "
"in the BED file match your genome (chr1 vs 1)?" % input_regions_bed)
all_intervals = _combine_regions([input_nblock, nblock_regions], ref_regions)
else:
all_intervals = nblock_regions
if "noalt_calling" in dd.get_tools_on(data) or "altcontigs" in dd.get_exclude_regions(data):
from bcbio.heterogeneity import chromhacks
remove_intervals = ref_regions.filter(lambda r: not chromhacks.is_nonalt(r.chrom))
all_intervals = _combine_regions([all_intervals, remove_intervals], ref_regions)
return all_intervals.merge()
|
Add additional nblock regions based on configured regions to call.
Identifies user defined regions which we should not be analyzing.
|
def set_transform_interface_params(spec, input_features, output_features, are_optional = False):
""" Common utilities to set transform interface params.
"""
input_features = _fm.process_or_validate_features(input_features)
output_features = _fm.process_or_validate_features(output_features)
# Add input and output features
for (fname, ftype) in input_features:
input_ = spec.description.input.add()
input_.name = fname
datatypes._set_datatype(input_.type, ftype)
if are_optional:
input_.type.isOptional = are_optional
for (fname, ftype) in output_features:
output_ = spec.description.output.add()
output_.name = fname
datatypes._set_datatype(output_.type, ftype)
return spec
|
Common utilities to set transform interface params.
|
def request_slot(client,
service: JID,
filename: str,
size: int,
content_type: str):
"""
Request an HTTP upload slot.
:param client: The client to request the slot with.
:type client: :class:`aioxmpp.Client`
:param service: Address of the HTTP upload service.
:type service: :class:`~aioxmpp.JID`
:param filename: Name of the file (without path), may be used by the server
to generate the URL.
:type filename: :class:`str`
:param size: Size of the file in bytes
:type size: :class:`int`
:param content_type: The MIME type of the file
:type content_type: :class:`str`
:return: The assigned upload slot.
:rtype: :class:`.xso.Slot`
Sends a :xep:`363` slot request to the XMPP service to obtain HTTP
PUT and GET URLs for a file upload.
The upload slot is returned as a :class:`~.xso.Slot` object.
"""
payload = Request(filename, size, content_type)
return (yield from client.send(IQ(
type_=IQType.GET,
to=service,
payload=payload
)))
|
Request an HTTP upload slot.
:param client: The client to request the slot with.
:type client: :class:`aioxmpp.Client`
:param service: Address of the HTTP upload service.
:type service: :class:`~aioxmpp.JID`
:param filename: Name of the file (without path), may be used by the server
to generate the URL.
:type filename: :class:`str`
:param size: Size of the file in bytes
:type size: :class:`int`
:param content_type: The MIME type of the file
:type content_type: :class:`str`
:return: The assigned upload slot.
:rtype: :class:`.xso.Slot`
Sends a :xep:`363` slot request to the XMPP service to obtain HTTP
PUT and GET URLs for a file upload.
The upload slot is returned as a :class:`~.xso.Slot` object.
|
def seek(self, offset, whence=0):
"""Change the file position.
The new position is specified by offset, relative to the
position indicated by whence. Possible values for whence are:
0: start of stream (default): offset must not be negative
1: current stream position
2: end of stream; offset must not be positive
Returns the new file position.
Note that seeking is emulated, so depending on the parameters,
this operation may be extremely slow.
"""
self._check_can_seek()
# Recalculate offset as an absolute file position.
if whence == 0:
pass
elif whence == 1:
offset = self._pos + offset
elif whence == 2:
# Seeking relative to EOF - we need to know the file's size.
if self._size < 0:
self._read_all(return_data=False)
offset = self._size + offset
else:
raise ValueError("Invalid value for whence: {}".format(whence))
# Make it so that offset is the number of bytes to skip forward.
if offset is None:
#This is not needed on Python 3 where the comparison to self._pos
#will fail with a TypeError.
raise TypeError("Seek offset should be an integer, not None")
if offset < self._pos:
self._rewind()
else:
offset -= self._pos
# Read and discard data until we reach the desired position.
if self._mode != _MODE_READ_EOF:
self._read_block(offset, return_data=False)
return self._pos
|
Change the file position.
The new position is specified by offset, relative to the
position indicated by whence. Possible values for whence are:
0: start of stream (default): offset must not be negative
1: current stream position
2: end of stream; offset must not be positive
Returns the new file position.
Note that seeking is emulated, so depending on the parameters,
this operation may be extremely slow.
|
def prj_show_path(self, ):
Show the project directory in a file browser
:returns: None
:rtype: None
:raises: None
"""
f = self.prj_path_le.text()
osinter = ostool.get_interface()
osinter.open_path(f)
|
Show the project directory in a file browser
:returns: None
:rtype: None
:raises: None
|