code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def logic(self, data):
    """Parse raw mysensors data and produce a reply, if any.

    The reply is returned to the caller, which is responsible for
    sending it as a mysensors command string.
    """
    # Invalid messages are logged and dropped; no reply is produced.
    try:
        msg = Message(data, self)
        msg.validate(self.protocol_version)
    except (ValueError, vol.Invalid) as exc:
        _LOGGER.warning('Not a valid message: %s', exc)
        return None
    handler = self.const.MessageType(msg.type).get_handler(self.handlers)
    reply = self._route_message(handler(msg))
    return reply.encode() if reply else None
|
Parse the data and respond to it appropriately.
Response is returned to the caller and has to be sent
data as a mysensors command string.
|
def detectBlackBerry10Phone(self):
    """Detect a BlackBerry 10 OS phone.

    True when the user agent contains both the BB10 device token and
    the mobile token; the PlayBook tablet is therefore excluded.
    """
    agent = self.__userAgent
    return (UAgentInfo.deviceBB10 in agent
            and UAgentInfo.mobile in agent)
|
Return detection of a Blackberry 10 OS phone
Detects if the current browser is a BlackBerry 10 OS phone.
Excludes the PlayBook.
|
def scroll(clicks, x=None, y=None, pause=None, _pause=True):
    """Perform a scroll of the mouse scroll wheel.

    Whether the scroll is vertical or horizontal depends on the
    underlying operating system.  The x and y parameters give the
    screen position of the event: ``None`` means the current mouse
    position, floats are rounded down, and off-screen positions are
    clamped to the screen edge.

    Args:
        clicks (int, float): The amount of scrolling to perform.
        x (int, float, None, tuple, optional): The x position on the
            screen where the click happens. None by default. If a
            tuple, it supplies both x and y.
        y (int, float, None, optional): The y position on the screen
            where the click happens. None by default.
        pause: Seconds to pause afterwards (handled by _autoPause).
        _pause (bool): Whether the automatic pause is applied.

    Returns:
        None
    """
    _failSafeCheck()
    # A tuple/list in `x` carries both coordinates.
    if type(x) in (tuple, list):
        x, y = x[0], x[1]
    x, y = position(x, y)
    platformModule._scroll(clicks, x, y)
    _autoPause(pause, _pause)
|
Performs a scroll of the mouse scroll wheel.
Whether this is a vertical or horizontal scroll depends on the underlying
operating system.
The x and y parameters detail where the mouse event happens. If None, the
current mouse position is used. If a float value, it is rounded down. If
outside the boundaries of the screen, the event happens at edge of the
screen.
Args:
clicks (int, float): The amount of scrolling to perform.
x (int, float, None, tuple, optional): The x position on the screen where the
click happens. None by default. If tuple, this is used for x and y.
y (int, float, None, optional): The y position on the screen where the
click happens. None by default.
Returns:
None
|
def rename(self, name):
    """Rename the table to ``name`` and refresh the SQLAlchemy handle."""
    statement = """ALTER TABLE {s}.{t} RENAME TO {name}
""".format(
        s=self.schema, t=self.name, name=name
    )
    self.engine.execute(statement)
    # Re-autoload so self.table reflects the renamed table.
    self.table = SQLATable(
        name, self.metadata, schema=self.schema, autoload=True)
|
Rename the table
|
def get_next_second(intersection, intersections, to_end=True):
    """Get the next node along the current (second) edge.

    .. note::
        This is a helper used only by :func:`get_next`, which in turn
        is only used by :func:`basic_interior_combine`, which itself is
        only used by :func:`combine_intersections`.

    **Very** similar to :func:`get_next_first`, but this works with the
    second curve while the other function works with the first.

    Args:
        intersection (.Intersection): The current intersection.
        intersections (List[.Intersection]): List of all detected
            intersections, provided as a reference for potential
            points to arrive at.
        to_end (Optional[bool]): Indicates if the next node should just
            be the end of the first edge or :data:`None`.

    Returns:
        Optional[.Intersection]: The "next" point along a surface of
        intersection: the nearest intersection further along the
        current (second) edge, or the end of the same edge.  If
        ``to_end`` is :data:`False` and there are no other
        intersections along the current edge, returns :data:`None`.
    """
    edge_index = intersection.index_second
    current_t = intersection.t
    # Intersections strictly ahead of us on the same second edge.
    ahead = [
        other for other in intersections
        if other.index_second == edge_index and other.t > current_t
    ]
    if ahead:
        return min(ahead, key=lambda other: other.t)
    if not to_end:
        return None
    # No other intersection on the edge: fall back to the segment end.
    return _intersection_helpers.Intersection(
        None,
        None,
        edge_index,
        1.0,
        interior_curve=CLASSIFICATION_T.SECOND,
    )
|
Gets the next node along the current (second) edge.
.. note::
This is a helper used only by :func:`get_next`, which in
turn is only used by :func:`basic_interior_combine`, which itself
is only used by :func:`combine_intersections`.
Along with :func:`get_next_first`, this function does the majority of the
heavy lifting in :func:`get_next`. **Very** similar to
:func:`get_next_first`, but this works with the second curve while the
other function works with the first.
Args:
intersection (.Intersection): The current intersection.
intersections (List[.Intersection]): List of all detected
intersections, provided as a reference for potential
points to arrive at.
to_end (Optional[bool]): Indicates if the next node should just be
the end of the first edge or :data:`None`.
Returns:
Optional[.Intersection]: The "next" point along a surface of
intersection. This will produce the next intersection along the
current (second) edge or the end of the same edge. If ``to_end`` is
:data:`False` and there are no other intersections along the current
edge, will return :data:`None` (rather than the end of the same edge).
|
def BatchLabelVocabulary(self):
    """Return all batch labels as a DisplayList of (UID, Title) pairs."""
    catalog = getToolByName(self, 'bika_setup_catalog')
    brains = catalog(portal_type='BatchLabel',
                     is_active=True,
                     sort_on='sortable_title')
    return DisplayList([(brain.UID, brain.Title) for brain in brains])
|
Return all batch labels as a display list
|
def cross_fade(self, seg1, seg2, duration):
    """Add a linear crossfade to the composition between two
    segments.

    The two segments must already be (near-)adjacent in the
    composition.  Both segments are shortened around the join and a new
    RawTrack/Segment containing the computed crossfade frames is added
    in between.

    :param seg1: First segment (fading out)
    :type seg1: :py:class:`radiotool.composer.Segment`
    :param seg2: Second segment (fading in)
    :type seg2: :py:class:`radiotool.composer.Segment`
    :param duration: Duration of crossfade (in seconds)
    :returns: the newly created crossfade segment
    :raises Exception: if the segments are not adjacent

    NOTE(review): this is Python 2 code (``print`` statement,
    integer division assumed for ``dur / 2``).
    """
    # "Adjacent" allows up to 2 frames of gap/overlap between seg1's
    # end and seg2's start.
    if seg1.comp_location + seg1.duration - seg2.comp_location < 2:
        # Crossfade length in frames; forced even so dur/2 is exact.
        dur = int(duration * seg1.track.samplerate)
        if dur % 2 == 1:
            dur -= 1
        # Clamp so neither segment is consumed past its full length.
        if dur / 2 > seg1.duration:
            dur = seg1.duration * 2
        if dur / 2 > seg2.duration:
            dur = seg2.duration * 2
        # we're going to compute the crossfade and then create a RawTrack
        # for the resulting frames
        if seg2.start - (dur / 2) < 0:
            # seg2 can't be extended backwards past frame 0; shrink the
            # fade to what is available.
            diff = seg2.start
            seg2.start = 0
            seg2.duration -= diff
            seg2.comp_location -= diff
            dur = 2 * diff
        else:
            # Shift the fade window so it straddles the join point.
            seg2.start -= (dur / 2)
            seg2.duration += (dur / 2)
            seg2.comp_location -= (dur / 2)
        seg1.duration += (dur / 2)
        # Take the overlapping frames from both segments, then trim the
        # segments so they no longer cover the crossfade region.
        out_frames = seg1.get_frames(channels=self.channels)[-dur:]
        seg1.duration -= dur
        in_frames = seg2.get_frames(channels=self.channels)[:dur]
        seg2.start += dur
        seg2.duration -= dur
        seg2.comp_location += dur
        # compute the crossfade
        in_frames = in_frames[:min(map(len, [in_frames, out_frames]))]
        out_frames = out_frames[:min(map(len, [in_frames, out_frames]))]
        cf_frames = radiotool.utils.linear(out_frames, in_frames)
        # cf_frames = equal_power(out_frames, in_frames)
        raw_track = RawTrack(cf_frames, name="crossfade",
                             samplerate=seg1.track.samplerate)
        # Place the crossfade segment where seg1 now ends (seconds).
        rs_comp_location = (seg1.comp_location + seg1.duration) /\
            float(seg1.track.samplerate)
        rs_duration = raw_track.duration / float(raw_track.samplerate)
        raw_seg = Segment(raw_track, rs_comp_location, 0.0, rs_duration)
        # will this fix a bug?
        # (frame-accurate overrides of the seconds-based constructor args)
        raw_seg.duration = raw_track.duration
        raw_seg.comp_location = seg1.comp_location + seg1.duration
        self.add_track(raw_track)
        self.add_segment(raw_seg)
        return raw_seg
    else:
        print seg1.comp_location + seg1.duration, seg2.comp_location
        # NOTE(review): adjacent string concatenation below yields
        # "adjacentto add" -- a space appears to be missing.
        raise Exception("Segments must be adjacent"
                        "to add a crossfade ({}, {})".format(
                            seg1.comp_location + seg1.duration,
                            seg2.comp_location))
|
Add a linear crossfade to the composition between two
segments.
:param seg1: First segment (fading out)
:type seg1: :py:class:`radiotool.composer.Segment`
:param seg2: Second segment (fading in)
:type seg2: :py:class:`radiotool.composer.Segment`
:param duration: Duration of crossfade (in seconds)
|
async def findTask(self, *args, **kwargs):
    """Find Indexed Task.

    Find a task by index path, returning the highest-rank task with
    that path.  If no task exists for the given path, this API
    end-point will respond with a 404 status.

    This method gives output: ``v1/indexed-task-response.json#``

    This method is ``stable``
    """
    endpoint = self.funcinfo["findTask"]
    return await self._makeApiCall(endpoint, *args, **kwargs)
|
Find Indexed Task
Find a task by index path, returning the highest-rank task with that path. If no
task exists for the given path, this API end-point will respond with a 404 status.
This method gives output: ``v1/indexed-task-response.json#``
This method is ``stable``
|
def new_clustered_sortind(x, k=10, row_key=None, cluster_key=None):
    """
    Use MiniBatch k-means clustering to cluster the rows of matrix `x`
    into groups.

    Each cluster of rows is then sorted by `row_key` within the
    cluster, and clusters themselves are ordered by `cluster_key`.

    Returns the index that will sort the rows of `x` and a list of
    "breaks".  `breaks` is essentially a cumulative row count for each
    cluster boundary; after plotting the array you can use axhline on
    each "break" to plot the cluster boundary.

    If `k` is a list or tuple, iteratively try each one and select the
    best with the lowest mean distance from cluster centers.

    :param x: Matrix whose rows are to be clustered
    :param k: Number of clusters to create or a list of potential
        clusters; the optimum will be chosen from the list
    :param row_key:
        Optional function to act as a sort key for sorting rows within
        clusters. Signature should be `scorefunc(a)` where `a` is a 1-D
        NumPy array.
    :param cluster_key:
        Optional function for sorting clusters. Signature is
        `clusterfunc(a)` where `a` is a NumPy array containing all rows
        of `x` for cluster `i`. It must return a single value.
    """
    try:
        from sklearn.cluster import MiniBatchKMeans
    except ImportError:
        # BUG FIX: package name was misspelled as "scikits.learn".
        raise ImportError('please install scikit-learn for '
                          'clustering.')
    # If an integer was given, cluster once with that k.
    if isinstance(k, int):
        best_k = k
    else:
        # Try each candidate k, keeping the mean distance of samples to
        # cluster centers for each.
        mean_dists = {}
        for _k in k:
            mbk = MiniBatchKMeans(init='k-means++', n_clusters=_k)
            mbk.fit(x)
            mean_dists[_k] = mbk.transform(x).mean()
        # BUG FIX: previously took `sorted(...)[-1][0]`, i.e. the k with
        # the *highest* mean distance, contradicting the documented
        # "lowest mean distance" criterion.
        best_k = min(mean_dists, key=mean_dists.get)
    mbk = MiniBatchKMeans(init='k-means++', n_clusters=best_k)
    mbk.fit(x)
    k = best_k
    labels = mbk.labels_
    if cluster_key:
        # It's easier for calling code to provide something that operates
        # on a cluster level, but here it's converted to work on a label
        # level that looks in to the array `x`.
        def _cluster_key(i):
            return cluster_key(x[labels == i, :])
        sorted_labels = sorted(range(k), key=_cluster_key)
    else:
        # Otherwise just use the labels as-is.
        sorted_labels = range(k)
    if row_key:
        # Again, easier to provide a function to operate on a row. But
        # here we need it to accept an index.
        def _row_key(i):
            return row_key(x[i, :])
    final_ind = []
    breaks = []
    pos = 0
    for label in sorted_labels:
        # Which rows in `x` have this label.
        label_inds = np.nonzero(labels == label)[0]
        if row_key:
            label_sort_ind = sorted(label_inds, key=_row_key)
        else:
            label_sort_ind = label_inds
        final_ind.extend(label_sort_ind)
        pos += len(label_inds)
        breaks.append(pos)
    return np.array(final_ind), np.array(breaks)
|
Uses MiniBatch k-means clustering to cluster matrix into groups.
Each cluster of rows is then sorted by `scorefunc` -- by default, the max
peak height when all rows in a cluster are averaged, or
cluster.mean(axis=0).max().
Returns the index that will sort the rows of `x` and a list of "breaks".
`breaks` is essentially a cumulative row count for each cluster boundary.
In other words, after plotting the array you can use axhline on each
"break" to plot the cluster boundary.
If `k` is a list or tuple, iteratively try each one and select the best
with the lowest mean distance from cluster centers.
:param x: Matrix whose rows are to be clustered
:param k: Number of clusters to create or a list of potential clusters; the
optimum will be chosen from the list
:param row_key:
Optional function to act as a sort key for sorting rows within
clusters. Signature should be `scorefunc(a)` where `a` is a 1-D NumPy
array.
:param cluster_key:
Optional function for sorting clusters. Signature is `clusterfunc(a)`
where `a` is a NumPy array containing all rows of `x` for cluster `i`.
It must return a single value.
|
def _type_single(self, value, _type):
    """Coerce a single value to ``_type``.

    Dates and datetimes are normalized to epoch timestamps, string
    types to unicode; ``None`` values and a ``None`` target type pass
    through unchanged.  Failed casts are re-raised after logging.

    NOTE(review): Python 2 code (``unicode`` builtin); ``dt2ts``,
    ``to_encoding`` and ``NoneType`` come from elsewhere in the module.
    """
    if value is None or _type in (None, NoneType):
        # don't convert null values
        # default type is the original type if none set
        pass
    elif isinstance(value, _type):  # or values already of correct type
        # normalize all dates to epochs
        value = dt2ts(value) if _type in [datetime, date] else value
    else:
        if _type in (datetime, date):
            # normalize all dates to epochs
            value = dt2ts(value)
        elif _type in (unicode, str):
            # make sure all string types are properly unicoded
            value = to_encoding(value)
        else:
            try:
                value = _type(value)
            except Exception:
                # Fall back to a unicode representation for the log
                # message, then re-raise the original failure.
                value = to_encoding(value)
                logger.error("typecast failed: %s(value=%s)" % (
                    _type.__name__, value))
                raise
    return value
|
apply type to the single value
|
def from_df(cls, path:PathOrStr, train_df:DataFrame, valid_df:DataFrame, test_df:Optional[DataFrame]=None,
            tokenizer:Tokenizer=None, vocab:Vocab=None, classes:Collection[str]=None, text_cols:IntsOrStrs=1,
            label_cols:IntsOrStrs=0, label_delim:str=None, chunksize:int=10000, max_vocab:int=60000,
            min_freq:int=2, mark_fields:bool=False, include_bos:bool=True, include_eos:bool=False, **kwargs) -> DataBunch:
    "Create a `TextDataBunch` from DataFrames. `kwargs` are passed to the dataloader creation."
    processor = _get_processor(
        tokenizer=tokenizer, vocab=vocab, chunksize=chunksize, max_vocab=max_vocab,
        min_freq=min_freq, mark_fields=mark_fields,
        include_bos=include_bos, include_eos=include_eos)
    # Multiple label columns with no explicit classes: use the column
    # names as the classes.
    if classes is None and is_listy(label_cols) and len(label_cols) > 1:
        classes = label_cols
    train_list = TextList.from_df(train_df, path, cols=text_cols, processor=processor)
    valid_list = TextList.from_df(valid_df, path, cols=text_cols, processor=processor)
    src = ItemLists(path, train_list, valid_list)
    # Label for language modelling, or from the label columns.
    if cls == TextLMDataBunch:
        src = src.label_for_lm()
    elif label_delim is not None:
        src = src.label_from_df(cols=label_cols, classes=classes, label_delim=label_delim)
    else:
        src = src.label_from_df(cols=label_cols, classes=classes)
    if test_df is not None:
        src.add_test(TextList.from_df(test_df, path, cols=text_cols))
    return src.databunch(**kwargs)
|
Create a `TextDataBunch` from DataFrames. `kwargs` are passed to the dataloader creation.
|
def pathsplit(pth, dropext=True):
    """Split a path into a tuple of all of its components.

    The file extension is stripped from the last component unless
    ``dropext`` is False.
    """
    if dropext:
        pth, _ = os.path.splitext(pth)
    head, tail = os.path.split(pth)
    if head == '':
        # No directory part left.
        return (tail,)
    if len(head) == 1:
        # Root (or single-char drive) component: stop recursing.
        return (head, tail)
    return pathsplit(head, dropext=False) + (tail,)
|
Split a path into a tuple of all of its components.
|
def sitetree_breadcrumbs(parser, token):
    """Parse ``sitetree_breadcrumbs`` tag parameters.

    Two notation types are possible:

    1. Two arguments:
       {% sitetree_breadcrumbs from "mytree" %}
       Renders the breadcrumb path for the "mytree" site tree.

    2. Four arguments:
       {% sitetree_breadcrumbs from "mytree" template "sitetree/mycrumb.html" %}
       Renders the breadcrumb path for "mytree" using the specific
       template "sitetree/mycrumb.html".
    """
    tokens = token.split_contents()
    # detect_clause removes the "template ..." clause from `tokens`.
    use_template = detect_clause(parser, 'template', tokens)
    if len(tokens) == 3:
        tree_alias = parser.compile_filter(tokens[2])
        return sitetree_breadcrumbsNode(tree_alias, use_template)
    raise template.TemplateSyntaxError(
        '%r tag requires two arguments. E.g. {%% sitetree_breadcrumbs from "mytree" %%}.' % tokens[0])
|
Parses sitetree_breadcrumbs tag parameters.
Two notation types are possible:
1. Two arguments:
{% sitetree_breadcrumbs from "mytree" %}
Used to render breadcrumb path for "mytree" site tree.
2. Four arguments:
{% sitetree_breadcrumbs from "mytree" template "sitetree/mycrumb.html" %}
Used to render breadcrumb path for "mytree" site tree using specific
template "sitetree/mycrumb.html"
|
def name(self, name):
    """Update the security label's name and push the change upstream.

    Args:
        name: The new name for the security label.
    """
    self._data['name'] = name
    # Note: the base request dict is updated in place, as before.
    payload = self._base_request
    payload['name'] = name
    return self._tc_requests.update(payload, owner=self.owner)
|
Updates the security labels name.
Args:
name:
|
def scale_0to1(image_in,
               exclude_outliers_below=False,
               exclude_outliers_above=False):
    """Scale an image to [0, 1] based on its own min/max.

    Parameters
    ----------
    image_in : ndarray
        Input image.
    exclude_outliers_{below,above} : float
        Lower/upper percentile limit, a value between 0 and 100; values
        beyond the percentile are clipped to the image min/max first.

    Returns
    -------
    scaled_image : ndarray
        Clipped and/or scaled image.
    """
    low = image_in.min()
    high = image_in.max()
    # Work on a copy so the caller's array is untouched.
    image = image_in.copy()
    if exclude_outliers_below:
        cutoff = np.percentile(image, float(exclude_outliers_below))
        image[image < cutoff] = low
    if exclude_outliers_above:
        cutoff = np.percentile(image, 100.0 - float(exclude_outliers_above))
        image[image > cutoff] = high
    return (image - low) / (high - low)
|
Scale the image to [0, 1] based on its min/max.
Parameters
-----------
image_in : ndarray
Input image
exclude_outliers_{below,above} : float
Lower/upper limit, a value between 0 and 100.
Returns
-------
scaled_image : ndarray
clipped and/or scaled image
|
def get_similar_entries(context, number=5,
                        template='zinnia/tags/entries_similar.html'):
    """
    Return a template context with entries similar to the current one.
    """
    entry = context.get('entry')
    # Without a current entry there is nothing to compare against.
    if not entry:
        return {'template': template, 'entries': []}
    builder = EntryPublishedVectorBuilder()
    return {'template': template,
            'entries': builder.get_related(entry, number)}
|
Return similar entries.
|
def main():
    """Entry point: dispatch the selected mode to its handler."""
    plugin = Register()
    option = plugin.args.option
    if option != 'sqlserverlocks':
        plugin.unknown("Unknown actions.")
    else:
        plugin.sqlserverlocks_handle()
|
Register your own mode and handle method here.
|
def inbox(self):
    """:class:`Inbox feed <pypump.models.feed.Inbox>` with all
    :class:`activities <pypump.models.activity.Activity>`
    received by the person; readable only when logged in as the owner.

    Example:
        >>> for activity in pump.me.inbox[:2]:
        ...     print(activity.id)
        ...
        https://microca.st/api/activity/BvqXQOwXShSey1HxYuJQBQ
        https://pumpyourself.com/api/activity/iQGdnz5-T-auXnbUUdXh-A
    """
    if not self.isme:
        raise PyPumpException("You can't read other people's inboxes")
    # Lazily create the feed on first access and cache it.
    if self._inbox is None:
        feed = Inbox(self.links['activity-inbox'], pypump=self._pump)
        self._inbox = feed
    return self._inbox
|
:class:`Inbox feed <pypump.models.feed.Inbox>` with all
:class:`activities <pypump.models.activity.Activity>`
received by the person, can only be read if logged in as the owner.
Example:
>>> for activity in pump.me.inbox[:2]:
... print(activity.id)
...
https://microca.st/api/activity/BvqXQOwXShSey1HxYuJQBQ
https://pumpyourself.com/api/activity/iQGdnz5-T-auXnbUUdXh-A
|
def calc_sasa(dssp_df):
    """
    Summarize solvent accessibility from a parsed DSSP table.

    DSSP must be installed for biopython to properly call it
    (apt-get on Ubuntu, or from: http://swift.cmbi.ru.nl/gv/dssp/).

    Input: DataFrame of per-residue DSSP results (with ``exposure_asa``
    and ``exposure_rsa`` columns).
    Output: dict with total absolute SASA, mean relative exposure, and
    the number of residues.
    """
    return {
        'ssb_sasa': dssp_df.exposure_asa.sum(),
        'ssb_mean_rel_exposed': dssp_df.exposure_rsa.mean(),
        'ssb_size': len(dssp_df),
    }
|
Calculation of SASA utilizing the DSSP program.
DSSP must be installed for biopython to properly call it.
Install using apt-get on Ubuntu
or from: http://swift.cmbi.ru.nl/gv/dssp/
Input: PDB or CIF structure file
Output: SASA (integer) of structure
|
def select_parser(self, request, parsers):
    """
    Select the parser matching the request's content type.

    :param request: The HTTP request.
    :param parsers: The list of parsers.
    :return: ``(parser, mimetype)`` or ``(None, None)`` if no match.
    """
    # No declared content type: fall back to the first parser.
    if not request.content_type:
        default = parsers[0]
        return default, default.mimetype
    mimetype = MimeType.parse(request.content_type)
    for candidate in parsers:
        if mimetype.match(candidate.mimetype):
            return candidate, mimetype
    return None, None
|
Selects the appropriated parser which matches to the request's content type.
:param request: The HTTP request.
:param parsers: The lists of parsers.
:return: The parser selected or none.
|
def list_models(
    self, dataset, max_results=None, page_token=None, retry=DEFAULT_RETRY
):
    """[Beta] List models in the dataset.

    See
    https://cloud.google.com/bigquery/docs/reference/rest/v2/models/list

    Args:
        dataset (Union[ \
            :class:`~google.cloud.bigquery.dataset.Dataset`, \
            :class:`~google.cloud.bigquery.dataset.DatasetReference`, \
            str, \
        ]):
            A reference to the dataset whose models to list from the
            BigQuery API.  A string is converted via
            :func:`google.cloud.bigquery.dataset.DatasetReference.from_string`.
        max_results (int):
            (Optional) Maximum number of models to return. If not
            passed, defaults to a value set by the API.
        page_token (str):
            (Optional) Token representing a cursor into the models. If
            not passed, the API returns the first page; the token for
            the next page is available as ``next_page_token`` on the
            :class:`~google.api_core.page_iterator.HTTPIterator`.
        retry (:class:`google.api_core.retry.Retry`):
            (Optional) How to retry the RPC.

    Returns:
        google.api_core.page_iterator.Iterator:
            Iterator of :class:`~google.cloud.bigquery.model.Model`
            contained within the requested dataset.
    """
    if isinstance(dataset, str):
        dataset = DatasetReference.from_string(
            dataset, default_project=self.project
        )
    if not isinstance(dataset, (Dataset, DatasetReference)):
        raise TypeError("dataset must be a Dataset, DatasetReference, or string")
    iterator = page_iterator.HTTPIterator(
        client=self,
        api_request=functools.partial(self._call_api, retry),
        path="%s/models" % dataset.path,
        item_to_value=_item_to_model,
        items_key="models",
        page_token=page_token,
        max_results=max_results,
    )
    iterator.dataset = dataset
    return iterator
|
[Beta] List models in the dataset.
See
https://cloud.google.com/bigquery/docs/reference/rest/v2/models/list
Args:
dataset (Union[ \
:class:`~google.cloud.bigquery.dataset.Dataset`, \
:class:`~google.cloud.bigquery.dataset.DatasetReference`, \
str, \
]):
A reference to the dataset whose models to list from the
BigQuery API. If a string is passed in, this method attempts
to create a dataset reference from a string using
:func:`google.cloud.bigquery.dataset.DatasetReference.from_string`.
max_results (int):
(Optional) Maximum number of models to return. If not passed,
defaults to a value set by the API.
page_token (str):
(Optional) Token representing a cursor into the models. If
not passed, the API will return the first page of models. The
token marks the beginning of the iterator to be returned and
the value of the ``page_token`` can be accessed at
``next_page_token`` of the
:class:`~google.api_core.page_iterator.HTTPIterator`.
retry (:class:`google.api_core.retry.Retry`):
(Optional) How to retry the RPC.
Returns:
google.api_core.page_iterator.Iterator:
Iterator of
:class:`~google.cloud.bigquery.model.Model` contained
within the requested dataset.
|
def flush(self, timeout=None, callback=None):
    """Alias for ``self.client.flush``; a no-op when no client is bound."""
    client = self._stack[-1][0]
    if client is None:
        return None
    return client.flush(timeout=timeout, callback=callback)
|
Alias for self.client.flush
|
def pin_direction(self, pin):
    """Get the `ahio.Direction` a pin was set to.

    If you're developing a driver, implement _pin_direction(self, pin).

    @arg pin the pin (or list of pins) whose direction to query
    @returns the `ahio.Direction` the pin is set to (or a list thereof)
    @throw KeyError if pin isn't mapped.
    """
    if type(pin) is list:
        return [self.pin_direction(p) for p in pin]
    pin_id = self._pin_mapping.get(pin, None)
    # BUG FIX: compare against None explicitly -- a falsy mapped id
    # (e.g. 0) is still a valid mapping and must not raise KeyError.
    if pin_id is not None:
        return self._pin_direction(pin_id)
    raise KeyError('Requested pin is not mapped: %s' % pin)
|
Gets the `ahio.Direction` this pin was set to.
If you're developing a driver, implement _pin_direction(self, pin)
@arg pin the pin you want to see the mode
@returns the `ahio.Direction` the pin is set to
@throw KeyError if pin isn't mapped.
|
def script_dir_plus_file(filename, pyobject, follow_symlinks=True):
    """Get the current script's directory with ``filename`` appended.

    Args:
        filename (str): Filename to append to the directory path.
        pyobject (Any): Any Python object defined in the script.
        follow_symlinks (Optional[bool]): Follow symlinks or not.
            Defaults to True.

    Returns:
        str: The script's directory joined with ``filename``.
    """
    directory = script_dir(pyobject, follow_symlinks)
    return join(directory, filename)
|
Get current script's directory and then append a filename
Args:
filename (str): Filename to append to directory path
pyobject (Any): Any Python object in the script
follow_symlinks (Optional[bool]): Follow symlinks or not. Defaults to True.
Returns:
str: Current script's directory and with filename appended
|
def _do_spelling_suggestion(database, query, spelling_query):
"""
Private method that returns a single spelling suggestion based on
`spelling_query` or `query`.
Required arguments:
`database` -- The database to check spelling against
`query` -- The query to check
`spelling_query` -- If not None, this will be checked instead of `query`
Returns a string with a suggested spelling
"""
if spelling_query:
if ' ' in spelling_query:
return ' '.join([database.get_spelling_suggestion(term).decode('utf-8') for term in spelling_query.split()])
else:
return database.get_spelling_suggestion(spelling_query).decode('utf-8')
term_set = set()
for term in query:
for match in re.findall('[^A-Z]+', term.decode('utf-8')): # Ignore field identifiers
term_set.add(database.get_spelling_suggestion(match).decode('utf-8'))
return ' '.join(term_set)
|
Private method that returns a single spelling suggestion based on
`spelling_query` or `query`.
Required arguments:
`database` -- The database to check spelling against
`query` -- The query to check
`spelling_query` -- If not None, this will be checked instead of `query`
Returns a string with a suggested spelling
|
def _to_eng_tuple(number):
    """
    Return a tuple with mantissa and exponent of number formatted in
    engineering notation (exponent a multiple of 3).

    :param number: Number
    :type number: integer or float
    :rtype: tuple
    """
    # Scientific notation provides a "constant" starting format.
    sci_mant, sci_exp = to_scientific_tuple(number)
    is_neg = sci_mant.startswith("-")
    digits = sci_mant.replace(".", "")
    # Where the decimal point lands in the engineering mantissa.
    point = 1 + (sci_exp % 3) + is_neg
    # ljust ensures the integer part has at most 3 digits (say if the
    # number given is 1E4); rstrip drops an empty fractional part.
    int_part = digits.ljust(3 + is_neg, "0")[:point]
    frac_part = digits[point:].rstrip("0")
    new_mant = ".".join(part for part in (int_part, frac_part) if part)
    new_exp = int(3 * math.floor(sci_exp / 3))
    return NumComp(new_mant, new_exp)
|
Return tuple with mantissa and exponent of number formatted in engineering notation.
:param number: Number
:type number: integer or float
:rtype: tuple
|
def get_mon_map(service):
    """
    Return the current monitor map, parsed from JSON.

    :param service: six.string_types. The Ceph user name to run the command under
    :return: parsed mon_status JSON.
    :raise: ValueError if the monmap fails to parse.
        Also raises CalledProcessError if our ceph command fails.
    """
    try:
        mon_status = check_output(['ceph', '--id', service,
                                   'mon_status', '--format=json'])
    except CalledProcessError as e:
        log("mon_status command failed with message: {}"
            .format(str(e)))
        raise
    if six.PY3:
        mon_status = mon_status.decode('UTF-8')
    try:
        return json.loads(mon_status)
    except ValueError as v:
        log("Unable to parse mon_status json: {}. Error: {}"
            .format(mon_status, str(v)))
        raise
|
Returns the current monitor map.
:param service: six.string_types. The Ceph user name to run the command under
:return: json string. :raise: ValueError if the monmap fails to parse.
Also raises CalledProcessError if our ceph command fails
|
def get_attachment_info(self, attachment):
    """Return a dictionary of UI-relevant attachment information.
    """
    attachment_file = attachment.getAttachmentFile()
    attachment_type = attachment.getAttachmentType()
    icon = attachment_file.icon
    # The icon attribute may be a callable producing the actual icon.
    if callable(icon):
        icon = icon()
    type_uid = api.get_uid(attachment_type) if attachment_type else ''
    return {
        'keywords': attachment.getAttachmentKeys(),
        'size': self.get_attachment_size(attachment),
        'name': attachment_file.filename,
        'Icon': icon,
        'type': type_uid,
        'absolute_url': attachment.absolute_url(),
        'UID': api.get_uid(attachment),
        'report_option': attachment.getReportOption(),
        'analysis': '',
    }
|
Returns a dictionary of attachment information
|
def indent_iterable(elems: Sequence[str], num: int = 2) -> List[str]:
    """Return *elems* with each element prefixed by ``num`` spaces."""
    prefix = " " * num
    return [prefix + line for line in elems]
|
Indent an iterable.
|
def describe_cache_parameter_groups(name=None, conn=None, region=None, key=None, keyid=None,
                                    profile=None):
    '''
    Return details about all (or just one) Elasticache cache parameter groups.

    Example:

    .. code-block:: bash

        salt myminion boto3_elasticache.describe_cache_parameter_groups
        salt myminion boto3_elasticache.describe_cache_parameter_groups myParameterGroup
    '''
    return _describe_resource(name=name, name_param='CacheParameterGroupName',
                              res_type='cache_parameter_group', info_node='CacheParameterGroups',
                              conn=conn, region=region, key=key, keyid=keyid, profile=profile)
|
Return details about all (or just one) Elasticache cache parameter groups.
Example:
.. code-block:: bash
salt myminion boto3_elasticache.describe_cache_parameter_groups
salt myminion boto3_elasticache.describe_cache_parameter_groups myParameterGroup
|
def run_show_val(obj, name):
    """Generic subcommand value display.

    Prints the current value of the debugger setting named by
    ``obj.name`` and returns False (do not stop the command loop).

    NOTE(review): the ``name`` parameter is unused; the setting key is
    taken from ``obj.name`` -- confirm this is intentional.
    """
    val = obj.debugger.settings[obj.name]
    obj.msg("%s is %s." % (obj.name, obj.cmd.proc._saferepr(val),))
    return False
|
Generic subcommand value display
|
def get_bpf_pointer(tcpdump_lines):
    """Create a BPF Pointer (bpf_program) from TCPDump filter output lines."""
    if conf.use_pypy:
        # ctypes structures are handled differently under PyPy.
        return _legacy_bpf_pointer(tcpdump_lines)
    # The first line carries the instruction count; allocate the array.
    size = int(tcpdump_lines[0])
    bip = (bpf_insn * size)()
    # Each remaining line holds "code jt jf k" for one instruction.
    for i, line in enumerate(tcpdump_lines[1:]):
        values = [int(v) for v in line.split()]
        bip[i].code = c_ushort(values[0])
        bip[i].jt = c_ubyte(values[1])
        bip[i].jf = c_ubyte(values[2])
        bip[i].k = c_uint(values[3])
    # Wrap the filled instruction array in a BPF program struct.
    return bpf_program(size, bip)
|
Create a BPF Pointer for TCPDump filter
|
def join_path_prefix(path, pre_path=None):
    """
    If ``path`` is set and not absolute, append it to ``pre_path``
    (when one is given).

    :param path: path to append
    :type path: str | None
    :param pre_path: Base path to append to (default: None)
    :type pre_path: None | str
    :return: Path or appended path
    :rtype: str | None
    """
    # Empty/None paths and absolute paths pass through unchanged.
    if path and pre_path and not os.path.isabs(path):
        return os.path.join(pre_path, path)
    return path
|
If path set and not absolute, append it to pre path (if used)
:param path: path to append
:type path: str | None
:param pre_path: Base path to append to (default: None)
:type pre_path: None | str
:return: Path or appended path
:rtype: str | None
|
def assembleimage(patches, pmasks, gridids):
    r"""
    Assemble an image from a number of patches, patch masks and their grid ids.

    The patches are merged one dimension at a time: along each axis, all
    patches that share the grid coordinates of the remaining axes are sorted
    by their position on that axis and concatenated, until a single array is
    left. The (equally assembled) first patch mask then locates the original
    image extent inside the padded assembly.

    Parameters
    ----------
    patches : sequence
        Sequence of patches.
    pmasks : sequence
        Sequence of associated patch masks.
    gridids
        Sequence of associated grid ids.

    Returns
    -------
    image : ndarray
        The patches assembled back into an image of the original proportions.
    """
    for axis in range(patches[0].ndim):
        # Bucket patches by the grid coordinates of all remaining axes;
        # within a bucket, remember the position along the current axis.
        buckets = {}
        for patch, pmask, gridid in zip(patches, pmasks, gridids):
            buckets.setdefault(gridid[1:], []).append((patch, pmask, gridid[0]))
        # Concatenate each bucket along the current axis, ordered by position.
        patches = []
        pmasks = []
        gridids = []
        for remainder, entries in list(buckets.items()):
            entries = sorted(entries, key=itemgetter(2))
            patches.append(numpy.concatenate([p for p, _, _ in entries], axis))
            pmasks.append(numpy.concatenate([m for _, m, _ in entries], axis))
            gridids.append(remainder)
    # The assembled mask must describe exactly one region: the original image.
    regions = find_objects(pmasks[0])
    if not 1 == len(regions):
        raise ValueError('The assembled patch masks contain more than one binary object.')
    return patches[0][regions[0]]
|
r"""
Assemble an image from a number of patches, patch masks and their grid ids.
Parameters
----------
patches : sequence
Sequence of patches.
pmasks : sequence
Sequence of associated patch masks.
gridids
Sequence of associated grid ids.
Returns
-------
image : ndarray
The patches assembled back into an image of the original proportions.
Examples
--------
Two-dimensional example:
>>> import numpy
>>> from medpy.iterators import CentredPatchIterator
>>> arr = numpy.arange(0, 25).reshape((5,5))
>>> arr
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
>>> patches, pmasks, gridids, _ = zip(*CentredPatchIterator(arr, 2))
>>> result = CentredPatchIterator.assembleimage(patches, pmasks, gridids)
>>> numpy.all(arr == result)
True
Five-dimensional example:
>>> arr = numpy.random.randint(0, 10, range(5, 10))
>>> patches, pmasks, gridids, _ = zip(*CentredPatchIterator(arr, range(2, 7)))
>>> result = CentredPatchIterator.assembleimage(patches, pmasks, gridids)
>>> numpy.all(arr == result)
True
|
def __read_byte_size(decl, attrs):
    """Set the declaration's size in bytes from its XML attributes.

    Using duck typing to set the size instead of in constructor.

    :param decl: declaration object; its ``byte_size`` attribute is assigned
    :param attrs: XML attribute mapping; ``XML_AN_SIZE`` holds the size in bits
    """
    size = attrs.get(XML_AN_SIZE, 0)
    # The XML reports the size in bits; store whole bytes. Floor division
    # keeps byte_size an int (plain ``/ 8`` produced a float on Python 3).
    decl.byte_size = int(size) // 8
|
Using duck typing to set the size instead of in constructor
|
def cdd(d, k):
    """ Conditionally delete key (or list of keys) 'k' from dict 'd' """
    # Normalize to a list so a single key and a list of keys share one path.
    keys = k if isinstance(k, list) else [k]
    for key in keys:
        # pop with a default is a no-op for absent keys (no KeyError).
        d.pop(key, None)
|
Conditionally delete key (or list of keys) 'k' from dict 'd'
|
def setup_graph(self):
    """
    Creates our Graph and figures out, which shared/global model to hook up to.
    If we are in a global-model's setup procedure, we do not create
    a new graph (return None as the context). We will instead use the already existing local replica graph
    of the model.

    Returns: None or the graph's as_default()-context.
    """
    graph_default_context = None
    # Single (non-distributed) mode: own a fresh graph and enter its context.
    if self.execution_type == "single":
        self.graph = tf.Graph()
        graph_default_context = self.graph.as_default()
        graph_default_context.__enter__()
        self.global_model = None
    # Distributed tf
    elif self.execution_type == "distributed":
        # Parameter-server -> Do not build any graph.
        if self.distributed_spec["job"] == "ps":
            return None
        # worker -> construct the global (main) model; the one hosted on the ps,
        elif self.distributed_spec["job"] == "worker":
            # The local replica model.
            if self.is_local_model:
                graph = tf.Graph()
                graph_default_context = graph.as_default()
                graph_default_context.__enter__()
                # Now that the graph is created and entered -> deepcopy ourselves and setup global model first,
                # then continue. The copy shares our spec but builds the ps-hosted variables.
                self.global_model = deepcopy(self)
                # Switch on global construction/setup-mode for the pass to setup().
                self.global_model.is_local_model = False
                self.global_model.setup()
                self.graph = graph
                self.as_local_model()
                # Give each worker replica a unique scope suffix.
                self.scope += '-worker' + str(self.distributed_spec["task_index"])
            # The global_model (whose Variables are hosted by the ps).
            else:
                self.graph = tf.get_default_graph()  # lives in the same graph as local model
                self.global_model = None
                self.device = tf.train.replica_device_setter(
                    # Place its Variables on the parameter server(s) (round robin).
                    #ps_device="/job:ps",  # default
                    # Train-ops for the global_model are hosted locally (on this worker's node).
                    worker_device=self.device,
                    cluster=self.distributed_spec["cluster_spec"]
                )
        else:
            raise TensorForceError("Unsupported job type: {}!".format(self.distributed_spec["job"]))
    else:
        raise TensorForceError("Unsupported distributed type: {}!".format(self.distributed_spec["type"]))
    return graph_default_context
|
Creates our Graph and figures out, which shared/global model to hook up to.
If we are in a global-model's setup procedure, we do not create
a new graph (return None as the context). We will instead use the already existing local replica graph
of the model.
Returns: None or the graph's as_default()-context.
|
def get_all_subdomains(offset=None, count=None, min_sequence=None, db_path=None, zonefiles_dir=None):
    """
    Static method for getting the list of all subdomains.

    :param offset: pagination offset, forwarded to the DB query
    :param count: maximum number of results, forwarded to the DB query
    :param min_sequence: only return subdomains at or above this sequence number
    :param db_path: path to the subdomain DB (defaults to the node's configured path)
    :param zonefiles_dir: zonefile storage directory (defaults to the node's configured path)
    :return: list of subdomains; empty list when subdomain support is disabled
    """
    opts = get_blockstack_opts()
    if not is_subdomains_enabled(opts):
        return []

    if db_path is None:
        db_path = opts['subdomaindb_path']

    if zonefiles_dir is None:
        zonefiles_dir = opts['zonefiles']

    db = SubdomainDB(db_path, zonefiles_dir)
    # Bug fix: forward the caller's min_sequence (was hard-coded to None,
    # silently ignoring the argument).
    return db.get_all_subdomains(offset=offset, count=count, min_sequence=min_sequence)
|
Static method for getting the list of all subdomains
|
def check_serial_port(name):
    """returns valid COM Port.

    Looks the device up by name via pyserial's port grep; raises ValueError
    listing all available ports when no match exists.
    """
    try:
        # grep() yields matching ports; the first element is the device path.
        return next(serial.tools.list_ports.grep(name))[0]
    except StopIteration:
        # No match: build a helpful error listing every available port.
        parts = ["device {} not found. ".format(name), "available devices are: "]
        for port in list(serial.tools.list_ports.comports()):
            parts.append("{},".format(text_type(port)))
        raise ValueError("".join(parts))
|
returns valid COM Port.
|
def load_glb(self):
    """Loads a binary gltf file.

    Parses the GLB container at ``self.path``: a 12-byte header
    (magic, version, total length) followed by a JSON chunk and a binary
    buffer chunk, then builds ``self.meta`` from both.

    :raises ValueError: on a bad magic header, unsupported version,
        or unexpected chunk type.
    """
    with open(self.path, 'rb') as fd:
        # Check header
        magic = fd.read(4)
        if magic != GLTF_MAGIC_HEADER:
            raise ValueError("{} has incorrect header {} != {}".format(self.path, magic, GLTF_MAGIC_HEADER))

        # All GLB integers are little-endian uint32.
        version = struct.unpack('<I', fd.read(4))[0]
        if version != 2:
            raise ValueError("{} has unsupported version {}".format(self.path, version))

        # Total file size including headers
        _ = struct.unpack('<I', fd.read(4))[0]  # noqa

        # Chunk 0 - json
        chunk_0_length = struct.unpack('<I', fd.read(4))[0]
        chunk_0_type = fd.read(4)
        if chunk_0_type != b'JSON':
            raise ValueError("Expected JSON chunk, not {} in file {}".format(chunk_0_type, self.path))

        json_meta = fd.read(chunk_0_length).decode()

        # chunk 1 - binary buffer
        chunk_1_length = struct.unpack('<I', fd.read(4))[0]
        chunk_1_type = fd.read(4)
        if chunk_1_type != b'BIN\x00':
            raise ValueError("Expected BIN chunk, not {} in file {}".format(chunk_1_type, self.path))

        self.meta = GLTFMeta(self.path, json.loads(json_meta), binary_buffer=fd.read(chunk_1_length))
|
Loads a binary gltf file
|
def load(args):
    '''
    %prog load gff_file fasta_file [--options]

    Parses the selected features out of GFF, with subfeatures concatenated.
    For example, to get the CDS sequences, do this:
    $ %prog load athaliana.gff athaliana.fa --parents mRNA --children CDS

    To get 500bp upstream of a genes Transcription Start Site (TSS), do this:
    $ %prog load athaliana.gff athaliana.fa --feature=upstream:TSS:500

    Switch TSS with TrSS for Translation Start Site.
    '''
    from datetime import datetime as dt
    from jcvi.formats.fasta import Seq, SeqRecord

    # can request output fasta sequence id to be picked from following attributes
    valid_id_attributes = ["ID", "Name", "Parent", "Alias", "Target"]

    # Command-line option setup.
    p = OptionParser(load.__doc__)
    p.add_option("--parents", dest="parents", default="mRNA",
            help="list of features to extract, use comma to separate (e.g." + \
            "'gene,mRNA') [default: %default]")
    p.add_option("--children", dest="children", default="CDS",
            help="list of features to extract, use comma to separate (e.g." + \
            "'five_prime_UTR,CDS,three_prime_UTR') [default: %default]")
    p.add_option("--feature", dest="feature",
            help="feature type to extract. e.g. `--feature=CDS` or " + \
            "`--feature=upstream:TSS:500` [default: %default]")
    p.add_option("--id_attribute", choices=valid_id_attributes,
            help="The attribute field to extract and use as FASTA sequence ID " + \
            "[default: %default]")
    p.add_option("--desc_attribute", default="Note",
            help="The attribute field to extract and use as FASTA sequence " + \
            "description [default: %default]")
    p.add_option("--full_header", default=None, choices=["default", "tair"],
            help="Specify if full FASTA header (with seqid, coordinates and datestamp)" + \
            " should be generated [default: %default]")

    g1 = OptionGroup(p, "Optional parameters (if generating full header)")
    g1.add_option("--sep", dest="sep", default=" ", \
            help="Specify separator used to delimiter header elements [default: \"%default\"]")
    g1.add_option("--datestamp", dest="datestamp", \
            help="Specify a datestamp in the format YYYYMMDD or automatically pick `today`" + \
            " [default: %default]")
    g1.add_option("--conf_class", dest="conf_class", default=False, action="store_true",
            help="Specify if `conf_class` attribute should be parsed and placed in the header" + \
            " [default: %default]")
    p.add_option_group(g1)

    p.set_outfile()

    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(p.print_help())

    gff_file, fasta_file = args

    # --feature may encode an upstream extraction spec (e.g. upstream:TSS:500);
    # parse_feature_param unpacks it and reports errors via flag/error_msg.
    if opts.feature:
        opts.feature, opts.parent, opts.children, upstream_site, upstream_len, \
                flag, error_msg = parse_feature_param(opts.feature)
        if flag:
            sys.exit(error_msg)

    parents = set(opts.parents.split(','))
    children_list = set(opts.children.split(','))

    """
    In a situation where we want to extract sequence for only the top-level
    parent feature, specify feature type of parent == child
    """
    skipChildren = True if len(parents.symmetric_difference(children_list)) == 0 \
            else False

    id_attr = opts.id_attribute
    desc_attr = opts.desc_attribute
    sep = opts.sep

    import gffutils
    g = make_index(gff_file)
    f = Fasta(fasta_file, index=False)
    # Cache per-sequence lengths for upstream-coordinate clamping below.
    seqlen = {}
    for seqid, size in f.itersizes():
        seqlen[seqid] = size

    fw = must_open(opts.outfile, "w")

    # Main extraction loop: one FASTA record per parent feature.
    for feat in get_parents(gff_file, parents):
        desc = ""
        # Description preferentially comes from the feature's parent record.
        if desc_attr:
            fparent = feat.attributes['Parent'][0] \
                if 'Parent' in feat.attributes else None
            if fparent:
                try:
                    g_fparent = g[fparent]
                except gffutils.exceptions.FeatureNotFoundError:
                    logging.error("{} not found in index .. skipped".format(fparent))
                    continue
                if desc_attr in g_fparent.attributes:
                    desc = ",".join(g_fparent.attributes[desc_attr])
            elif desc_attr in feat.attributes:
                desc = ",".join(feat.attributes[desc_attr])

        # Optionally extend the description into a full header with
        # coordinates (TAIR-style or seqid:start-end) and a datestamp.
        if opts.full_header:
            desc_parts = []
            desc_parts.append(desc)

            if opts.conf_class and 'conf_class' in feat.attributes:
                desc_parts.append(feat.attributes['conf_class'][0])

            if opts.full_header == "tair":
                orient = "REVERSE" if feat.strand == "-" else "FORWARD"
                feat_coords = "{0}:{1}-{2} {3} LENGTH=[LEN]".format(feat.seqid, \
                    feat.start, feat.end, orient)
            else:
                (s, e) = (feat.start, feat.end) if (feat.strand == "+") \
                        else (feat.end, feat.start)
                feat_coords = "{0}:{1}-{2}".format(feat.seqid, s, e)
            desc_parts.append(feat_coords)

            datestamp = opts.datestamp if opts.datestamp else \
                    "{0}{1}{2}".format(dt.now().year, dt.now().month, dt.now().day)
            desc_parts.append(datestamp)

            desc = sep.join(str(x) for x in desc_parts)
            desc = "".join(str(x) for x in (sep, desc)).strip()

        if opts.feature == "upstream":
            # Upstream mode: take the flanking region relative to TSS/TrSS
            # rather than the feature's own (sub)sequence.
            upstream_start, upstream_stop = get_upstream_coords(upstream_site, upstream_len, \
                     seqlen[feat.seqid], feat, children_list, g)

            if not upstream_start or not upstream_stop:
                continue

            feat_seq = f.sequence(dict(chr=feat.seqid, start=upstream_start,
                stop=upstream_stop, strand=feat.strand))

            (s, e) = (upstream_start, upstream_stop) \
                    if feat.strand == "+" else \
                     (upstream_stop, upstream_start)
            upstream_seq_loc = str(feat.seqid) + ":" + str(s) + "-" + str(e)
            desc = sep.join(str(x) for x in (desc, upstream_seq_loc, \
                    "FLANKLEN=" + str(upstream_len)))
        else:
            children = []
            if not skipChildren:
                for c in g.children(feat.id, 1):
                    if c.featuretype not in children_list:
                        continue
                    child = f.sequence(dict(chr=c.chrom, start=c.start, stop=c.stop,
                        strand=c.strand))
                    children.append((child, c))

                if not children:
                    print("[warning] %s has no children with type %s" \
                            % (feat.id, ','.join(children_list)), file=sys.stderr)
                    continue
            else:
                child = f.sequence(dict(chr=feat.seqid, start=feat.start, stop=feat.end,
                    strand=feat.strand))
                children.append((child, feat))

            # sort children in incremental position
            children.sort(key=lambda x: x[1].start)
            # reverse children if negative strand
            if feat.strand == '-':
                children.reverse()
            feat_seq = ''.join(x[0] for x in children)

        desc = desc.replace("\"", "")

        id = ",".join(feat.attributes[id_attr]) if id_attr \
                and feat.attributes[id_attr] else \
                feat.id

        if opts.full_header == "tair":
            desc = desc.replace("[LEN]", str(len(feat_seq)))

        rec = SeqRecord(Seq(feat_seq), id=id, description=desc)
        SeqIO.write([rec], fw, "fasta")
        fw.flush()
|
%prog load gff_file fasta_file [--options]
Parses the selected features out of GFF, with subfeatures concatenated.
For example, to get the CDS sequences, do this:
$ %prog load athaliana.gff athaliana.fa --parents mRNA --children CDS
To get 500bp upstream of a genes Transcription Start Site (TSS), do this:
$ %prog load athaliana.gff athaliana.fa --feature=upstream:TSS:500
Switch TSS with TrSS for Translation Start Site.
|
def call_somatic(tumor_name, normal_name):
    """Call SOMATIC variants from tumor/normal calls, adding REJECT filters and SOMATIC flag.

    Works from stdin and writes to stdout, finding positions of tumor and normal samples.

    Uses MuTect like somatic filter based on implementation in speedseq:
    https://github.com/cc2qe/speedseq/blob/e6729aa2589eca4e3a946f398c1a2bdc15a7300d/bin/speedseq#L62

    Extracts the genotype likelihoods (GLs) from FreeBayes, which are like phred scores
    except not multiplied by 10.0 (https://en.wikipedia.org/wiki/Phred_quality_score).
    For tumors, we retrieve the best likelihood to not be reference (the first GL) and
    for normal, the best likelihood to be reference.

    After calculating the likelihoods, we compare these to thresholds to pass variants
    at tuned sensitivity/precision. Tuning done on DREAM synthetic 3 dataset evaluations.

    We also check that the frequency of the tumor exceeds the frequency of the normal by
    a threshold to avoid calls that are low frequency in both tumor and normal. This supports
    both FreeBayes and VarDict output frequencies.
    """
    # Thresholds are like phred scores, so 3.5 = phred35
    tumor_thresh, normal_thresh = 3.5, 3.5
    # Header lines announcing the SOMATIC flag and REJECT filter we may add.
    new_headers = ['##INFO=<ID=SOMATIC,Number=0,Type=Flag,Description="Somatic event">\n',
                   ('##FILTER=<ID=REJECT,Description="Not somatic due to normal call frequency '
                    'or phred likelihoods: tumor: %s, normal %s.">\n')
                   % (int(tumor_thresh * 10), int(normal_thresh * 10))]
    def _output_filter_line(line, indexes):
        # Annotate a single VCF record: SOMATIC flag on pass, REJECT otherwise.
        parts = line.split("\t")
        if _check_lods(parts, tumor_thresh, normal_thresh, indexes) and _check_freqs(parts, indexes):
            parts[7] = parts[7] + ";SOMATIC"
        else:
            if parts[6] in set([".", "PASS"]):
                parts[6] = "REJECT"
            else:
                parts[6] += ";REJECT"
        line = "\t".join(parts)
        sys.stdout.write(line)
    def _write_header(header):
        # Emit the original header with our new lines inserted before #CHROM.
        for hline in header[:-1] + new_headers + [header[-1]]:
            sys.stdout.write(hline)
    header = []
    indexes = None
    for line in sys.stdin:
        if not indexes:
            if line.startswith("#"):
                header.append(line)
            else:
                # First record: resolve sample columns from the #CHROM line,
                # then flush the (augmented) header before any records.
                parts = header[-1].rstrip().split("\t")
                indexes = {"tumor": parts.index(tumor_name), "normal": parts.index(normal_name)}
                _write_header(header)
                _output_filter_line(line, indexes)
        else:
            _output_filter_line(line, indexes)
    # no calls, only output the header
    if not indexes:
        _write_header(header)
|
Call SOMATIC variants from tumor/normal calls, adding REJECT filters and SOMATIC flag.
Works from stdin and writes to stdout, finding positions of tumor and normal samples.
Uses MuTect like somatic filter based on implementation in speedseq:
https://github.com/cc2qe/speedseq/blob/e6729aa2589eca4e3a946f398c1a2bdc15a7300d/bin/speedseq#L62
Extracts the genotype likelihoods (GLs) from FreeBayes, which are like phred scores
except not multiplied by 10.0 (https://en.wikipedia.org/wiki/Phred_quality_score).
For tumors, we retrieve the best likelihood to not be reference (the first GL) and
for normal, the best likelihood to be reference.
After calculating the likelihoods, we compare these to thresholds to pass variants
at tuned sensitivity/precision. Tuning done on DREAM synthetic 3 dataset evaluations.
We also check that the frequency of the tumor exceeds the frequency of the normal by
a threshold to avoid calls that are low frequency in both tumor and normal. This supports
both FreeBayes and VarDict output frequencies.
|
def declare_alias(self, name):
    """Insert a Python function into this Namespace with an
    explicitly-given name, but detect its argument count automatically.
    """
    # Decorator factory: registers the wrapped function under ``name`` and
    # hands the function back unchanged.
    def wrapper(func):
        self._auto_register_function(func, name)
        return func
    return wrapper
|
Insert a Python function into this Namespace with an
explicitly-given name, but detect its argument count automatically.
|
def point_distance_ellipsode(point1, point2):
    """
    calculate the distance between two points on the ellipsode based on point1

    Keyword arguments:
    point1 -- point one geojson object
    point2 -- point two geojson object

    return distance (meters)
    """
    # Ellipsoid constants: semi-major axis, flattening, semi-minor axis and
    # first eccentricity.
    a = 6378137
    f = 1/298.25722
    b = a - a*f
    e = math.sqrt((a*a-b*b)/(a*a))
    lon1 = point1['coordinates'][0]
    lat1 = point1['coordinates'][1]
    # Bug fix: lon2 was read from point1, which zeroed the longitudinal
    # component of every distance.
    lon2 = point2['coordinates'][0]
    lat2 = point2['coordinates'][1]
    # Meridional (M) and prime-vertical (N) radii of curvature at lat1.
    M = a*(1-e*e)*math.pow(1-math.pow(e*math.sin(number2radius(lat1)),2),-1.5)
    N = a/(math.pow(1-math.pow(e*math.sin(number2radius(lat1)),2),0.5))
    distance_lat = M*number2radius(lat2-lat1)
    distance_lon = N*math.cos(number2radius(lat1))*(lon2-lon1)*3600*math.sin(1/3600*math.pi/180)
    return math.sqrt(distance_lat*distance_lat+distance_lon*distance_lon)
|
calculate the distance between two points on the ellipsode based on point1
Keyword arguments:
point1 -- point one geojson object
point2 -- point two geojson object
return distance
|
def _get(self, node, key):
    """ get value inside a node

    Recursively walks the trie from ``node`` following ``key``.

    :param node: node in form of list, or BLANK_NODE
    :param key: nibble list without terminator
    :return:
        BLANK_NODE if does not exist, otherwise value or hash
    """
    node_type = self._get_node_type(node)

    if node_type == NODE_TYPE_BLANK:
        return BLANK_NODE

    if node_type == NODE_TYPE_BRANCH:
        # already reach the expected node
        if not key:
            return node[-1]
        # descend into the child slot selected by the first nibble
        sub_node = self._decode_to_node(node[key[0]])
        return self._get(sub_node, key[1:])

    # key value node
    curr_key = without_terminator(unpack_to_nibbles(node[0]))

    if node_type == NODE_TYPE_LEAF:
        # leaf holds the value directly; the whole remaining key must match
        return node[1] if key == curr_key else BLANK_NODE

    if node_type == NODE_TYPE_EXTENSION:
        # traverse child nodes
        if starts_with(key, curr_key):
            # strip the shared prefix and continue in the referenced node
            sub_node = self._decode_to_node(node[1])
            return self._get(sub_node, key[len(curr_key):])
        else:
            return BLANK_NODE
|
get value inside a node
:param node: node in form of list, or BLANK_NODE
:param key: nibble list without terminator
:return:
BLANK_NODE if does not exist, otherwise value or hash
|
def _handle_codeblock(self, match):
    """Tokenize a fenced code block, delegating to the named language's lexer.

    match args: 1:backticks, 2:lang_name, 3:newline, 4:code, 5:backticks
    """
    from pygments.lexers import get_lexer_by_name

    # section header
    yield match.start(1), String , match.group(1)
    yield match.start(2), String , match.group(2)
    yield match.start(3), Text , match.group(3)

    # lookup lexer if wanted and existing
    lexer = None
    if self.handlecodeblocks:
        try:
            lexer = get_lexer_by_name( match.group(2).strip() )
        except ClassNotFound:
            pass
    code = match.group(4)

    # no lexer for this language. handle it like it was a code block
    if lexer is None:
        yield match.start(4), String, code
        return

    # re-tokenize the block body with the language-specific lexer
    for item in do_insertions([], lexer.get_tokens_unprocessed(code)):
        yield item

    yield match.start(5), String , match.group(5)
|
match args: 1:backticks, 2:lang_name, 3:newline, 4:code, 5:backticks
|
def build_props(self):
    """Build the props dictionary.

    Collects filter params per group, chart params, and the layout type.
    """
    props = {}
    if self.filters:
        # One list of filter params per filter group.
        props["filters"] = {
            group: [flt.params for flt in members]
            for group, members in self.filters.items()
        }
    if self.charts:
        props["charts"] = [chart.params for chart in self.charts]
    props["type"] = self.layout
    return props
|
Build the props dictionary.
|
def update(self):
    '''
    Updates SonarData.

    Fetches the latest readings from the proxy and swaps them into
    ``self.sonar`` under the lock.
    '''
    if self.hasproxy():
        sonarD = SonarData()
        # Removed dead local ``range = 0``: it shadowed the builtin and was
        # never read.
        data = self.proxy.getSonarData()
        sonarD.range = data.range
        sonarD.maxAngle = data.maxAngle
        sonarD.minAngle = data.minAngle
        sonarD.maxRange = data.maxRange
        sonarD.minRange = data.minRange
        # Publish the fresh snapshot atomically with respect to readers.
        self.lock.acquire()
        self.sonar = sonarD
        self.lock.release()
|
Updates SonarData.
|
def _readXput(self, fileCards, directory, session, spatial=False, spatialReferenceID=4236, replaceParamFile=None):
    """
    GSSHAPY Project Read Files from File Method
    """
    ## NOTE: This function is dependent on the project file being read first
    # Read Input/Output Files: for each project card with a registered file
    # reader and a usable value, invoke the reader on the referenced file.
    for card in self.projectCards:
        if card.name not in fileCards:
            continue
        if not self._noneOrNumValue(card.value):
            continue
        fileIO = fileCards[card.name]
        if not fileIO:
            continue

        # Strip surrounding quotes from the filename and dispatch the read.
        self._invokeRead(fileIO=fileIO,
                         directory=directory,
                         filename=card.value.strip('"'),
                         session=session,
                         spatial=spatial,
                         spatialReferenceID=spatialReferenceID,
                         replaceParamFile=replaceParamFile)
|
GSSHAPY Project Read Files from File Method
|
def flat(self, obj, mask=0):
    '''Return the aligned flat size.
    '''
    size = self.base
    # Account for the contained items when a length function is configured.
    if self.leng and self.item > 0:  # include items
        size += self.leng(obj) * self.item
    if _getsizeof:  # _getsizeof prevails
        size = _getsizeof(obj, size)
    # Round up to the requested alignment (mask is alignment - 1).
    return (size + mask) & ~mask if mask else size
|
Return the aligned flat size.
|
def _query_helper(self, by=None):
"""
Internal helper for preparing queries.
"""
if by is None:
primary_keys = self.table.primary_key.columns.keys()
if len(primary_keys) > 1:
warnings.warn("WARNING: MORE THAN 1 PRIMARY KEY FOR TABLE %s. "
"USING THE FIRST KEY %s." %
(self.table.name, primary_keys[0]))
if not primary_keys:
raise NoPrimaryKeyException("Table %s needs a primary key for"
"the .last() method to work properly. "
"Alternatively, specify an ORDER BY "
"column with the by= argument. " %
self.table.name)
id_col = primary_keys[0]
else:
id_col = by
if self.column is None:
col = "*"
else:
col = self.column.name
return col, id_col
|
Internal helper for preparing queries.
|
def copyto(self, other):
    """Copies the value of this array to another array.

    If ``other`` is a ``NDArray`` or ``RowSparseNDArray`` object, then ``other.shape``
    and ``self.shape`` should be the same. This function copies the value from
    ``self`` to ``other``.

    If ``other`` is a context, a new ``RowSparseNDArray`` will be first created on
    the target context, and the value of ``self`` is copied.

    Parameters
    ----------
    other : NDArray or RowSparseNDArray or Context
        The destination array or context.

    Returns
    -------
    NDArray or RowSparseNDArray
        The copied array. If ``other`` is an ``NDArray`` or ``RowSparseNDArray``, then the
        return value and ``other`` will point to the same ``NDArray`` or ``RowSparseNDArray``.
    """
    # Copying to a context always works: the base class allocates there.
    if isinstance(other, Context):
        return super(RowSparseNDArray, self).copyto(other)
    if isinstance(other, NDArray):
        # Only dense and row_sparse destinations are supported.
        stype = other.stype
        if stype in ('default', 'row_sparse'):
            return super(RowSparseNDArray, self).copyto(other)
        raise TypeError('copyto does not support destination NDArray stype ' + str(stype))
    raise TypeError('copyto does not support type ' + str(type(other)))
|
Copies the value of this array to another array.
If ``other`` is a ``NDArray`` or ``RowSparseNDArray`` object, then ``other.shape``
and ``self.shape`` should be the same. This function copies the value from
``self`` to ``other``.
If ``other`` is a context, a new ``RowSparseNDArray`` will be first created on
the target context, and the value of ``self`` is copied.
Parameters
----------
other : NDArray or RowSparseNDArray or Context
The destination array or context.
Returns
-------
NDArray or RowSparseNDArray
The copied array. If ``other`` is an ``NDArray`` or ``RowSparseNDArray``, then the
return value and ``other`` will point to the same ``NDArray`` or ``RowSparseNDArray``.
|
def _log_graphql_error(self, query, data):
'''Log a ``{"errors": [...]}`` GraphQL return and return itself.
:param query: the GraphQL query that triggered the result.
:type query: str
:param data: the decoded JSON object.
:type data: dict
:return: the input ``data``
:rtype: dict
'''
if isinstance(query, bytes): # pragma: no cover
query = query.decode('utf-8')
elif not isinstance(query, str): # pragma: no cover
# allows sgqlc.operation.Operation to be passed
# and generate compact representation of the queries
query = bytes(query).decode('utf-8')
data = self._fixup_graphql_error(data)
errors = data['errors']
self.logger.error('GraphQL query failed with %s errors', len(errors))
for i, error in enumerate(errors):
paths = error.get('path')
if paths:
paths = ' ' + '/'.join(str(path) for path in paths)
else:
paths = ''
self.logger.info('Error #{}{}:'.format(i, paths))
for ln in error.get('message', '').split('\n'):
self.logger.info(' | {}'.format(ln))
s = self.snippet(query, error.get('locations'))
if s:
self.logger.info(' -')
self.logger.info(' | Locations:')
for ln in s:
self.logger.info(' | {}'.format(ln))
return data
|
Log a ``{"errors": [...]}`` GraphQL return and return itself.
:param query: the GraphQL query that triggered the result.
:type query: str
:param data: the decoded JSON object.
:type data: dict
:return: the input ``data``
:rtype: dict
|
def t_binaryValue(t):
    r'[+-]?[0-9]+[bB]'
    # NOTE: the docstring above is the PLY token regex for this rule and is
    # functional — it must not be edited.
    # We must match [0-9], and then check the validity of the binary number.
    # If we match [0-1], the invalid binary number "2b" would match
    # 'decimalValue' 2 and 'IDENTIFIER 'b'.
    if re.search(r'[2-9]', t.value) is not None:
        # Digits outside 0/1 make this an invalid binary literal.
        msg = _format("Invalid binary number {0!A}", t.value)
        t.lexer.last_msg = msg
        t.type = 'error'
        # Setting error causes the value to be automatically skipped
    else:
        # Strip the trailing 'b'/'B' marker and parse as base-2.
        t.value = int(t.value[0:-1], 2)
    return t
|
r'[+-]?[0-9]+[bB]
|
def analyze(problem, Y, X, M=10, print_to_console=False, seed=None):
    """Performs the Random Balanced Design - Fourier Amplitude Sensitivity Test
    (RBD-FAST) on model outputs.

    Returns a dictionary with keys 'S1', where each entry is a list of
    size D (the number of parameters) containing the indices in the same order
    as the parameter file.

    Parameters
    ----------
    problem : dict
        The problem definition
    Y : numpy.array
        A NumPy array containing the model outputs
    X : numpy.array
        A NumPy array containing the model inputs
    M : int
        The interference parameter, i.e., the number of harmonics to sum in
        the Fourier series decomposition (default 10)
    print_to_console : bool
        Print results directly to console (default False)
    seed : int
        Seed for NumPy's random generator (default None: do not reseed)

    References
    ----------
    .. [1] S. Tarantola, D. Gatelli and T. Mara (2006) "Random Balance Designs
           for the Estimation of First Order Global Sensitivity Indices",
           Reliability Engineering and System Safety, 91:6, 717-727
    .. [2] Elmar Plischke (2010) "An effective algorithm for computing global
           sensitivity indices (EASI) Reliability Engineering & System Safety",
           95:4, 354-360. doi:10.1016/j.ress.2009.11.005
    .. [3] Jean-Yves Tissot, Clémentine Prieur (2012) "Bias correction for the
           estimation of sensitivity indices based on random balance designs.",
           Reliability Engineering and System Safety, Elsevier, 107, 205-213.
           doi:10.1016/j.ress.2012.06.010
    .. [4] Jeanne Goffart, Mickael Rabouille & Nathan Mendes (2015)
           "Uncertainty and sensitivity analysis applied to hygrothermal
           simulation of a brick building in a hot and humid climate",
           Journal of Building Performance Simulation.
           doi:10.1080/19401493.2015.1112430

    Examples
    --------
    >>> X = latin.sample(problem, 1000)
    >>> Y = Ishigami.evaluate(X)
    >>> Si = rbd_fast.analyze(problem, Y, X, print_to_console=False)
    """
    # Bug fix: ``if seed:`` silently ignored seed=0; compare against None so
    # every explicitly supplied seed takes effect.
    if seed is not None:
        np.random.seed(seed)
    D = problem['num_vars']
    N = Y.size

    # Calculate and Output the First Order Value
    if print_to_console:
        print("Parameter First")
    Si = ResultDict((k, [None] * D) for k in ['S1'])
    Si['names'] = problem['names']
    for i in range(D):
        # Sort the outputs along input i, estimate S1 spectrally, then apply
        # the bias correction of Tissot & Prieur (ref [3]).
        S1 = compute_first_order(permute_outputs(Y, X[:, i]), M)
        S1 = unskew_S1(S1, M, N)
        Si['S1'][i] = S1
        if print_to_console:
            print("%s %g" %
                  (problem['names'][i].ljust(9), Si['S1'][i]))
    return Si
|
Performs the Random Balanced Design - Fourier Amplitude Sensitivity Test
(RBD-FAST) on model outputs.
Returns a dictionary with keys 'S1', where each entry is a list of
size D (the number of parameters) containing the indices in the same order
as the parameter file.
Parameters
----------
problem : dict
The problem definition
Y : numpy.array
A NumPy array containing the model outputs
X : numpy.array
A NumPy array containing the model inputs
M : int
The interference parameter, i.e., the number of harmonics to sum in
the Fourier series decomposition (default 10)
print_to_console : bool
Print results directly to console (default False)
References
----------
.. [1] S. Tarantola, D. Gatelli and T. Mara (2006) "Random Balance Designs
for the Estimation of First Order Global Sensitivity Indices",
Reliability Engineering and System Safety, 91:6, 717-727
.. [2] Elmar Plischke (2010) "An effective algorithm for computing global
sensitivity indices (EASI) Reliability Engineering & System Safety",
95:4, 354-360. doi:10.1016/j.ress.2009.11.005
.. [3] Jean-Yves Tissot, Clémentine Prieur (2012) "Bias correction for the
estimation of sensitivity indices based on random balance designs.",
Reliability Engineering and System Safety, Elsevier, 107, 205-213.
doi:10.1016/j.ress.2012.06.010
.. [4] Jeanne Goffart, Mickael Rabouille & Nathan Mendes (2015)
"Uncertainty and sensitivity analysis applied to hygrothermal
simulation of a brick building in a hot and humid climate",
Journal of Building Performance Simulation.
doi:10.1080/19401493.2015.1112430
Examples
--------
>>> X = latin.sample(problem, 1000)
>>> Y = Ishigami.evaluate(X)
>>> Si = rbd_fast.analyze(problem, Y, X, print_to_console=False)
|
def exponentialRDD(sc, mean, size, numPartitions=None, seed=None):
    """
    Generates an RDD comprised of i.i.d. samples from the Exponential
    distribution with the input mean.

    :param sc: SparkContext used to create the RDD.
    :param mean: Mean, or 1 / lambda, for the Exponential distribution.
    :param size: Size of the RDD.
    :param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
    :param seed: Random seed (default: a random long integer).
    :return: RDD of float comprised of i.i.d. samples ~ Exp(mean).

    >>> mean = 2.0
    >>> x = RandomRDDs.exponentialRDD(sc, mean, 1000, seed=2)
    >>> stats = x.stats()
    >>> stats.count()
    1000
    >>> abs(stats.mean() - mean) < 0.5
    True
    >>> from math import sqrt
    >>> abs(stats.stdev() - sqrt(mean)) < 0.5
    True
    """
    # Delegates generation to the JVM-side MLlib implementation.
    return callMLlibFunc("exponentialRDD", sc._jsc, float(mean), size, numPartitions, seed)
|
Generates an RDD comprised of i.i.d. samples from the Exponential
distribution with the input mean.
:param sc: SparkContext used to create the RDD.
:param mean: Mean, or 1 / lambda, for the Exponential distribution.
:param size: Size of the RDD.
:param numPartitions: Number of partitions in the RDD (default: `sc.defaultParallelism`).
:param seed: Random seed (default: a random long integer).
:return: RDD of float comprised of i.i.d. samples ~ Exp(mean).
>>> mean = 2.0
>>> x = RandomRDDs.exponentialRDD(sc, mean, 1000, seed=2)
>>> stats = x.stats()
>>> stats.count()
1000
>>> abs(stats.mean() - mean) < 0.5
True
>>> from math import sqrt
>>> abs(stats.stdev() - sqrt(mean)) < 0.5
True
|
def get_android_resources(self):
    """
    Return the :class:`ARSCParser` object for this APK's ``resources.arsc``.

    The parsed result is memoized in ``self.arsc`` so the file is only
    read and parsed once.

    :rtype: :class:`ARSCParser`
    """
    key = "resources.arsc"
    if key not in self.arsc:
        # Parse lazily on first access, then cache.
        self.arsc[key] = ARSCParser(self.zip.read(key))
    return self.arsc[key]
|
Return the :class:`ARSCParser` object which corresponds to the resources.arsc file
:rtype: :class:`ARSCParser`
|
def transpose(a, axes=None):
    """Returns a view of the array with axes transposed.

    For a 1-D array, this has no effect.
    For a 2-D array, this is the usual matrix transpose.
    For an n-D array, if axes are given, their order indicates how the
    axes are permuted.

    Args:
        a (array_like): Input array.
        axes (list of int, optional): By default, reverse the dimensions,
            otherwise permute the axes according to the values given.
    """
    if isinstance(a, np.ndarray):
        # np.transpose accepts axes=None natively (reverses dimensions).
        return np.transpose(a, axes)
    elif isinstance(a, RemoteArray):
        # Bug fix: previously ``a.transpose(*axes)`` was called even when
        # axes was None, which raised "TypeError: argument after * must be
        # an iterable". Fall back to the no-arg form (full reversal).
        return a.transpose(*axes) if axes is not None else a.transpose()
    elif isinstance(a, Remote):
        ra = _remote_to_array(a)
        # Same None-handling fix as the RemoteArray branch above.
        return ra.transpose(*axes) if axes is not None else ra.transpose()
    elif isinstance(a, DistArray):
        if axes is None:
            axes = range(a.ndim - 1, -1, -1)
        axes = list(axes)
        if len(set(axes)) < len(axes):
            raise ValueError("repeated axis in transpose")
        if sorted(axes) != list(range(a.ndim)):
            raise ValueError("axes don't match array")
        # The distributed axis moves to wherever the permutation sends it.
        distaxis = a._distaxis
        new_distaxis = axes.index(distaxis)
        new_subarrays = [ra.transpose(*axes) for ra in a._subarrays]
        return DistArray(new_subarrays, new_distaxis)
    else:
        return np.transpose(a, axes)
|
Returns a view of the array with axes transposed.
For a 1-D array, this has no effect.
For a 2-D array, this is the usual matrix transpose.
For an n-D array, if axes are given, their order indicates how the
axes are permuted
Args:
a (array_like): Input array.
axes (list of int, optional): By default, reverse the dimensions,
otherwise permute the axes according to the values given.
|
def _ignore_sql(self, query):
    """Return True if the query's SQL matches any configured ignore pattern."""
    sql = query.get('sql')
    return any(
        re.search(pattern, sql)
        for pattern in QC_SETTINGS['IGNORE_SQL_PATTERNS']
    )
|
Check to see if we should ignore the sql query.
|
def get_minutes_description(self):
    """Generates a description for only the MINUTE portion of the expression

    Builds the human-readable text for the minute field
    (``self._expression_parts[1]``) by delegating to
    ``get_segment_description`` with minute-specific wording callbacks.

    Returns:
        The MINUTE description
    """
    return self.get_segment_description(
        self._expression_parts[1],
        _("every minute"),
        lambda s: s,  # single minute value: shown verbatim
        lambda s: _("every {0} minutes").format(s),  # step expressions (*/n)
        # NOTE(review): this range template is returned unformatted here;
        # presumably get_segment_description fills in {0}/{1} itself — confirm.
        lambda s: _("minutes {0} through {1} past the hour"),
        # Minute 0 maps to the empty string ("on the hour" implied);
        # the {0} placeholder is likewise filled in by the caller.
        lambda s: '' if s == "0" else _("at {0} minutes past the hour")
    )
|
Generates a description for only the MINUTE portion of the expression
Returns:
The MINUTE description
|
def logical_chassis_fwdl_status_output_cluster_fwdl_entries_fwdl_entries_blade_swbd(self, **kwargs):
    """Auto Generated Code

    Builds the XML tree
    ``logical_chassis_fwdl_status/output/cluster-fwdl-entries/fwdl-entries/blade-swbd``
    with the supplied ``blade_swbd`` text and hands it to the callback.
    """
    # NOTE: a throwaway "config" Element is created first (generated-code
    # template artefact) and immediately replaced by the real root element.
    config = ET.Element("config")
    root = ET.Element("logical_chassis_fwdl_status")
    config = root
    output = ET.SubElement(root, "output")
    cluster_entries = ET.SubElement(output, "cluster-fwdl-entries")
    fwdl_entries = ET.SubElement(cluster_entries, "fwdl-entries")
    blade_node = ET.SubElement(fwdl_entries, "blade-swbd")
    blade_node.text = kwargs.pop('blade_swbd')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
Auto Generated Code
|
def _expand_colspan_rowspan(self, rows):
    """
    Given a list of <tr>s, return a list of text rows.

    Parameters
    ----------
    rows : list of node-like
        List of <tr>s

    Returns
    -------
    list of list
        Each returned row is a list of str text.

    Notes
    -----
    Any cell with ``rowspan`` or ``colspan`` will have its contents copied
    to subsequent cells.

    The ``remainder`` list carries ``(index, text, nrows)`` tuples for
    cells from earlier rows whose ``rowspan`` still spills into the row
    currently being built; ``nrows`` counts how many more rows the cell
    must be repeated into.
    """
    all_texts = []  # list of rows, each a list of str
    remainder = []  # list of (index, text, nrows)

    for tr in rows:
        texts = []  # the output for this row
        next_remainder = []

        index = 0
        tds = self._parse_td(tr)
        for td in tds:
            # Append texts from previous rows with rowspan>1 that come
            # before this <td>
            while remainder and remainder[0][0] <= index:
                prev_i, prev_text, prev_rowspan = remainder.pop(0)
                texts.append(prev_text)
                if prev_rowspan > 1:
                    next_remainder.append((prev_i, prev_text,
                                           prev_rowspan - 1))
                index += 1

            # Append the text from this <td>, colspan times
            text = _remove_whitespace(self._text_getter(td))
            # Missing/empty span attributes default to 1 (no spanning).
            rowspan = int(self._attr_getter(td, 'rowspan') or 1)
            colspan = int(self._attr_getter(td, 'colspan') or 1)

            for _ in range(colspan):
                texts.append(text)
                if rowspan > 1:
                    next_remainder.append((index, text, rowspan - 1))
                index += 1

        # Append texts from previous rows at the final position
        for prev_i, prev_text, prev_rowspan in remainder:
            texts.append(prev_text)
            if prev_rowspan > 1:
                next_remainder.append((prev_i, prev_text,
                                       prev_rowspan - 1))

        all_texts.append(texts)
        remainder = next_remainder

    # Append rows that only appear because the previous row had non-1
    # rowspan
    while remainder:
        next_remainder = []
        texts = []
        for prev_i, prev_text, prev_rowspan in remainder:
            texts.append(prev_text)
            if prev_rowspan > 1:
                next_remainder.append((prev_i, prev_text,
                                       prev_rowspan - 1))
        all_texts.append(texts)
        remainder = next_remainder

    return all_texts
|
Given a list of <tr>s, return a list of text rows.
Parameters
----------
rows : list of node-like
List of <tr>s
Returns
-------
list of list
Each returned row is a list of str text.
Notes
-----
Any cell with ``rowspan`` or ``colspan`` will have its contents copied
to subsequent cells.
|
def learn(self, bottomUpInput, enableInference=None):
    """
    Perform one compute step with learning forced on.

    Thin convenience wrapper around :meth:`compute` that sets
    ``enableLearn=True``.

    :param bottomUpInput: bottom-up input, forwarded unchanged to
        ``compute``.
    :param enableInference: optional inference flag, forwarded unchanged
        (its semantics are defined by ``compute``).
    :return: whatever ``compute`` returns.
    """
    return self.compute(bottomUpInput, enableLearn=True,
                        enableInference=enableInference)
|
TODO: document
:param bottomUpInput:
:param enableInference:
:return:
|
def delete(self, folder_id):
    """
    Delete a specific campaign folder, and mark all the campaigns in the
    folder as ‘unfiled’.

    :param folder_id: The unique id for the campaign folder.
    :type folder_id: :py:class:`str`
    """
    # Remember the folder id on this instance, then issue the DELETE call.
    self.folder_id = folder_id
    url = self._build_path(folder_id)
    return self._mc_client._delete(url=url)
|
Delete a specific campaign folder, and mark all the campaigns in the
folder as ‘unfiled’.
:param folder_id: The unique id for the campaign folder.
:type folder_id: :py:class:`str`
|
def flipFlopFsm(self, fsmTable, *varBinds, **context):
    """Read, modify, create or remove Managed Objects Instances.

    Given one or more py:class:`~pysnmp.smi.rfc1902.ObjectType`, recursively
    transitions corresponding Managed Objects Instances through the Finite State
    Machine (FSM) states till it reaches its final stop state.

    Parameters
    ----------
    fsmTable: :py:class:`dict`
        A map of (`state`, `status`) -> `state` representing FSM transition matrix.
        See :py:class:`RowStatus` for FSM transition logic.

    varBinds: :py:class:`tuple` of :py:class:`~pysnmp.smi.rfc1902.ObjectType` objects
        representing Managed Objects Instances to work with.

    Other Parameters
    ----------------
    \*\*context:
        Query parameters:

        * `cbFun` (callable) - user-supplied callable that is invoked to
          pass the new value of the Managed Object Instance or an error.

        * `acFun` (callable) - user-supplied callable that is invoked to
          authorize access to the requested Managed Object Instance. If
          not supplied, no access control will be performed.

    Notes
    -----
    The callback functions (e.g. `cbFun`, `acFun`) have the same signature
    as this method where `varBind` contains the new Managed Object Instance
    value.

    In case of errors, the `errors` key in the `context` dict will contain
    a sequence of `dict` objects describing one or more errors that occur.

    Such error `dict` will have the `error`, `idx` and `state` keys providing
    the details concerning the error, for which variable-binding and in what
    state the system has failed.
    """
    # Mutable counter shared with the nested callback (closure over a list).
    count = [0]
    cbFun = context.get('cbFun')
    # Per-var-bind completion callback: collects results; when all var-binds
    # for the current state are processed, re-enters flipFlopFsm to take
    # the next FSM transition.
    def _cbFun(varBind, **context):
        idx = context.pop('idx', None)
        err = context.pop('error', None)
        if err:
            # Move other errors into the errors sequence
            errors = context['errors']
            errors.append(
                {'error': err,
                 'idx': idx,
                 'varbind': varBind,
                 'state': context['state']})
            context['status'] = self.STATUS_ERROR
        if idx is None:
            # No index means there is nothing to collect; finish right away.
            if cbFun:
                cbFun((), **context)
            return
        _varBinds = context['varBinds']
        _varBinds[idx] = varBind
        count[0] += 1
        debug.logger & debug.FLAG_INS and debug.logger(
            '_cbFun: var-bind %d, processed %d, expected '
            '%d' % (idx, count[0], len(varBinds)))
        if count[0] < len(varBinds):
            return
        debug.logger & debug.FLAG_INS and debug.logger(
            '_cbFun: finished, output var-binds %r' % (_varBinds,))
        # All var-binds handled for this state -- recurse for the next
        # transition, restoring the user's original callback.
        self.flipFlopFsm(fsmTable, *varBinds, **dict(context, cbFun=cbFun))
    debug.logger & debug.FLAG_INS and debug.logger(
        'flipFlopFsm: input var-binds %r' % (varBinds,))
    # Root of the MIB tree; state handler methods are looked up on it below.
    mibTree, = self.mibBuilder.importSymbols('SNMPv2-SMI', 'iso')
    try:
        state = context['state']
        status = context['status']
        instances = context['instances']
        errors = context['errors']
        _varBinds = context['varBinds']
    except KeyError:
        # First entry into the FSM: initialize the bookkeeping state.
        state, status = self.STATE_START, self.STATUS_OK
        instances = {}
        errors = []
        _varBinds = list(varBinds)
        self._indexMib()
    debug.logger & debug.FLAG_INS and debug.logger(
        'flipFlopFsm: current state %s, status %s' % (state, status))
    try:
        # Exact (state, status) transition first, wildcard state second.
        newState = fsmTable[(state, status)]
    except KeyError:
        try:
            newState = fsmTable[(self.STATE_ANY, status)]
        except KeyError:
            raise error.SmiError(
                'Unresolved FSM state %s, %s' % (state, status))
    debug.logger & debug.FLAG_INS and debug.logger(
        'flipFlopFsm: state %s status %s -> transitioned into state '
        '%s' % (state, status, newState))
    state = newState
    if state == self.STATE_STOP:
        # Terminal state: drop FSM bookkeeping and report back to the user.
        context.pop('state', None)
        context.pop('status', None)
        context.pop('instances', None)
        context.pop('varBinds', None)
        if cbFun:
            cbFun(_varBinds, **context)
        return
    # the case of no var-binds
    if not varBinds:
        _cbFun(None, **context)
        return
    # Dispatch: the state name doubles as the handler method name on the
    # MIB tree root.
    actionFun = getattr(mibTree, state, None)
    if not actionFun:
        raise error.SmiError(
            'Unsupported state handler %s at '
            '%s' % (state, self))
    for idx, varBind in enumerate(varBinds):
        actionFun(
            varBind,
            **dict(context, cbFun=_cbFun, state=state, status=status,
                   idx=idx, total=len(varBinds), instances=instances,
                   errors=errors, varBinds=_varBinds, nextName=None))
        debug.logger & debug.FLAG_INS and debug.logger(
            'flipFlopFsm: func %s initiated for %r' % (actionFun, varBind))
|
Read, modify, create or remove Managed Objects Instances.
Given one or more py:class:`~pysnmp.smi.rfc1902.ObjectType`, recursively
transitions corresponding Managed Objects Instances through the Finite State
Machine (FSM) states till it reaches its final stop state.
Parameters
----------
fsmTable: :py:class:`dict`
A map of (`state`, `status`) -> `state` representing FSM transition matrix.
See :py:class:`RowStatus` for FSM transition logic.
varBinds: :py:class:`tuple` of :py:class:`~pysnmp.smi.rfc1902.ObjectType` objects
representing Managed Objects Instances to work with.
Other Parameters
----------------
\*\*context:
Query parameters:
* `cbFun` (callable) - user-supplied callable that is invoked to
pass the new value of the Managed Object Instance or an error.
* `acFun` (callable) - user-supplied callable that is invoked to
authorize access to the requested Managed Object Instance. If
not supplied, no access control will be performed.
Notes
-----
The callback functions (e.g. `cbFun`, `acFun`) have the same signature
as this method where `varBind` contains the new Managed Object Instance
value.
In case of errors, the `errors` key in the `context` dict will contain
a sequence of `dict` objects describing one or more errors that occur.
Such error `dict` will have the `error`, `idx` and `state` keys providing
the details concerning the error, for which variable-binding and in what
state the system has failed.
|
def get_arguments(self):
    """
    Extracts the specific arguments of this CLI
    """
    ApiCli.get_arguments(self)

    args = self.args
    if args.hostGroupId is not None:
        self.hostGroupId = args.hostGroupId
    if args.force is not None:
        self.force = args.force

    # A forced delete is signalled to the API via a query parameter.
    if self.force:
        self.url_parameters = {"forceRemove": True}

    self.path = "v1/hostgroup/{0}".format(str(self.hostGroupId))
|
Extracts the specific arguments of this CLI
|
def __msgc_step3_discontinuity_localization(self):
    """
    Estimate discontinuity in basis of low resolution image segmentation.

    Locates the boundary of the low-resolution segmentation (Laplace
    filter), then dilates it by the configured
    ``boundary_dilatation_distance`` to mark the uncertain band that the
    high-resolution pass must re-examine.

    :return: discontinuity in low resolution
    """
    import scipy

    start = self._start_time
    # Invert so that the object of interest is labelled 1.
    seg = 1 - self.segmentation.astype(np.int8)
    self.stats["low level object voxels"] = np.sum(seg)
    self.stats["low level image voxels"] = np.prod(seg.shape)
    # in seg is now stored low resolution segmentation
    # back to normal parameters
    # step 2: discontinuity localization
    # self.segparams = sparams_hi
    # Laplace highlights label transitions; non-zero voxels form the border.
    seg_border = scipy.ndimage.filters.laplace(seg, mode="constant")
    logger.debug("seg_border: %s", scipy.stats.describe(seg_border, axis=None))
    # logger.debug(str(np.max(seg_border)))
    # logger.debug(str(np.min(seg_border)))
    seg_border[seg_border != 0] = 1
    logger.debug("seg_border: %s", scipy.stats.describe(seg_border, axis=None))
    # scipy.ndimage.morphology.distance_transform_edt
    boundary_dilatation_distance = self.segparams["boundary_dilatation_distance"]
    # Dilate the border with a cubic structuring element of side
    # 2*distance + 1 (assumes 3-D data — the element is 3-D).
    seg = scipy.ndimage.morphology.binary_dilation(
        seg_border,
        # seg,
        np.ones(
            [
                (boundary_dilatation_distance * 2) + 1,
                (boundary_dilatation_distance * 2) + 1,
                (boundary_dilatation_distance * 2) + 1,
            ]
        ),
    )
    if self.keep_temp_properties:
        self.temp_msgc_lowres_discontinuity = seg
    else:
        self.temp_msgc_lowres_discontinuity = None

    if self.debug_images:
        import sed3

        pd = sed3.sed3(seg_border)  # ), contour=seg)
        pd.show()
        pd = sed3.sed3(seg)  # ), contour=seg)
        pd.show()
    # segzoom = scipy.ndimage.interpolation.zoom(seg.astype('float'), zoom,
    # order=0).astype('int8')
    self.stats["t3"] = time.time() - start
    return seg
|
Estimate discontinuity in basis of low resolution image segmentation.
:return: discontinuity in low resolution
|
def full_parent_name(self):
    """Retrieves the fully qualified parent command name.

    This is the base command name required to execute it. For example,
    in ``?one two three`` the parent name would be ``one two``.
    """
    names = []
    node = self.parent
    # Walk up the parent chain, collecting names innermost-first.
    while node is not None:
        names.append(node.name)
        node = node.parent
    # Reverse so the outermost command comes first.
    return ' '.join(reversed(names))
|
Retrieves the fully qualified parent command name.
This the base command name required to execute it. For example,
in ``?one two three`` the parent name would be ``one two``.
|
def area(self):
    """
    The surface area of the primitive extrusion.

    Computed analytically from the cross-section polygon and the
    extrusion height, so no mesh has to be created.

    Returns
    ----------
    area: float, surface area of 3D extrusion
    """
    prim = self.primitive
    # Lateral (side-wall) area: perimeter times height.
    lateral = abs(prim.height * prim.polygon.length)
    # Plus the two end caps.
    caps = prim.polygon.area * 2
    return lateral + caps
|
The surface area of the primitive extrusion.
Calculated from polygon and height to avoid mesh creation.
Returns
----------
area: float, surface area of 3D extrusion
|
def colorize(text, messageType=None):
    """
    Function that colorizes a message.

    Args:
    -----
        text: The string to be colorized.
        messageType: Possible options include "ERROR", "WARNING", "SUCCESS",
            "INFO" or "BOLD". May also be None (the default), in which case
            the text is returned without any formatting.

    Returns:
    --------
        string: Colorized if the option is correct, including a tag at the end
            to reset the formatting.
    """
    formattedText = str(text)
    # Bug fix: the previous implementation crashed with
    # "TypeError: argument of type 'NoneType' is not iterable" when
    # messageType was None (its default), because of `"ERROR" in None`.
    if messageType is None:
        return formattedText
    # Set colors
    if "ERROR" in messageType:
        formattedText = colorama.Fore.RED + formattedText
    elif "WARNING" in messageType:
        formattedText = colorama.Fore.YELLOW + formattedText
    elif "SUCCESS" in messageType:
        formattedText = colorama.Fore.GREEN + formattedText
    elif "INFO" in messageType:
        formattedText = colorama.Fore.BLUE + formattedText
    # Set emphasis mode
    if "BOLD" in messageType:
        formattedText = colorama.Style.BRIGHT + formattedText
    return formattedText + colorama.Style.RESET_ALL
|
Function that colorizes a message.
Args:
-----
text: The string to be colorized.
messageType: Possible options include "ERROR", "WARNING", "SUCCESS",
"INFO" or "BOLD".
Returns:
--------
string: Colorized if the option is correct, including a tag at the end
to reset the formatting.
|
def url_for(self, *args, **kwargs):
    """Construct url for route with additional params."""
    # Keyword arguments become the route's substitution parts.
    raw = self.url(parts=kwargs)
    return yarl.URL(raw)
|
Construct url for route with additional params.
|
def format_cffi_externs(cls):
    """Generate stubs for the cffi bindings from @_extern_decl methods.

    Returns:
        A C source snippet declaring every registered extern signature
        inside an ``extern "Python" { ... }`` block.
    """
    # Idiom fix: only the field values are used, so iterate .values()
    # instead of .items() (the keys were fetched and discarded).
    extern_decls = [
        f.extern_signature.pretty_print()
        for f in cls._extern_fields.values()
    ]
    return (
        'extern "Python" {\n'
        + '\n'.join(extern_decls)
        + '\n}\n')
|
Generate stubs for the cffi bindings from @_extern_decl methods.
|
def undefine(vm_, **kwargs):
    '''
    Remove a defined vm, this does not purge the virtual machine image, and
    this only works if the vm is powered down

    :param vm_: domain name
    :param connection: libvirt connection URI, overriding defaults

        .. versionadded:: 2019.2.0
    :param username: username to connect with, overriding defaults

        .. versionadded:: 2019.2.0
    :param password: password to connect with, overriding defaults

        .. versionadded:: 2019.2.0

    CLI Example:

    .. code-block:: bash

        salt '*' virt.undefine <domain>
    '''
    conn = __get_conn(**kwargs)
    dom = _get_domain(conn, vm_)
    # VIR_DOMAIN_UNDEFINE_NVRAM only exists in libvirt 1.2.8+; fall back
    # to the plain undefine on older libvirt bindings.
    if getattr(libvirt, 'VIR_DOMAIN_UNDEFINE_NVRAM', False):
        result = dom.undefineFlags(libvirt.VIR_DOMAIN_UNDEFINE_NVRAM)
    else:
        result = dom.undefine()
    conn.close()
    return result == 0
|
Remove a defined vm, this does not purge the virtual machine image, and
this only works if the vm is powered down
:param vm_: domain name
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.undefine <domain>
|
def std(a, axis=None, ddof=0):
    """
    Request the standard deviation of an Array over any number of axes.

    .. note:: Currently limited to operating on a single axis.

    :param axis: Axis or axes along which the operation is performed.
                 The default (axis=None) is to perform the operation
                 over all the dimensions of the input array.
                 The axis may be negative, in which case it counts from
                 the last to the first axis.
                 If axis is a tuple of ints, the operation is performed
                 over multiple axes.
    :type axis: None, or int, or iterable of ints.
    :param int ddof: Delta Degrees of Freedom. The divisor used in
                     calculations is N - ddof, where N represents the
                     number of elements. By default ddof is zero.
    :return: The Array representing the requested standard deviation.
    :rtype: Array
    """
    axes = _normalise_axis(axis, a)
    # The streaming std implementation only supports one axis at a time.
    if axes is None or len(axes) != 1:
        raise AxisSupportError(
            "This operation is currently limited to a single axis")
    # Promote the result dtype the same way NumPy does for float division.
    dtype = (np.array([0], dtype=a.dtype) / 1.).dtype
    return _Aggregation(a, axes[0],
                        _StdStreamsHandler, _StdMaskedStreamsHandler,
                        dtype, dict(ddof=ddof))
|
Request the standard deviation of an Array over any number of axes.
.. note:: Currently limited to operating on a single axis.
:param axis: Axis or axes along which the operation is performed.
The default (axis=None) is to perform the operation
over all the dimensions of the input array.
The axis may be negative, in which case it counts from
the last to the first axis.
If axis is a tuple of ints, the operation is performed
over multiple axes.
:type axis: None, or int, or iterable of ints.
:param int ddof: Delta Degrees of Freedom. The divisor used in
calculations is N - ddof, where N represents the
number of elements. By default ddof is zero.
:return: The Array representing the requested standard deviation.
:rtype: Array
|
def lazy_connect(cls, pk):
    """
    Build an instance bound to the given primary key without checking the
    key against the database, so the returned object is not connected.
    """
    obj = cls()
    # Normalize the raw key via the pk field, but skip any existence check.
    obj._pk = obj.pk.normalize(pk)
    obj._connected = False
    return obj
|
Create an object, setting its primary key without testing it. So the
instance is not connected
|
def __perform_request(self, url, type=GET, params=None):
    """
    This method will perform the real request,
    in this way we can customize only the "output" of the API call by
    using self.__call_api method.
    This method will return the request object.

    :param url: endpoint path, joined onto ``self.end_point``.
    :param type: one of the module-level GET/POST/PUT/DELETE constants.
    :param params: dict of query parameters (GET) or JSON payload
        (POST/PUT/DELETE); defaults to an empty dict.
    :raises TokenError: if no API token is configured.
    """
    if params is None:
        params = {}
    if not self.token:
        raise TokenError("No token provided. Please use a valid token")
    # NOTE(review): `urlparse.urljoin` suggests a Python 2 / six-style
    # import alias elsewhere in the module — confirm.
    url = urlparse.urljoin(self.end_point, url)
    # lookup table to find out the appropriate requests method,
    # headers and payload type (json or query parameters)
    identity = lambda x: x
    json_dumps = lambda x: json.dumps(x)
    # Each entry: (requests method, extra headers, kwargs key for the
    # payload, transform applied to `params` before sending).
    lookup = {
        GET: (self._session.get, {}, 'params', identity),
        POST: (self._session.post, {'Content-type': 'application/json'}, 'data',
               json_dumps),
        PUT: (self._session.put, {'Content-type': 'application/json'}, 'data',
              json_dumps),
        DELETE: (self._session.delete,
                 {'content-type': 'application/json'},
                 'data', json_dumps),
    }
    requests_method, headers, payload, transform = lookup[type]
    agent = "{0}/{1} {2}/{3}".format('python-digitalocean',
                                     __version__,
                                     requests.__name__,
                                     requests.__version__)
    headers.update({'Authorization': 'Bearer ' + self.token,
                    'User-Agent': agent})
    kwargs = {'headers': headers, payload: transform(params)}
    timeout = self.get_timeout()
    if timeout:
        kwargs['timeout'] = timeout
    # remove token from log
    headers_str = str(headers).replace(self.token.strip(), 'TOKEN')
    self._log.debug('%s %s %s:%s %s %s' %
                    (type, url, payload, params, headers_str, timeout))
    return requests_method(url, **kwargs)
|
This method will perform the real request,
in this way we can customize only the "output" of the API call by
using self.__call_api method.
This method will return the request object.
|
def update_energy(self, bypass_check: bool = False):
    """Builds weekly, monthly and yearly dictionaries"""
    # Guard clause: run either unconditionally (bypass_check) or when the
    # periodic update-time check says a refresh is due.
    if not bypass_check and not self.update_time_check:
        return
    self.get_weekly_energy()
    # Monthly/yearly data only make sense once weekly data was stored.
    if 'week' in self.energy:
        self.get_monthly_energy()
        self.get_yearly_energy()
    # Only timed (non-bypass) refreshes update the timestamp.
    if not bypass_check:
        self.update_energy_ts = time.time()
|
Builds weekly, monthly and yearly dictionaries
|
def parse_comet(self):
    """Parse `targetname` as if it were a comet.

    :return: (string or None, int or None, string or None);
        The designation, number/prefix, and name of the comet derived
        from `self.targetname`; each element that cannot be identified
        is `None`. Parenthesis in `self.targetname` will be ignored.

    :example: '1P/Halley' -> (None, '1P', 'Halley');
        'P/1994 N2 (McNaught-Hartley)' -> ('1994 N2', 'P',
        'McNaught-Hartley'); '322P' -> (None, '322P', None).
    """
    import re

    # Three alternatives: prefix/number, designation, name. The group
    # indices noted in the comments refer to the findall() tuples.
    pat = ('^(([1-9]+[PDCXAI](-[A-Z]{1,2})?)|[PDCXAI]/)' +  # prefix [0,1,2]
           '|([-]?[0-9]{3,4}[ _][A-Z]{1,2}([0-9]{1,3})?(-[1-9A-Z]{0,2})?)' +
           # designation [3,4]
           ('|(([A-Z][a-z]?[A-Z]*[a-z]*[ -]?[A-Z]?[1-9]*[a-z]*)' +
            '( [1-9A-Z]{1,2})*)')  # name [5,6]
           )

    desig = None
    prefixnumber = None
    name = None
    for groups in re.findall(pat, self.targetname.strip()):
        # prefix/number (strip the separating slash)
        if groups[0]:
            prefixnumber = groups[0].replace('/', '')
        # designation (underscores act as spaces)
        if groups[3]:
            desig = groups[3].replace('_', ' ')
        # name: single-character matches are regex noise, skip them
        if len(groups[5]) > 1:
            name = groups[5]
    return (desig, prefixnumber, name)
|
Parse `targetname` as if it were a comet.
:return: (string or None, int or None, string or None);
The designation, number and prefix, and name of the comet as derived
from `self.targetname` are extracted into a tuple; each element that
does not exist is set to `None`. Parenthesis in `self.targetname`
will be ignored.
:example: the following table shows the result of the parsing:
+--------------------------------+--------------------------------+
|targetname |(desig, prefixnumber, name) |
+================================+================================+
|1P/Halley |(None, '1P', 'Halley') |
+--------------------------------+--------------------------------+
|3D/Biela |(None, '3D', 'Biela') |
+--------------------------------+--------------------------------+
|9P/Tempel 1 |(None, '9P', 'Tempel 1') |
+--------------------------------+--------------------------------+
|73P/Schwassmann Wachmann 3 C |(None, '73P', |
| |'Schwassmann Wachmann 3 C') |
+--------------------------------+--------------------------------+
|73P-C/Schwassmann Wachmann 3 C |(None, '73P-C', |
| |'Schwassmann Wachmann 3 C') |
+--------------------------------+--------------------------------+
|73P-BB |(None, '73P-BB', None) |
+--------------------------------+--------------------------------+
|322P |(None, '322P', None) |
+--------------------------------+--------------------------------+
|X/1106 C1 |('1166 C1', 'X', None) |
+--------------------------------+--------------------------------+
|P/1994 N2 (McNaught-Hartley) |('1994 N2', 'P', |
| |'McNaught-Hartley') |
+--------------------------------+--------------------------------+
|P/2001 YX127 (LINEAR) |('2001 YX127', 'P', 'LINEAR') |
+--------------------------------+--------------------------------+
|C/-146 P1 |('-146 P1', 'C', None) |
+--------------------------------+--------------------------------+
|C/2001 A2-A (LINEAR) |('2001 A2-A', 'C', 'LINEAR') |
+--------------------------------+--------------------------------+
|C/2013 US10 |('2013 US10', 'C', None) |
+--------------------------------+--------------------------------+
|C/2015 V2 (Johnson) |('2015 V2', 'C', 'Johnson') |
+--------------------------------+--------------------------------+
|C/2016 KA (Catalina) |('2016 KA', 'C', 'Catalina') |
+--------------------------------+--------------------------------+
|
def _process_book(book_url):
    """
    Parse available informations about book from the book details page.

    Args:
        book_url (str): Absolute URL of the book.

    Returns:
        obj: :class:`structures.Publication` instance with book details.
    """
    page = DOWNER.download(book_url)
    dom = dhtmlparser.parseString(page)

    detail_divs = dom.find("div", {"id": "contentDetail"})
    assert detail_divs, "Can't find details of the book."
    details = detail_divs[0]

    # Mandatory fields (same parse order as before).
    title = _parse_title(dom, details)
    authors = _parse_authors(details)
    publisher = _parse_publisher(details)
    price = _parse_price(details)
    pages, binding = _parse_pages_binding(details)

    pub = Publication(title, authors, price, publisher)

    # Optional fields.
    opt = pub.optionals
    opt.URL = book_url
    opt.binding = binding
    opt.pages = pages
    opt.ISBN, opt.EAN = _parse_ISBN_EAN(details)
    opt.edition = _parse_edition(details)
    opt.description = _parse_description(details)
    return pub
|
Parse available informations about book from the book details page.
Args:
book_url (str): Absolute URL of the book.
Returns:
obj: :class:`structures.Publication` instance with book details.
|
def _get_real_ip(self):
    """
    Get IP from request.

    Prefers the ``X-Forwarded-For`` proxy header (first address in the
    chain) and falls back to ``REMOTE_ADDR``.

    :param request: A usual request object
    :type request: HttpRequest
    :return: ipv4 string or None
    """
    try:
        # Trying to work with most common proxy headers
        real_ip = self.request.META['HTTP_X_FORWARDED_FOR']
        return real_ip.split(',')[0]
    except KeyError:
        # Bug fix: a missing REMOTE_ADDR used to raise KeyError out of
        # this handler (sibling except clauses don't catch it) instead of
        # returning None as documented.
        return self.request.META.get('REMOTE_ADDR')
    except Exception:
        # Unknown IP
        return None
|
Get IP from request.
:param request: A usual request object
:type request: HttpRequest
:return: ipv4 string or None
|
def cli(env, limit, closed=False, get_all=False):
    """Invoices and all that mess"""
    manager = AccountManager(env.client)
    invoices = manager.get_invoices(limit, closed, get_all)

    table = formatting.Table([
        "Id", "Created", "Type", "Status", "Starting Balance", "Ending Balance", "Invoice Amount", "Items"
    ])
    # Left-align the monetary/count columns.
    for column in ("Starting Balance", "Ending Balance",
                   "Invoice Amount", "Items"):
        table.align[column] = 'l'

    # A single invoice comes back as a bare dict; normalize to a list.
    if isinstance(invoices, dict):
        invoices = [invoices]

    for invoice in invoices:
        table.add_row([
            invoice.get('id'),
            utils.clean_time(invoice.get('createDate'), out_format="%Y-%m-%d"),
            invoice.get('typeCode'),
            invoice.get('statusCode'),
            invoice.get('startingBalance'),
            invoice.get('endingBalance'),
            invoice.get('invoiceTotalAmount'),
            invoice.get('itemCount')
        ])
    env.fout(table)
|
Invoices and all that mess
|
def delete(self, records, context):
    """
    Removes the inputted record from the database.

    :param      records | <orb.Collection>
                context | <orb.Context>

    :return     <int> number of rows removed (0 on a dry run)
    """
    # include various schema records to remove
    DELETE = self.statement('DELETE')
    sql, data = DELETE(records, context)
    if context.dryRun:
        # Dry-run mode: show the rendered SQL instead of executing it.
        # NOTE: Python 2 print statement -- this module targets Python 2.
        print sql % data
        return 0
    else:
        return self.execute(sql, data, writeAccess=True)
|
Removes the inputted record from the database.
:param records | <orb.Collection>
context | <orb.Context>
:return <int> number of rows removed
|
def add_criterion(self, name, priority, and_or, search_type, value):   # pylint: disable=too-many-arguments
    """Add a search criteria object to a smart group.

    Args:
        name: String Criteria type name (e.g. "Application Title")
        priority: Int or Str number priority of criterion.
        and_or: Str, either "and" or "or".
        search_type: String Criteria search type. (e.g. "is", "is
            not", "member of", etc). Construct a SmartGroup with the
            criteria of interest in the web interface to determine
            what range of values are available.
        value: String value to search for/against.
    """
    self.criteria.append(
        SearchCriteria(name, priority, and_or, search_type, value))
|
Add a search criteria object to a smart group.
Args:
name: String Criteria type name (e.g. "Application Title")
priority: Int or Str number priority of criterion.
and_or: Str, either "and" or "or".
search_type: String Criteria search type. (e.g. "is", "is
not", "member of", etc). Construct a SmartGroup with the
criteria of interest in the web interface to determine
what range of values are available.
value: String value to search for/against.
|
def _set_pfiles(dry_run, **kwargs):
    """Set the PFILES env var

    Parameters
    ----------
    dry_run : bool
        Don't actually create the directory (PFILES is still updated)

    Keyword arguments
    -----------------
    pfiles : str
        Directory to prepend to PFILES

    Returns
    -------
    pfiles_orig : str
        Previous value of the PFILES envvar ('' if it was unset)
    """
    # Bug fix: don't raise KeyError when PFILES is not set at all.
    pfiles_orig = os.environ.get('PFILES', '')
    pfiles = kwargs.get('pfiles', None)
    if pfiles:
        if dry_run:
            print("mkdir %s" % pfiles)
        else:
            try:
                os.makedirs(pfiles)
            except OSError:
                # Best-effort: the directory may already exist.
                pass
        # Prepend the new directory so it takes search precedence.
        pfiles = "%s:%s" % (pfiles, pfiles_orig)
        os.environ['PFILES'] = pfiles
    return pfiles_orig
|
Set the PFILES env var
Parameters
----------
dry_run : bool
Don't actually run
Keyword arguments
-----------------
pfiles : str
Value to set PFILES
Returns
-------
pfiles_orig : str
Current value of PFILES envar
|
def ppf(q, df, loc=0.0, scale=1.0, gamma = 1.0):
    """
    Percent point function (inverse CDF) for the Skew t distribution.

    NOTE(review): loc and scale are accepted but not used here — confirm
    whether callers rely on that.
    """
    # Probability mass at/below zero splits the two branches of the skew t.
    split = Skewt.cdf(x=np.zeros(1), loc=np.zeros(1), df=df, gamma=gamma)
    lower = q < split
    upper = q >= split
    out = np.zeros(q.shape[0])
    out[lower] = 1.0/gamma*ss.t.ppf(((np.power(gamma,2) + 1.0) * q[lower])/2.0, df)
    out[upper] = gamma*ss.t.ppf((1.0 + 1.0/np.power(gamma,2))/2.0*(q[upper] - split) + 0.5, df)
    return out
|
PPF function for Skew t distribution
|
def get_profiles(self):
    """Return the set of profile names referenced in this Feature.

    :returns: set of profile names
    """
    profiles = set()
    for item in list(self.requires) + list(self.removes):
        if item.profile:
            profiles.add(item.profile)
    return profiles
|
Returns set of profile names referenced in this Feature
:returns: set of profile names
|
def sign_transaction(self, signer: Account):
    """
    Sign this transaction with the given account.

    :param signer: an Account object which will sign the transaction.
    :return: a Transaction object which has been signed.
    """
    digest = self.hash256()
    signature = signer.generate_signature(digest)
    self.sig_list = [Sig([signer.get_public_key_bytes()], 1, [signature])]
|
This interface is used to sign the transaction.
:param signer: an Account object which will sign the transaction.
:return: a Transaction object which has been signed.
|
def set_state(self, state):
    """
    Restore the full CPU state from a dict; used in unittests.
    """
    # Registers restored in the same order as before: X, Y, U, S, PC, A, B, DP.
    for reg_key, register in (
        (REG_X, self.index_x),
        (REG_Y, self.index_y),
        (REG_U, self.user_stack_pointer),
        (REG_S, self.system_stack_pointer),
        (REG_PC, self.program_counter),
        (REG_A, self.accu_a),
        (REG_B, self.accu_b),
        (REG_DP, self.direct_page),
    ):
        register.set(state[reg_key])
    self.set_cc(state[REG_CC])
    self.cycles = state["cycles"]
    self.memory.load(address=0x0000, data=state["RAM"])
|
used in unittests
|
def doNew(self, WHAT={}, **params):
    """This function will perform the command -new.

    NOTE(review): Python 2 only code (``raise FMError, ...`` and
    ``has_key``). WHAT may be a record-like object (detected via its
    ``_modified`` attribute) or a plain dict of field values; extra
    keyword args become additional DB params. The mutable default
    ``WHAT={}`` is never mutated here, so it is harmless.
    """
    # Record-like object: copy its fields, mapping renamed fields back to
    # their original DB names via __new2old__, skipping bookkeeping keys.
    if hasattr(WHAT, '_modified'):
        for key in WHAT:
            if key not in ['RECORDID','MODID']:
                if WHAT.__new2old__.has_key(key):
                    self._addDBParam(WHAT.__new2old__[key].encode('utf-8'), WHAT[key])
                else:
                    self._addDBParam(key, WHAT[key])
    # Plain dict: copy keys/values as-is.
    elif type(WHAT)==dict:
        for key in WHAT:
            self._addDBParam(key, WHAT[key])
    else:
        raise FMError, 'Python Runtime: Object type (%s) given to function doNew as argument WHAT cannot be used.' % type(WHAT)
    if self._layout == '':
        raise FMError, 'No layout was selected'
    # Keyword arguments are added after WHAT, so they can extend the record.
    for key in params:
        self._addDBParam(key, params[key])
    if len(self._dbParams) == 0:
        raise FMError, 'No data to be added'
    return self._doAction('-new')
|
This function will perform the command -new.
|
def notify(self, subsystem, recipient, subject, body_html, body_text):
    """Send a Slack notification to a channel or a user.

    Recipients use one of the following formats:
        #channel-name
        @username-direct-message

    Args:
        subsystem (`str`): Name of the subsystem originating the notification
        recipient (`str`): Recipient
        subject (`str`): Subject / title of the notification, not used for this notifier
        body_html (`str`): HTML formatted version of the message, not used for this notifier
        body_text (`str`): Text formatted version of the message

    Returns:
        `None`
    """
    if not re.match(self.validation, recipient, re.I):
        raise ValueError('Invalid recipient provided')

    if recipient.startswith('#'):
        target_type = 'channel'
    elif '@' in recipient:
        target_type = 'user'
    else:
        self.log.error('Unknown contact type for Slack: {}'.format(recipient))
        return

    try:
        self._send_message(
            target_type=target_type,
            target=recipient,
            message=body_text,
            title=subject
        )
    except SlackError as err:
        self.log.error('Failed sending message to {}: {}'.format(recipient, err))
|
You can send messages either to channels and private groups by using the following formats
#channel-name
@username-direct-message
Args:
subsystem (`str`): Name of the subsystem originating the notification
recipient (`str`): Recipient
subject (`str`): Subject / title of the notification, not used for this notifier
body_html (`str`): HTML formatted version of the message, not used for this notifier
body_text (`str`): Text formatted version of the message
Returns:
`None`
|
def save(self, file, contents, name=None, overwrite=False):
    """Save contents into a file.

    The format name can be given explicitly or inferred from the file
    extension.
    """
    if name is None:
        name = self.format_from_extension(op.splitext(file)[1])
    file_format = self.file_type(name)
    # Built-in file types have dedicated writers.
    if file_format == 'text':
        _write_text(file, contents)
        return
    if file_format == 'json':
        _write_json(file, contents)
        return
    # Otherwise fall back to the format's registered save function.
    write_function = self._formats[name].get('save', None)
    if write_function is None:
        raise IOError("The format must declare a file type or "
                      "load/save functions.")
    if op.exists(file) and not overwrite:
        print("The file already exists, please use overwrite=True.")
        return
    write_function(file, contents)
|
Save contents into a file. The format name can be specified
explicitly or inferred from the file extension.
|
def find_one(driver, locator_list, elem_type=CSS, timeout=TIMEOUT):
    """
    Wait until at least one of the given locators matches, then return
    the match(es).

    Args:
        driver (selenium webdriver): Selenium webdriver object
        locator_list (:obj: `list` of :obj: `str`): List of CSS selector strings
        elem_type (Selenium By types): Selenium By type (i.e. By.CSS_SELECTOR)
        timeout (int): Number of seconds to wait before timing out
    Returns:
        A single Selenium Element if exactly one element matched overall,
        otherwise a list of all matched elements
    Raises:
        TimeoutException: Raised if no elements are found within the TIMEOUT
    """
    def _find_one(driver):
        """ Expected Condition to find and return first located element """
        # Map each By type to the corresponding find_elements_* method.
        finders = {
            CLASS_NAME: driver.find_elements_by_class_name,
            CSS: driver.find_elements_by_css_selector,
            ID: driver.find_elements_by_id,
            LINK: driver.find_elements_by_link_text,
            NAME: driver.find_elements_by_name,
            PARTIAL_LINK: driver.find_elements_by_partial_link_text,
            TAG: driver.find_elements_by_tag_name,
            XPATH: driver.find_elements_by_xpath
        }
        # One list of elements per locator; return them (truthy) as soon as
        # any locator yields at least one element, else False to keep waiting.
        elems = [finders[elem_type](loc) for loc in locator_list]
        if any([len(elem_list) > 0 for elem_list in elems]):
            return elems
        else:
            return False
    raw_results = WebDriverWait(driver, timeout).until(_find_one)
    # Pull out any found elements from lists
    results = [elem for elem_list in raw_results for elem in elem_list]
    return results.pop() if len(results) == 1 else results
|
Args:
driver (selenium webdriver): Selenium webdriver object
locator_list (:obj: `list` of :obj: `str`): List of CSS selector strings
elem_type (Selenium By types): Selenium By type (i.e. By.CSS_SELECTOR)
timeout (int): Number of seconds to wait before timing out
Returns:
Selenium Element
Raises:
TimeoutException: Raised if no elements are found within the TIMEOUT
|
def com_google_fonts_check_metadata_match_filename_postscript(font_metadata):
    """METADATA.pb font.filename and font.post_script_name
    fields have equivalent values?
    """
    # Compare the filename stem (extension stripped) against the
    # declared PostScript name.
    expected = font_metadata.post_script_name
    stem = os.path.splitext(font_metadata.filename)[0]
    if stem == expected:
        yield PASS, ("METADATA.pb font fields \"filename\" and"
                     " \"post_script_name\" have equivalent values.")
    else:
        yield FAIL, ("METADATA.pb font filename=\"{}\" does not match"
                     " post_script_name=\"{}\"."
                     "").format(font_metadata.filename,
                                font_metadata.post_script_name)
|
METADATA.pb font.filename and font.post_script_name
fields have equivalent values?
|
def images(self, type):
    """
    Return the list of images available for this type on controller
    and on the compute node.

    Old-style coroutine (uses ``yield from``); must be awaited/yielded
    from an event loop.

    :param type: emulator type string (e.g. "qemu", "dynamips", "iou")
    :returns: list of image dicts, sorted by "filename" for local-image
        types or by "image" otherwise
    :raises ComputeError: if the local images cannot be listed
    """
    images = []
    res = yield from self.http_query("GET", "/{}/images".format(type), timeout=None)
    images = res.json
    try:
        if type in ["qemu", "dynamips", "iou"]:
            # Merge in images found locally that the compute node did not
            # report, de-duplicating on the "filename" key.
            for local_image in list_images(type):
                if local_image['filename'] not in [i['filename'] for i in images]:
                    images.append(local_image)
            images = sorted(images, key=itemgetter('filename'))
        else:
            images = sorted(images, key=itemgetter('image'))
    except OSError as e:
        raise ComputeError("Can't list images: {}".format(str(e)))
    return images
|
Return the list of images available for this type on controller
and on the compute node.
|
def read_header(filename):
    '''Return a dictionary of the "key: value" pairs in the header of the given file.

    Only lines between the "*** Header Start ***" and "*** Header End ***"
    markers are parsed. Returns ``None`` (implicitly) if no header end
    marker is found, matching the previous behavior.
    '''
    header = {}
    in_header = False
    data = nl.universal_read(filename)
    lines = [x.strip() for x in data.split('\n')]
    for line in lines:
        if line == "*** Header Start ***":
            in_header = True
            continue
        if line == "*** Header End ***":
            return header
        # Bug fix: previously key/value lines were collected even before the
        # header start marker (the in_header flag was set but never read).
        if in_header:
            fields = line.split(": ")
            if len(fields) == 2:
                header[fields[0]] = fields[1]
|
returns a dictionary of values in the header of the given file
|
def sign_digest_deterministic(self, digest, hashfunc=None, sigencode=sigencode_string):
    """
    Produce a deterministic (reproducible) signature over *digest*.

    The nonce 'k' is derived from the data itself per RFC 6979, removing
    the need for a strong random generator. See RFC 6979 for details.
    """
    order = self.curve.generator.order()
    secexp = self.privkey.secret_multiplier
    k = rfc6979.generate_k(order, secexp, hashfunc, digest)
    return self.sign_digest(digest, sigencode=sigencode, k=k)
|
Calculates 'k' from data itself, removing the need for strong
random generator and producing deterministic (reproducible) signatures.
See RFC 6979 for more details.
|
def is_close_to(self, other, tolerance):
    """Asserts that val is numeric and is close to other within tolerance."""
    self._validate_close_to_args(self.val, other, tolerance)
    out_of_range = self.val < (other - tolerance) or self.val > (other + tolerance)
    if out_of_range:
        if type(self.val) is datetime.datetime:
            # Render the timedelta tolerance as H:MM:SS for the error message.
            total_seconds = tolerance.days * 86400 + tolerance.seconds + tolerance.microseconds / 1000000
            hours, remainder = divmod(total_seconds, 3600)
            minutes, seconds = divmod(remainder, 60)
            self._err('Expected <%s> to be close to <%s> within tolerance <%d:%02d:%02d>, but was not.' % (
                self.val.strftime('%Y-%m-%d %H:%M:%S'),
                other.strftime('%Y-%m-%d %H:%M:%S'),
                hours, minutes, seconds))
        else:
            self._err('Expected <%s> to be close to <%s> within tolerance <%s>, but was not.' % (self.val, other, tolerance))
    return self
|
Asserts that val is numeric and is close to other within tolerance.
|
def read_gtfs(path: Path, dist_units: str) -> "Feed":
    """
    Create a Feed instance from the given path and given distance units.
    The path should be a directory containing GTFS text files or a
    zip file that unzips as a collection of GTFS text files
    (and not as a directory containing GTFS text files).
    The distance units given must lie in :const:`constants.dist_units`
    Notes
    -----
    - Ignore non-GTFS files
    - Automatically strip whitespace from the column names in GTFS files
    """
    path = Path(path)
    if not path.exists():
        raise ValueError(f"Path {path} does not exist")
    # Unzip path to temporary directory if necessary.
    # The TemporaryDirectory object must stay alive while files are read,
    # so it is cleaned up explicitly at the end rather than via `with`.
    if path.is_file():
        zipped = True
        tmp_dir = tempfile.TemporaryDirectory()
        src_path = Path(tmp_dir.name)
        shutil.unpack_archive(str(path), tmp_dir.name, "zip")
    else:
        zipped = False
        src_path = path
    # Read files into feed dictionary of DataFrames.
    # Tables with no corresponding file (or with no data) remain None.
    feed_dict = {table: None for table in cs.GTFS_REF["table"]}
    for p in src_path.iterdir():
        table = p.stem
        # Skip empty files, irrelevant files, and files with no data
        if p.is_file() and p.stat().st_size and table in feed_dict:
            # utf-8-sig gets rid of the byte order mark (BOM);
            # see http://stackoverflow.com/questions/17912307/u-ufeff-in-python-string
            df = pd.read_csv(p, dtype=cs.DTYPE, encoding="utf-8-sig")
            if not df.empty:
                feed_dict[table] = cn.clean_column_names(df)
    feed_dict["dist_units"] = dist_units
    # Delete temporary directory
    if zipped:
        tmp_dir.cleanup()
    # Create feed
    return Feed(**feed_dict)
|
Create a Feed instance from the given path and given distance units.
The path should be a directory containing GTFS text files or a
zip file that unzips as a collection of GTFS text files
(and not as a directory containing GTFS text files).
The distance units given must lie in :const:`constants.dist_units`
Notes
-----
- Ignore non-GTFS files
- Automatically strip whitespace from the column names in GTFS files
|
def AgregarDatoPDF(self, campo, valor, pagina='T'):
    "Add a field value for the invoice PDF (stored internally)"
    # Fix relative paths for images (backwards compatibility):
    # if the background image path no longer exists, look it up inside
    # the "plantillas" (templates) directory of the install dir.
    if campo == 'fondo' and valor.startswith(self.InstallDir):
        if not os.path.exists(valor):
            valor = os.path.join(self.InstallDir, "plantillas", os.path.basename(valor))
            if DEBUG: print "NUEVO PATH:", valor
    self.datos[campo] = valor
    return True
|
Agrego un dato a la factura (internamente)
|
def exclude_reference_link(self, exclude):
    """Set `sysparm_exclude_reference_link` to a bool value.

    :param exclude: bool
    :raises InvalidUsage: if `exclude` is not a bool
    """
    if isinstance(exclude, bool):
        self._sysparms['sysparm_exclude_reference_link'] = exclude
    else:
        raise InvalidUsage('exclude_reference_link must be of type bool')
|
Sets `sysparm_exclude_reference_link` to a bool value
:param exclude: bool
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.