def _astore32(ins):
    ''' Stores 2nd operand content into address of 1st operand.
    store32 a, x => *(&a) = x
'''
output = _addr(ins.quad[1])
value = ins.quad[2]
if value[0] == '*':
value = value[1:]
indirect = True
else:
indirect = False
try:
        value = int(value) & 0xFFFFFFFF  # Immediate? ('value' already has any '*' prefix stripped)
if indirect:
output.append('push hl')
output.append('ld hl, %i' % (value & 0xFFFF))
output.append('call __ILOAD32')
output.append('ld b, h')
output.append('ld c, l') # BC = Lower 16 bits
output.append('pop hl')
REQUIRES.add('iload32.asm')
else:
output.append('ld de, %i' % (value >> 16))
output.append('ld bc, %i' % (value & 0xFFFF))
except ValueError:
output.append('pop bc')
output.append('pop de')
output.append('call __STORE32')
REQUIRES.add('store32.asm')
return output
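A minimal sketch of how this code generator might be exercised; the FakeIns class, the _addr stub and the REQUIRES set below are stand-ins invented for illustration, not the compiler's real fixtures:

class FakeIns:
    def __init__(self, quad):
        self.quad = quad

REQUIRES = set()

def _addr(operand):
    # stand-in: the real helper emits code that leaves the operand's address in HL
    return ['ld hl, (_%s)' % operand]

# immediate 32-bit store: 70000 = 0x00011170 -> DE = 1, BC = 4464
print(_astore32(FakeIns(('store32', 'a', '70000'))))
# ['ld hl, (_a)', 'ld de, 1', 'ld bc, 4464']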
def update_file(filename, result, content, indent):
"""Updates a Jekyll file to contain the counts form an object
This just converts the results to YAML and adds to the Jekyll frontmatter.
Args:
filename: the Jekyll file to update
result: the results object from `wc`
content: the contents of the original file
indent: the indentation level for dumping YAML
"""
# Split the file into frontmatter and content
parts = re.split('---+', content, 2)
# Load the frontmatter into an object
frontmatter = yaml.safe_load(parts[1])
# Add the counts entry in the results object to the frontmatter
frontmatter['counts'] = result['counts']
    # Set the frontmatter part back to the stringified version of the
    # frontmatter object
parts[1] = '\n{}'.format(
yaml.safe_dump(frontmatter, default_flow_style=False, indent=indent))
result = '---'.join(parts)
# Write everything back to the file
with open(filename, 'wb') as f:
f.write(result.encode('utf-8'))
print('{} updated.'.format(filename))
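A hypothetical call, assuming a Jekyll post at post.md and a wc-style results object shaped the way the docstring implies (both the path and the counts are invented):

import re
import yaml

with open('post.md', encoding='utf-8') as f:
    content = f.read()
result = {'counts': {'words': 1234, 'paragraphs': 42}}
update_file('post.md', result, content, indent=2)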
def assertType(var, *allowedTypes):
    """
    Asserts that a variable @var is an instance of one of the @allowedTypes.
    Raises a TypeError if the assertion fails.
    """
    if not isinstance(var, allowedTypes):
        raise TypeError("This operation is only supported for {}. "
                        "Instead found {}".format(allowedTypes, type(var)))
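With the tuple form of isinstance, the variadic arguments behave as expected:

assertType(3, int)            # passes silently
assertType(3.5, int, float)   # passes: float is allowed
assertType('x', int, float)   # raises TypeError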
def extract_captions(tex_file, sdir, image_list, primary=True):
"""Extract captions.
Take the TeX file and the list of images in the tarball (which all,
presumably, are used in the TeX file) and figure out which captions
in the text are associated with which images
:param: tex_file (string): the name of the TeX file which mentions
the images
:param: sdir (string): path to current sub-directory
:param: image_list (list): list of images in tarball
    :param: primary (bool): is this the primary call to extract_captions?
:return: images_and_captions_and_labels ([(string, string, list),
(string, string, list), ...]):
        a list of tuples giving the name of each image, its caption, and
        the figure labels from the TeX file
"""
if os.path.isdir(tex_file) or not os.path.exists(tex_file):
return []
lines = get_lines_from_file(tex_file)
# possible figure lead-ins
figure_head = u'\\begin{figure' # also matches figure*
figure_wrap_head = u'\\begin{wrapfigure'
figure_tail = u'\\end{figure' # also matches figure*
figure_wrap_tail = u'\\end{wrapfigure'
picture_head = u'\\begin{picture}'
displaymath_head = u'\\begin{displaymath}'
subfloat_head = u'\\subfloat'
subfig_head = u'\\subfigure'
includegraphics_head = u'\\includegraphics'
epsfig_head = u'\\epsfig'
input_head = u'\\input'
# possible caption lead-ins
caption_head = u'\\caption'
figcaption_head = u'\\figcaption'
label_head = u'\\label'
rotate = u'rotate='
angle = u'angle='
eps_tail = u'.eps'
ps_tail = u'.ps'
doc_head = u'\\begin{document}'
doc_tail = u'\\end{document}'
extracted_image_data = []
cur_image = ''
caption = ''
labels = []
active_label = ""
    # discard everything before the document head
if primary:
for line_index in range(len(lines)):
if lines[line_index].find(doc_head) < 0:
lines[line_index] = ''
else:
break
# are we using commas in filenames here?
commas_okay = False
for dummy1, dummy2, filenames in \
os.walk(os.path.split(os.path.split(tex_file)[0])[0]):
for filename in filenames:
if filename.find(',') > -1:
commas_okay = True
break
# a comment is a % not preceded by a \
comment = re.compile("(?<!\\\\)%")
for line_index in range(len(lines)):
# get rid of pesky comments by splitting where the comment is
# and keeping only the part before the %
line = comment.split(lines[line_index])[0]
line = line.strip()
lines[line_index] = line
in_figure_tag = 0
for line_index in range(len(lines)):
line = lines[line_index]
if line == '':
continue
if line.find(doc_tail) > -1:
break
"""
FIGURE -
structure of a figure:
\begin{figure}
\formatting...
\includegraphics[someoptions]{FILENAME}
\caption{CAPTION} %caption and includegraphics may be switched!
\end{figure}
"""
index = max([line.find(figure_head), line.find(figure_wrap_head)])
if index > -1:
in_figure_tag = 1
            # some authors leave content outside the figure tag, so flush
            # anything pending that was collected before this figure begins
cur_image, caption, extracted_image_data = put_it_together(
cur_image, caption,
active_label, extracted_image_data,
line_index, lines)
        # inclusion types vary wildly between documents, so check for
        # several possibilities (.eps/.ps suffixes and \epsfig)
index = max([line.find(eps_tail), line.find(ps_tail),
line.find(epsfig_head)])
if index > -1:
if line.find(eps_tail) > -1 or line.find(ps_tail) > -1:
ext = True
else:
ext = False
filenames = intelligently_find_filenames(line, ext=ext,
commas_okay=commas_okay)
# try to look ahead! sometimes there are better matches after
if line_index < len(lines) - 1:
filenames.extend(intelligently_find_filenames(
lines[line_index + 1],
commas_okay=commas_okay))
if line_index < len(lines) - 2:
filenames.extend(intelligently_find_filenames(
lines[line_index + 2],
commas_okay=commas_okay))
for filename in filenames:
filename = filename.encode('utf-8', 'ignore')
if cur_image == '':
cur_image = filename
elif type(cur_image) == list:
if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
cur_image[SUB_CAPTION_OR_IMAGE].append(filename)
else:
cur_image[SUB_CAPTION_OR_IMAGE] = [filename]
else:
cur_image = ['', [cur_image, filename]]
"""
Rotate and angle
"""
index = max(line.find(rotate), line.find(angle))
if index > -1:
# which is the image associated to it?
filenames = intelligently_find_filenames(line,
commas_okay=commas_okay)
# try the line after and the line before
if line_index + 1 < len(lines):
filenames.extend(intelligently_find_filenames(
lines[line_index + 1],
commas_okay=commas_okay))
if line_index > 1:
filenames.extend(intelligently_find_filenames(
lines[line_index - 1],
commas_okay=commas_okay))
already_tried = []
for filename in filenames:
if filename != 'ERROR' and filename not in already_tried:
if rotate_image(filename, line, sdir, image_list):
break
already_tried.append(filename)
"""
INCLUDEGRAPHICS -
structure of includegraphics:
\includegraphics[someoptions]{FILENAME}
"""
index = line.find(includegraphics_head)
if index > -1:
open_curly, open_curly_line, close_curly, dummy = \
find_open_and_close_braces(line_index, index, '{', lines)
filename = lines[open_curly_line][open_curly + 1:close_curly]
if cur_image == '':
cur_image = filename
elif type(cur_image) == list:
if type(cur_image[SUB_CAPTION_OR_IMAGE]) == list:
cur_image[SUB_CAPTION_OR_IMAGE].append(filename)
else:
cur_image[SUB_CAPTION_OR_IMAGE] = [filename]
else:
cur_image = ['', [cur_image, filename]]
"""
{\input{FILENAME}}
\caption{CAPTION}
This input is ambiguous, since input is also used for things like
inclusion of data from other LaTeX files directly.
"""
index = line.find(input_head)
if index > -1:
new_tex_names = intelligently_find_filenames(
line, TeX=True,
commas_okay=commas_okay)
for new_tex_name in new_tex_names:
if new_tex_name != 'ERROR':
new_tex_file = get_tex_location(new_tex_name, tex_file)
if new_tex_file and primary: # to kill recursion
extracted_image_data.extend(extract_captions(
new_tex_file, sdir,
image_list,
primary=False
))
"""PICTURE"""
index = line.find(picture_head)
if index > -1:
# structure of a picture:
# \begin{picture}
# ....not worrying about this now
# print('found picture tag')
# FIXME
pass
"""DISPLAYMATH"""
index = line.find(displaymath_head)
if index > -1:
# structure of a displaymath:
# \begin{displaymath}
# ....not worrying about this now
# print('found displaymath tag')
# FIXME
pass
"""
CAPTIONS -
structure of a caption:
\caption[someoptions]{CAPTION}
or
\caption{CAPTION}
or
\caption{{options}{CAPTION}}
"""
index = max([line.find(caption_head), line.find(figcaption_head)])
if index > -1:
open_curly, open_curly_line, close_curly, close_curly_line = \
find_open_and_close_braces(line_index, index, '{', lines)
cap_begin = open_curly + 1
cur_caption = assemble_caption(
open_curly_line, cap_begin,
close_curly_line, close_curly, lines)
if caption == '':
caption = cur_caption
elif type(caption) == list:
if type(caption[SUB_CAPTION_OR_IMAGE]) == list:
caption[SUB_CAPTION_OR_IMAGE].append(cur_caption)
else:
caption[SUB_CAPTION_OR_IMAGE] = [cur_caption]
elif caption != cur_caption:
caption = ['', [caption, cur_caption]]
"""
SUBFLOATS -
structure of a subfloat (inside of a figure tag):
\subfloat[CAPTION]{options{FILENAME}}
also associated with the overall caption of the enclosing figure
"""
index = line.find(subfloat_head)
if index > -1:
# if we are dealing with subfloats, we need a different
# sort of structure to keep track of captions and subcaptions
if not isinstance(cur_image, list):
cur_image = [cur_image, []]
if not isinstance(caption, list):
caption = [caption, []]
open_square, open_square_line, close_square, close_square_line = \
find_open_and_close_braces(line_index, index, '[', lines)
cap_begin = open_square + 1
sub_caption = assemble_caption(
open_square_line,
cap_begin, close_square_line, close_square, lines)
caption[SUB_CAPTION_OR_IMAGE].append(sub_caption)
open_curly, open_curly_line, close_curly, dummy = \
find_open_and_close_braces(close_square_line,
close_square, '{', lines)
sub_image = lines[open_curly_line][open_curly + 1:close_curly]
cur_image[SUB_CAPTION_OR_IMAGE].append(sub_image)
"""
SUBFIGURES -
structure of a subfigure (inside a figure tag):
\subfigure[CAPTION]{
\includegraphics[options]{FILENAME}}
also associated with the overall caption of the enclosing figure
"""
index = line.find(subfig_head)
if index > -1:
            # like with subfloats, we need a different structure for keeping
            # track of this stuff
if type(cur_image) != list:
cur_image = [cur_image, []]
if type(caption) != list:
caption = [caption, []]
open_square, open_square_line, close_square, close_square_line = \
find_open_and_close_braces(line_index, index, '[', lines)
cap_begin = open_square + 1
sub_caption = assemble_caption(open_square_line,
cap_begin, close_square_line,
close_square, lines)
caption[SUB_CAPTION_OR_IMAGE].append(sub_caption)
            line_index_cpy = line_index
# find the graphics tag to get the filename
# it is okay if we eat lines here
index = line.find(includegraphics_head)
while index == -1 and (line_index + 1) < len(lines):
line_index += 1
line = lines[line_index]
index = line.find(includegraphics_head)
            if index == -1:
                # didn't find the image name on any following line; restore
                line_index = line_index_cpy
open_curly, open_curly_line, close_curly, dummy = \
find_open_and_close_braces(line_index,
index, '{', lines)
sub_image = lines[open_curly_line][open_curly + 1:close_curly]
cur_image[SUB_CAPTION_OR_IMAGE].append(sub_image)
"""
LABELS -
structure of a label:
\label{somelabelnamewhichprobablyincludesacolon}
Labels are used to tag images and will later be used in ref tags
to reference them. This is interesting because in effect the refs
to a plot are additional caption for it.
Notes: labels can be used for many more things than just plots.
We'll have to experiment with how to best associate a label with an
image.. if it's in the caption, it's easy. If it's in a figure, it's
still okay... but the images that aren't in figure tags are numerous.
"""
index = line.find(label_head)
if index > -1 and in_figure_tag:
open_curly, open_curly_line, close_curly, dummy =\
find_open_and_close_braces(line_index,
index, '{', lines)
label = lines[open_curly_line][open_curly + 1:close_curly]
if label not in labels:
active_label = label
labels.append(label)
"""
FIGURE
important: we put the check for the end of the figure at the end
of the loop in case some pathological person puts everything in one
line
"""
index = max([
line.find(figure_tail),
line.find(figure_wrap_tail),
line.find(doc_tail)
])
if index > -1:
in_figure_tag = 0
cur_image, caption, extracted_image_data = \
put_it_together(cur_image, caption, active_label,
extracted_image_data,
line_index, lines)
"""
END DOCUMENT
we shouldn't look at anything after the end document tag is found
"""
index = line.find(doc_tail)
if index > -1:
break
return extracted_image_data
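A sketch of how the extractor might be invoked, assuming the surrounding plotextractor helpers are importable; the paths and image names are invented, and each extracted tuple is assumed to carry the image name, caption and label list:

data = extract_captions('paper/main.tex', 'paper',
                        ['figs/flow.eps', 'figs/result.eps'])
for image, caption, labels in data:
    print(image, '->', labels, ':', caption[:60])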
def match(self, path):
u"""
:type path: text_type
:param path: The path to match against this list of path specs.
    :return: ``True`` if the most recently added matching spec is not
        negated; ``False`` if it is negated or if no spec matches.
"""
for spec in reversed(self.pathspecs): # type: Pathspec
if spec.pattern.match(path):
return not spec.negated
return False
def inspect_signature_parameters(callable_, excluded=None):
"""Get the parameters of a callable.
Returns a list with the signature parameters of `callable_`.
Parameters contained in `excluded` tuple will not be included
in the result.
:param callable_: callable object
:param excluded: tuple with default parameters to exclude
:result: list of parameters
"""
if not excluded:
excluded = ()
signature = inspect.signature(callable_)
params = [
v for p, v in signature.parameters.items()
if p not in excluded
]
return params
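For example (the handler function here is just an illustration):

import inspect

def handler(request, user, verbose=False):
    pass

params = inspect_signature_parameters(handler, excluded=('verbose',))
print([p.name for p in params])  # ['request', 'user']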
def save(self, *args, **kwargs):
"""
    **uid**: :code:`{division.uid}_{election_day.uid}_ballotmeasure:{number}`
"""
self.uid = '{}_{}_ballotmeasure:{}'.format(
self.division.uid,
self.election_day.uid,
self.number
)
super(BallotMeasure, self).save(*args, **kwargs)
def can_use_c_for(self, node):
"""
Check if a for loop can use classic C syntax.
To use C syntax:
    - the target must not be assigned in the loop
    - (x)range must be used as the iterator
    - the order has to be known at compile time
"""
assert isinstance(node.target, ast.Name)
if sys.version_info.major == 3:
range_name = 'range'
else:
range_name = 'xrange'
pattern_range = ast.Call(func=ast.Attribute(
value=ast.Name(id='__builtin__',
ctx=ast.Load(),
annotation=None),
attr=range_name, ctx=ast.Load()),
args=AST_any(), keywords=[])
is_assigned = {node.target.id: False}
    for stmt in node.body:
        is_assigned.update(self.gather(IsAssigned, stmt))
nodes = ASTMatcher(pattern_range).search(node.iter)
if (node.iter not in nodes or is_assigned[node.target.id]):
return False
args = node.iter.args
if len(args) < 3:
return True
if isinstance(args[2], ast.Num):
return True
return False
def run_key(self, key):
'''
Return a function that executes the arguments passed via the local
client
'''
def func(*args, **kwargs):
'''
Run a remote call
'''
args = list(args)
        for _key, _val in kwargs.items():
args.append('{0}={1}'.format(_key, _val))
return self.local.cmd(self.minion, key, args)
return func
def match_https_hostname(cls, hostname):
"""
:param hostname: a string
:returns: an :py:class:`~httpretty.core.URLMatcher` or ``None``
"""
items = sorted(
cls._entries.items(),
key=lambda matcher_entries: matcher_entries[0].priority,
reverse=True,
)
for matcher, value in items:
if matcher.info is None:
pattern_with_port = "https://{0}:".format(hostname)
pattern_without_port = "https://{0}/".format(hostname)
hostname_pattern = (
hostname_re
.match(matcher.regex.pattern)
.group(0)
)
for pattern in [pattern_with_port, pattern_without_port]:
if re.match(hostname_pattern, pattern):
return matcher
elif matcher.info.hostname == hostname:
return matcher
return None
def add_group(id, description=None):
""" Adds group to the DCOS Enterprise. If not description
is provided the id will be used for the description.
:param id: group id
:type id: str
:param desc: description of user
:type desc: str
"""
if not description:
description = id
data = {
'description': description
}
acl_url = urljoin(_acl_url(), 'groups/{}'.format(id))
try:
r = http.put(acl_url, json=data)
assert r.status_code == 201
except DCOSHTTPException as e:
if e.response.status_code != 409:
raise
def validate(self, str_in):
# type: (Text) -> None
""" Validates an entry in the field.
Raises `InvalidEntryError` iff the entry is invalid.
An entry is invalid iff (1) the string does not represent a
date in the correct format; or (2) the date it represents
is invalid (such as 30 February).
:param str str_in: String to validate.
:raises InvalidEntryError: Iff entry is invalid.
:raises ValueError: When self.format is unrecognised.
"""
if self.is_missing_value(str_in):
return
# noinspection PyCompatibility
super().validate(str_in)
try:
datetime.strptime(str_in, self.format)
except ValueError as e:
msg = "Validation error for date type: {}".format(e)
e_new = InvalidEntryError(msg)
e_new.field_spec = self
raise_from(e_new, e)
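The core of the check is datetime.strptime, which rejects both malformed strings and impossible dates; the method then wraps the ValueError in an InvalidEntryError. A standalone illustration of the two failure modes, assuming the format is '%Y-%m-%d':

from datetime import datetime

datetime.strptime('2021-02-28', '%Y-%m-%d')    # ok
# datetime.strptime('28/02/2021', '%Y-%m-%d')  # ValueError: wrong format
# datetime.strptime('2021-02-30', '%Y-%m-%d')  # ValueError: day out of range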
def _args2_fpath(dpath, fname, cfgstr, ext):
r"""
Ensures that the filename is not too long
Internal util_cache helper function
Windows MAX_PATH=260 characters
Absolute length is limited to 32,000 characters
Each filename component is limited to 255 characters
Args:
dpath (str):
fname (str):
cfgstr (str):
ext (str):
Returns:
str: fpath
CommandLine:
python -m utool.util_cache --test-_args2_fpath
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_cache import * # NOQA
>>> from utool.util_cache import _args2_fpath
>>> import utool as ut
>>> dpath = 'F:\\data\\work\\PZ_MTEST\\_ibsdb\\_ibeis_cache'
>>> fname = 'normalizer_'
>>> cfgstr = u'PZ_MTEST_DSUUIDS((9)67j%dr%&bl%4oh4+)_QSUUIDS((9)67j%dr%&bl%4oh4+)zebra_plains_vsone_NN(single,K1+1,last,cks1024)_FILT(ratio<0.625;1.0,fg;1.0)_SV(0.01;2;1.57minIn=4,nRR=50,nsum,)_AGG(nsum)_FLANN(4_kdtrees)_FEATWEIGHT(ON,uselabel,rf)_FEAT(hesaff+sift_)_CHIP(sz450)'
>>> ext = '.cPkl'
>>> fpath = _args2_fpath(dpath, fname, cfgstr, ext)
>>> result = str(ut.ensure_unixslash(fpath))
>>> target = 'F:/data/work/PZ_MTEST/_ibsdb/_ibeis_cache/normalizer_xfylfboirymmcpfg.cPkl'
>>> ut.assert_eq(result, target)
"""
if len(ext) > 0 and ext[0] != '.':
raise ValueError('Please be explicit and use a dot in ext')
max_len = 128
# should hashlen be larger?
cfgstr_hashlen = 16
prefix = fname
fname_cfgstr = consensed_cfgstr(prefix, cfgstr, max_len=max_len,
cfgstr_hashlen=cfgstr_hashlen)
fpath = join(dpath, fname_cfgstr + ext)
fpath = normpath(fpath)
return fpath
def load(self, source):
    """
    Opens the source file.
    """
    self.source = open(source, 'rb')
    self.loaded = True
def _colorize(val, color):
"""Colorize a string using termcolor or colorama.
If any of them are available.
"""
if termcolor is not None:
val = termcolor.colored(val, color)
elif colorama is not None:
val = TERMCOLOR2COLORAMA[color] + val + colorama.Style.RESET_ALL
return val
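Assuming the module-level termcolor/colorama imports and the TERMCOLOR2COLORAMA mapping exist as the function implies, usage looks like:

print(_colorize('FAILED', 'red'))
print(_colorize('PASSED', 'green'))
# with neither library installed, the string is returned unchanged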
def Operation(self, x, y):
"""Whether x is fully contained in y."""
if x in y:
return True
# x might be an iterable
# first we need to skip strings or we'll do silly things
# pylint: disable=consider-merging-isinstance
if isinstance(x, py2to3.STRING_TYPES) or isinstance(x, bytes):
return False
try:
for value in x:
if value not in y:
return False
return True
except TypeError:
return False
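A behaviour sketch; since the logic never touches self, None stands in for it here (an illustration, not the filter framework's API):

Operation(None, 'a', ['a', 'b'])              # True: direct membership
Operation(None, ['a', 'b'], ['a', 'b', 'c'])  # True: every element contained
Operation(None, 'ab', ['a', 'b'])             # False: strings are not iterated
Operation(None, 3, [1, 2])                    # False: not contained, not iterable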
def _set_fill_word(self, v, load=False):
"""
Setter method for fill_word, mapped from YANG variable /interface/fc_port/fill_word (fc-fillword-cfg-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_fill_word is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_fill_word() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'idle-idle': {'value': 0}, u'arbff-arbff': {'value': 1}, u'idle-arbff': {'value': 2}, u'aa-then-ia': {'value': 3}},), default=unicode("idle-idle"), is_leaf=True, yang_name="fill-word", rest_name="fill-word", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Fill Word', u'hidden': u'full', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='fc-fillword-cfg-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """fill_word must be of a type compatible with fc-fillword-cfg-type""",
'defined-type': "brocade-interface:fc-fillword-cfg-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'idle-idle': {'value': 0}, u'arbff-arbff': {'value': 1}, u'idle-arbff': {'value': 2}, u'aa-then-ia': {'value': 3}},), default=unicode("idle-idle"), is_leaf=True, yang_name="fill-word", rest_name="fill-word", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Fill Word', u'hidden': u'full', u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='fc-fillword-cfg-type', is_config=True)""",
})
self.__fill_word = t
if hasattr(self, '_set'):
self._set()
def _build_url(*args, **kwargs) -> str:
"""
Return a valid url.
"""
resource_url = API_RESOURCES_URLS
for key in args:
resource_url = resource_url[key]
if kwargs:
resource_url = resource_url.format(**kwargs)
return urljoin(URL, resource_url)
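With assumed values for the module-level URL and API_RESOURCES_URLS (both invented for this sketch), the nested lookup and formatting behave like:

from urllib.parse import urljoin

URL = 'https://api.example.com/'
API_RESOURCES_URLS = {'users': {'detail': 'users/{user_id}/'}}

print(_build_url('users', 'detail', user_id=42))
# https://api.example.com/users/42/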
def get_pointgroup(self, tolerance=0.3):
"""Returns a PointGroup object for the molecule.
Args:
tolerance (float): Tolerance to generate the full set of symmetry
operations.
Returns:
:class:`~PointGroupOperations`
"""
PA = self._get_point_group_analyzer(tolerance=tolerance)
return PointGroupOperations(PA.sch_symbol, PA.symmops)
def set_configs(self, key, d):
"""Set the whole configuration for a key"""
if '_config' in self.proxy:
self.proxy['_config'][key] = d
else:
self.proxy['_config'] = {key: d}
def pref_update(self, key, new_val):
""" Changes a preference value and saves it to disk """
print('Update and save pref from: %s=%r, to: %s=%r' %
(key, six.text_type(self[key]), key, six.text_type(new_val)))
self.__setattr__(key, new_val)
return self.save()
def tupletree(table, start='start', stop='stop', value=None):
"""
Construct an interval tree for the given table, where each node in the tree
is a row of the table.
"""
import intervaltree
tree = intervaltree.IntervalTree()
it = iter(table)
hdr = next(it)
flds = list(map(text_type, hdr))
assert start in flds, 'start field not recognised'
assert stop in flds, 'stop field not recognised'
getstart = itemgetter(flds.index(start))
getstop = itemgetter(flds.index(stop))
if value is None:
getvalue = tuple
else:
valueindices = asindices(hdr, value)
assert len(valueindices) > 0, 'invalid value field specification'
getvalue = itemgetter(*valueindices)
for row in it:
tree.addi(getstart(row), getstop(row), getvalue(row))
return tree
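A small usage sketch, assuming petl (for asindices/text_type) and intervaltree are installed:

table = [('start', 'stop', 'name'),
         (1, 4, 'a'),
         (3, 7, 'b'),
         (8, 9, 'c')]
tree = tupletree(table, value='name')
print(sorted(iv.data for iv in tree[2:5]))  # ['a', 'b']: intervals overlapping [2, 5)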
def show_cluster_role(cl_args, cluster, role):
''' print topologies information to stdout '''
try:
result = tracker_access.get_cluster_role_topologies(cluster, role)
if not result:
Log.error('Unknown cluster/role \'%s\'' % '/'.join([cluster, role]))
return False
result = result[cluster]
except Exception:
Log.error("Fail to connect to tracker: \'%s\'", cl_args["tracker_url"])
return False
table, header, rest_count = to_table(result)
print('Topologies running in cluster \'%s\' submitted by \'%s\':' % (cluster, role))
if rest_count:
print(' with %d more...' % rest_count)
print(tabulate(table, headers=header))
return True
def update_compliance_task(self, id, name=None, module_name=None, schedule=None, scope=None, enabled=None):
'''**Description**
Update an existing compliance task.
**Arguments**
- id: the id of the compliance task to be updated.
- name: The name of the task e.g. 'Check Docker Compliance'.
- module_name: The name of the module that implements this task. Separate from task name in case you want to use the same module to run separate tasks with different scopes or schedules. [ 'docker-bench-security', 'kube-bench' ]
- schedule: The frequency at which this task should run. Expressed as an `ISO 8601 Duration <https://en.wikipedia.org/wiki/ISO_8601#Durations>`_
- scope: The agent will only run the task on hosts matching this scope or on hosts where containers match this scope.
- enabled: Whether this task should actually run as defined by its schedule.
**Success Return Value**
A JSON representation of the compliance task.
'''
ok, res = self.get_compliance_task(id)
if not ok:
return ok, res
task = res
options = {
'name': name,
'moduleName': module_name,
'schedule': schedule,
'scope': scope,
'enabled': enabled
}
task.update({k: v for k, v in options.items() if v is not None})
res = requests.put(self.url + '/api/complianceTasks/{}'.format(id), data=json.dumps(task), headers=self.hdrs, verify=self.ssl_verify)
return self._request_result(res)
def add_proof(self, text, publisher_account, keeper):
"""Add a proof to the DDO, based on the public_key id/index and signed with the private key
add a static proof to the DDO, based on one of the public keys."""
# just incase clear out the current static proof property
self._proof = None
self._proof = {
'type': PROOF_TYPE,
'created': DDO._get_timestamp(),
'creator': publisher_account.address,
'signatureValue': keeper.sign_hash(text, publisher_account),
}
def rot_matrix(angle):
r"""Rotation matrix
This method produces a 2x2 rotation matrix for the given input angle.
Parameters
----------
angle : float
Rotation angle in radians
Returns
-------
np.ndarray 2x2 rotation matrix
Examples
--------
>>> from modopt.math.matrix import rot_matrix
>>> rot_matrix(np.pi / 6)
array([[ 0.8660254, -0.5 ],
[ 0.5 , 0.8660254]])
Notes
-----
Implements the following equation:
.. math::
R(\theta) = \begin{bmatrix}
\cos(\theta) & -\sin(\theta) \\
\sin(\theta) & \cos(\theta)
\end{bmatrix}
"""
return np.around(np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]], dtype='float'), 10)
def track_enrollment(pathway, user_id, course_run_id, url_path=None):
"""
Emit a track event for enterprise course enrollment.
"""
track_event(user_id, 'edx.bi.user.enterprise.onboarding', {
'pathway': pathway,
'url_path': url_path,
'course_run_id': course_run_id,
})
def close(self, figs=True, data=False, ds=False, remove_only=False):
"""
Close this project instance
Parameters
----------
figs: bool
Close the figures
data: bool
delete the arrays from the (main) project
ds: bool
If True, close the dataset as well
remove_only: bool
If True and `figs` is True, the figures are not closed but the
plotters are removed"""
import matplotlib.pyplot as plt
close_ds = ds
for arr in self[:]:
if figs and arr.psy.plotter is not None:
if remove_only:
for fmto in arr.psy.plotter._fmtos:
try:
fmto.remove()
except Exception:
pass
else:
plt.close(arr.psy.plotter.ax.get_figure().number)
arr.psy.plotter = None
if data:
self.remove(arr)
if not self.is_main:
try:
self.main.remove(arr)
except ValueError: # arr not in list
pass
if close_ds:
if isinstance(arr, InteractiveList):
for ds in [val['ds'] for val in six.itervalues(
arr._get_ds_descriptions(
arr.array_info(ds_description=['ds'],
standardize_dims=False)))]:
ds.close()
else:
arr.psy.base.close()
if self.is_main and self is gcp(True) and data:
scp(None)
elif self.is_main and self.is_cmp:
self.oncpchange.emit(self)
elif self.main.is_cmp:
self.oncpchange.emit(self.main)
def get_lldp_neighbor_detail_output_lldp_neighbor_detail_remote_interface_mac(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail")
config = get_lldp_neighbor_detail
output = ET.SubElement(get_lldp_neighbor_detail, "output")
lldp_neighbor_detail = ET.SubElement(output, "lldp-neighbor-detail")
local_interface_name_key = ET.SubElement(lldp_neighbor_detail, "local-interface-name")
local_interface_name_key.text = kwargs.pop('local_interface_name')
remote_interface_name_key = ET.SubElement(lldp_neighbor_detail, "remote-interface-name")
remote_interface_name_key.text = kwargs.pop('remote_interface_name')
remote_interface_mac = ET.SubElement(lldp_neighbor_detail, "remote-interface-mac")
remote_interface_mac.text = kwargs.pop('remote_interface_mac')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def check(self, instance):
"""
Returns a dictionary that looks a lot like what's sent back by
db.serverStatus()
"""
def total_seconds(td):
"""
Returns total seconds of a timedelta in a way that's safe for
Python < 2.7
"""
if hasattr(td, 'total_seconds'):
return td.total_seconds()
else:
            return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / 10.0 ** 6
if 'server' not in instance:
raise Exception("Missing 'server' in mongo config")
# x.509 authentication
ssl_params = {
'ssl': instance.get('ssl', None),
'ssl_keyfile': instance.get('ssl_keyfile', None),
'ssl_certfile': instance.get('ssl_certfile', None),
'ssl_cert_reqs': instance.get('ssl_cert_reqs', None),
'ssl_ca_certs': instance.get('ssl_ca_certs', None),
}
for key, param in list(iteritems(ssl_params)):
if param is None:
del ssl_params[key]
server = instance['server']
username, password, db_name, nodelist, clean_server_name, auth_source = self._parse_uri(
server, sanitize_username=bool(ssl_params)
)
additional_metrics = instance.get('additional_metrics', [])
# Get the list of metrics to collect
collect_tcmalloc_metrics = 'tcmalloc' in additional_metrics
metrics_to_collect = self._get_metrics_to_collect(server, additional_metrics)
# Tagging
tags = instance.get('tags', [])
# ...de-dupe tags to avoid a memory leak
tags = list(set(tags))
if not db_name:
self.log.info('No MongoDB database found in URI. Defaulting to admin.')
db_name = 'admin'
service_check_tags = ["db:%s" % db_name]
service_check_tags.extend(tags)
# ...add the `server` tag to the metrics' tags only
# (it's added in the backend for service checks)
tags.append('server:%s' % clean_server_name)
if nodelist:
host = nodelist[0][0]
port = nodelist[0][1]
service_check_tags = service_check_tags + ["host:%s" % host, "port:%s" % port]
timeout = float(instance.get('timeout', DEFAULT_TIMEOUT)) * 1000
try:
cli = pymongo.mongo_client.MongoClient(
server,
socketTimeoutMS=timeout,
connectTimeoutMS=timeout,
serverSelectionTimeoutMS=timeout,
read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED,
**ssl_params
)
# some commands can only go against the admin DB
admindb = cli['admin']
db = cli[db_name]
except Exception:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=service_check_tags)
raise
# Authenticate
do_auth = True
use_x509 = ssl_params and not password
if not username:
self.log.debug(u"A username is required to authenticate to `%s`", server)
do_auth = False
if do_auth:
if auth_source:
msg = "authSource was specified in the the server URL: using '%s' as the authentication database"
self.log.info(msg, auth_source)
self._authenticate(
cli[auth_source], username, password, use_x509, clean_server_name, service_check_tags
)
else:
self._authenticate(db, username, password, use_x509, clean_server_name, service_check_tags)
try:
status = db.command('serverStatus', tcmalloc=collect_tcmalloc_metrics)
except Exception:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=service_check_tags)
raise
else:
self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.OK, tags=service_check_tags)
if status['ok'] == 0:
raise Exception(status['errmsg'].__str__())
ops = db.current_op()
status['fsyncLocked'] = 1 if ops.get('fsyncLock') else 0
status['stats'] = db.command('dbstats')
dbstats = {db_name: {'stats': status['stats']}}
# Handle replica data, if any
# See
# http://www.mongodb.org/display/DOCS/Replica+Set+Commands#ReplicaSetCommands-replSetGetStatus # noqa
if is_affirmative(instance.get('replica_check', True)):
try:
data = {}
replSet = admindb.command('replSetGetStatus')
if replSet:
primary = None
current = None
# need a new connection to deal with replica sets
setname = replSet.get('set')
cli_rs = pymongo.mongo_client.MongoClient(
server,
socketTimeoutMS=timeout,
connectTimeoutMS=timeout,
serverSelectionTimeoutMS=timeout,
replicaset=setname,
read_preference=pymongo.ReadPreference.NEAREST,
**ssl_params
)
if do_auth:
if auth_source:
self._authenticate(
cli_rs[auth_source], username, password, use_x509, server, service_check_tags
)
else:
self._authenticate(
cli_rs[db_name], username, password, use_x509, server, service_check_tags
)
# Replication set information
replset_name = replSet['set']
replset_state = self.get_state_name(replSet['myState']).lower()
tags.extend([u"replset_name:{0}".format(replset_name), u"replset_state:{0}".format(replset_state)])
# Find nodes: master and current node (ourself)
for member in replSet.get('members'):
if member.get('self'):
current = member
if int(member.get('state')) == 1:
primary = member
# Compute a lag time
if current is not None and primary is not None:
if 'optimeDate' in primary and 'optimeDate' in current:
lag = primary['optimeDate'] - current['optimeDate']
data['replicationLag'] = total_seconds(lag)
if current is not None:
data['health'] = current['health']
data['state'] = replSet['myState']
if current is not None:
total = 0.0
cfg = cli_rs['local']['system.replset'].find_one()
for member in cfg.get('members'):
total += member.get('votes', 1)
if member['_id'] == current['_id']:
data['votes'] = member.get('votes', 1)
data['voteFraction'] = data['votes'] / total
status['replSet'] = data
# Submit events
self._report_replica_set_state(data['state'], clean_server_name, replset_name)
except Exception as e:
if "OperationFailure" in repr(e) and (
"not running with --replSet" in str(e) or "replSetGetStatus" in str(e)
):
pass
else:
raise e
# If these keys exist, remove them for now as they cannot be serialized
try:
status['backgroundFlushing'].pop('last_finished')
except KeyError:
pass
try:
status.pop('localTime')
except KeyError:
pass
dbnames = cli.database_names()
self.gauge('mongodb.dbs', len(dbnames), tags=tags)
for db_n in dbnames:
db_aux = cli[db_n]
dbstats[db_n] = {'stats': db_aux.command('dbstats')}
# Go through the metrics and save the values
for metric_name in metrics_to_collect:
# each metric is of the form: x.y.z with z optional
# and can be found at status[x][y][z]
value = status
if metric_name.startswith('stats'):
continue
else:
try:
for c in metric_name.split("."):
value = value[c]
except KeyError:
continue
# value is now status[x][y][z]
if not isinstance(value, (int, long, float)):
raise TypeError(
u"{0} value is a {1}, it should be an int, a float or a long instead.".format(
metric_name, type(value)
)
)
# Submit the metric
submit_method, metric_name_alias = self._resolve_metric(metric_name, metrics_to_collect)
submit_method(self, metric_name_alias, value, tags=tags)
for st, value in iteritems(dbstats):
for metric_name in metrics_to_collect:
if not metric_name.startswith('stats.'):
continue
try:
val = value['stats'][metric_name.split('.')[1]]
except KeyError:
continue
# value is now status[x][y][z]
if not isinstance(val, (int, long, float)):
raise TypeError(
u"{0} value is a {1}, it should be an int, a float or a long instead.".format(
metric_name, type(val)
)
)
# Submit the metric
metrics_tags = tags + [
u"cluster:db:{0}".format(st), # FIXME 6.0 - keep for backward compatibility
u"db:{0}".format(st),
]
submit_method, metric_name_alias = self._resolve_metric(metric_name, metrics_to_collect)
submit_method(self, metric_name_alias, val, tags=metrics_tags)
if is_affirmative(instance.get('collections_indexes_stats')):
mongo_version = cli.server_info().get('version', '0.0')
if LooseVersion(mongo_version) >= LooseVersion("3.2"):
self._collect_indexes_stats(instance, db, tags)
else:
msg = "'collections_indexes_stats' is only available starting from mongo 3.2: your mongo version is %s"
self.log.error(msg, mongo_version)
# Report the usage metrics for dbs/collections
if 'top' in additional_metrics:
try:
dbtop = admindb.command('top')
for ns, ns_metrics in iteritems(dbtop['totals']):
if "." not in ns:
continue
# configure tags for db name and collection name
dbname, collname = ns.split(".", 1)
ns_tags = tags + ["db:%s" % dbname, "collection:%s" % collname]
# iterate over DBTOP metrics
for m in self.TOP_METRICS:
# each metric is of the form: x.y.z with z optional
# and can be found at ns_metrics[x][y][z]
value = ns_metrics
try:
for c in m.split("."):
value = value[c]
except Exception:
continue
# value is now status[x][y][z]
if not isinstance(value, (int, long, float)):
raise TypeError(
u"{0} value is a {1}, it should be an int, a float or a long instead.".format(
m, type(value)
)
)
# Submit the metric
submit_method, metric_name_alias = self._resolve_metric(m, metrics_to_collect, prefix="usage")
submit_method(self, metric_name_alias, value, tags=ns_tags)
# Keep old incorrect metric
if metric_name_alias.endswith('countps'):
GAUGE(self, metric_name_alias[:-2], value, tags=ns_tags)
except Exception as e:
self.log.warning('Failed to record `top` metrics %s' % str(e))
    if 'local' in dbnames:  # it might not be if we are connecting through mongos
# Fetch information analogous to Mongo's db.getReplicationInfo()
localdb = cli['local']
oplog_data = {}
for ol_collection_name in ("oplog.rs", "oplog.$main"):
ol_options = localdb[ol_collection_name].options()
if ol_options:
break
if ol_options:
try:
oplog_data['logSizeMB'] = round_value(ol_options['size'] / 2.0 ** 20, 2)
oplog = localdb[ol_collection_name]
oplog_data['usedSizeMB'] = round_value(
localdb.command("collstats", ol_collection_name)['size'] / 2.0 ** 20, 2
)
op_asc_cursor = oplog.find({"ts": {"$exists": 1}}).sort("$natural", pymongo.ASCENDING).limit(1)
op_dsc_cursor = oplog.find({"ts": {"$exists": 1}}).sort("$natural", pymongo.DESCENDING).limit(1)
try:
first_timestamp = op_asc_cursor[0]['ts'].as_datetime()
last_timestamp = op_dsc_cursor[0]['ts'].as_datetime()
oplog_data['timeDiff'] = total_seconds(last_timestamp - first_timestamp)
except (IndexError, KeyError):
# if the oplog collection doesn't have any entries
# if an object in the collection doesn't have a ts value, we ignore it
pass
except KeyError:
# encountered an error trying to access options.size for the oplog collection
self.log.warning(u"Failed to record `ReplicationInfo` metrics.")
for m, value in iteritems(oplog_data):
submit_method, metric_name_alias = self._resolve_metric('oplog.%s' % m, metrics_to_collect)
submit_method(self, metric_name_alias, value, tags=tags)
else:
self.log.debug('"local" database not in dbnames. Not collecting ReplicationInfo metrics')
# get collection level stats
try:
# Ensure that you're on the right db
db = cli[db_name]
        # grab the collections from the configuration
coll_names = instance.get('collections', [])
# loop through the collections
for coll_name in coll_names:
# grab the stats from the collection
stats = db.command("collstats", coll_name)
# loop through the metrics
for m in self.collection_metrics_names:
coll_tags = tags + ["db:%s" % db_name, "collection:%s" % coll_name]
value = stats.get(m, None)
if not value:
continue
# if it's the index sizes, then it's a dict.
if m == 'indexSizes':
submit_method, metric_name_alias = self._resolve_metric(
'collection.%s' % m, self.COLLECTION_METRICS
)
# loop through the indexes
for idx, val in iteritems(value):
# we tag the index
idx_tags = coll_tags + ["index:%s" % idx]
submit_method(self, metric_name_alias, val, tags=idx_tags)
else:
submit_method, metric_name_alias = self._resolve_metric(
'collection.%s' % m, self.COLLECTION_METRICS
)
submit_method(self, metric_name_alias, value, tags=coll_tags)
except Exception as e:
self.log.warning(u"Failed to record `collection` metrics.")
self.log.exception(e)
def _validate_header(self, hed):
"""
Validate the list that represents the table header.
:param hed: The list that represents the table header.
:type hed: list(list(hatemile.util.html.htmldomelement.HTMLDOMElement))
:return: True if the table header is valid or False if the table header
is not valid.
:rtype: bool
"""
# pylint: disable=no-self-use
if not bool(hed):
return False
length = -1
for row in hed:
if not bool(row):
return False
elif length == -1:
length = len(row)
elif len(row) != length:
return False
return True
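Since the method never touches self or the element contents (only row presence and lengths), its behaviour can be sketched with plain lists:

print(_validate_header(None, [['Name', 'Age'], ['Given', 'Years']]))  # True
print(_validate_header(None, [['Name', 'Age'], ['Only']]))            # False: ragged rows
print(_validate_header(None, []))                                     # False: empty header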
def _minimal_common_integer_splitted(si_0, si_1):
"""
Calculates the minimal integer that appears in both StridedIntervals.
It's equivalent to finding an integral solution for equation `ax + b = cy + d` that makes `ax + b` minimal
si_0.stride, si_1.stride being a and c, and si_0.lower_bound, si_1.lower_bound being b and d, respectively.
Upper bounds are used to check whether the minimal common integer exceeds the bound or not. None is returned
if no minimal common integers can be found within the range.
Some assumptions:
# - None of the StridedIntervals straddles the south pole. Consequently, we have x <= max_int(si.bits) and y <=
# max_int(si.bits)
# - a, b, c, d are all positive integers
# - x >= 0, y >= 0
:param StridedInterval si_0: the first StridedInterval
    :param StridedInterval si_1: the second StridedInterval
:return: the minimal common integer, or None if there is no common integer
"""
a, c = si_0.stride, si_1.stride
b, d = si_0.lower_bound, si_1.lower_bound
# if any of them is an integer
if si_0.is_integer:
if si_1.is_integer:
return None if si_0.lower_bound != si_1.lower_bound else si_0.lower_bound
elif si_0.lower_bound >= si_1.lower_bound and \
si_0.lower_bound <= si_1.upper_bound and \
(si_0.lower_bound - si_1.lower_bound) % si_1.stride == 0:
return si_0.lower_bound
else:
return None
elif si_1.is_integer:
return StridedInterval._minimal_common_integer_splitted(si_1, si_0)
# shortcut
if si_0.upper_bound < si_1.lower_bound or si_1.upper_bound < si_0.lower_bound:
# They don't overlap at all
return None
if (d - b) % StridedInterval.gcd(a, c) != 0:
# They don't overlap
return None
"""
Given two strided intervals a = sa[lba, uba] and b = sb[lbb, ubb], the first integer shared
by them is found by finding the minimum values of ka and kb which solve the equation:
ka * sa + lba = kb * sb + lbb
In particular one can solve the above diophantine equation and find the parameterized solutions
of ka and kb, with respect to a parameter t.
The minimum natural value of the parameter t which gives two positive natural values of ka and kb
is used to resolve ka and kb, and finally to solve the above equation and get the minimum shared integer.
"""
x, y = StridedInterval.diop_natural_solution_linear(-(b-d), a, -c)
    if x is None or y is None:  # no integral solution exists
return None
first_integer = x * a + b
assert first_integer == y*c + d
if first_integer >= si_0.lower_bound and first_integer <= si_0.upper_bound and \
first_integer >= si_1.lower_bound and first_integer <= si_1.upper_bound:
return first_integer
else:
return None
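The diophantine idea can be checked by brute force for small strides. With stride/base pairs (4, 2) and (6, 0), i.e. the sequences 2, 6, 10, ... and 0, 6, 12, ..., the minimal common integer is 6 (a standalone illustration, not the class method):

common = next((v for v in range(1000)
               if (v - 2) % 4 == 0 and v % 6 == 0), None)
print(common)  # 6, i.e. x = 1, y = 1 in 4x + 2 = 6y + 0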
def aggregate(self):
"""Create a new SampleSet with repeated samples aggregated.
Returns:
:obj:`.SampleSet`
Note:
:attr:`.SampleSet.record.num_occurrences` are accumulated but no
other fields are.
"""
_, indices, inverse = np.unique(self.record.sample, axis=0,
return_index=True, return_inverse=True)
# unique also sorts the array which we don't want, so we undo the sort
order = np.argsort(indices)
indices = indices[order]
record = self.record[indices]
# fix the number of occurrences
record.num_occurrences = 0
for old_idx, new_idx in enumerate(inverse):
new_idx = order[new_idx]
record[new_idx].num_occurrences += self.record[old_idx].num_occurrences
# dev note: we don't check the energies as they should be the same
# for individual samples
return type(self)(record, self.variables, copy.deepcopy(self.info),
self.vartype)
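A quick sketch with dimod (assuming it is installed):

import dimod

ss = dimod.SampleSet.from_samples(
    [{'a': +1}, {'a': +1}, {'a': -1}], dimod.SPIN, energy=[0.0, 0.0, 1.0])
agg = ss.aggregate()
print(agg.record.num_occurrences)  # [2 1]: the two identical samples merged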
def _config_sortable(self, sortable):
"""Configure a new sortable state"""
for col in self["columns"]:
command = (lambda c=col: self._sort_column(c, True)) if sortable else ""
self.heading(col, command=command)
self._sortable = sortable
def from_config(config, **options):
"""Instantiate an `RotatedEventStore` from config.
Parameters:
_config -- the configuration file options read from file(s).
**options -- various options given to the specific event store. Shall
not be used with this event store. Warning will be logged
for every extra non-recognized option. The only required
key to this function is 'path'.
returns -- a newly instantiated `RotatedEventStore`.
"""
expected_args = ('prefix', 'realclass')
for arg in expected_args:
if arg not in options:
msg = "Required option missing: {0}"
raise rconfig.ConfigurationError(msg.format(arg))
# Not logging unrecognized options here, because they might be used
# by the real event store instantiated below.
classpath = options['realclass']
classpath_pieces = classpath.split('.')
classname = classpath_pieces[-1]
modulepath = '.'.join(classpath_pieces[0:-1])
module = importlib.import_module(modulepath)
estore_class = getattr(module, classname)
return RotatedEventStore(lambda fname: estore_class(fname),
options['path'], options['prefix'])
def format_log_context(msg, connection=None, keyspace=None):
"""Format log message to add keyspace and connection context"""
connection_info = connection or 'DEFAULT_CONNECTION'
if keyspace:
msg = '[Connection: {0}, Keyspace: {1}] {2}'.format(connection_info, keyspace, msg)
else:
msg = '[Connection: {0}] {1}'.format(connection_info, msg)
return msg
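For example:

print(format_log_context('query failed', connection='analytics', keyspace='events'))
# [Connection: analytics, Keyspace: events] query failed
print(format_log_context('query failed'))
# [Connection: DEFAULT_CONNECTION] query failed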
def make_datalab_help_action(self):
"""Custom action for --datalab-help.
    The action outputs the package-specific parameters and will be part of the
    "%%ml train" help string.
"""
datalab_help = self.datalab_help
epilog = self.datalab_epilog
class _CustomAction(argparse.Action):
def __init__(self, option_strings, dest, help=None):
super(_CustomAction, self).__init__(
option_strings=option_strings, dest=dest, nargs=0, help=help)
def __call__(self, parser, args, values, option_string=None):
print('\n\n'.join(datalab_help))
if epilog:
print(epilog)
# We have printed all help string datalab needs. If we don't quit, it will complain about
# missing required arguments later.
quit()
return _CustomAction
def do_checkout(self, repo):
'''
Common code for git_pillar/winrepo to handle locking and checking out
of a repo.
'''
time_start = time.time()
while time.time() - time_start <= 5:
try:
return repo.checkout()
except GitLockError as exc:
if exc.errno == errno.EEXIST:
time.sleep(0.1)
continue
else:
log.error(
'Error %d encountered while obtaining checkout '
'lock for %s remote \'%s\': %s',
exc.errno,
repo.role,
repo.id,
exc,
exc_info=True
)
break
else:
log.error(
'Timed out waiting for checkout lock to be released for '
'%s remote \'%s\'. If this error persists, run \'salt-run '
'cache.clear_git_lock %s type=checkout\' to clear it.',
self.role, repo.id, self.role
)
return None
def _proxy(self):
"""
    Generate an instance context for the instance. The context is capable of
    performing various actions; all instance actions are proxied to it.
:returns: EnvironmentContext for this EnvironmentInstance
:rtype: twilio.rest.serverless.v1.service.environment.EnvironmentContext
"""
if self._context is None:
self._context = EnvironmentContext(
self._version,
service_sid=self._solution['service_sid'],
sid=self._solution['sid'],
)
return self._context
def dodirot(D, I, Dbar, Ibar):
"""
Rotate a direction (declination, inclination) by the difference between
dec=0 and inc = 90 and the provided desired mean direction
Parameters
----------
D : declination to be rotated
I : inclination to be rotated
Dbar : declination of desired mean
Ibar : inclination of desired mean
Returns
----------
drot, irot : rotated declination and inclination
"""
d, irot = dogeo(D, I, Dbar, 90. - Ibar)
    # normalize the rotated declination into the range [0., 360.)
    drot = (d - 180.) % 360.
return drot, irot
|
Rotate a direction (declination, inclination) by the difference between
dec=0 and inc = 90 and the provided desired mean direction
Parameters
----------
D : declination to be rotated
I : inclination to be rotated
Dbar : declination of desired mean
Ibar : inclination of desired mean
Returns
----------
drot, irot : rotated declination and inclination
|
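A hedged usage sketch for dodirot above (pmagpy's dogeo is assumed importable in
the same namespace; the numbers are illustrative). By construction the rotation
carries dec = 0, inc = 90 onto the desired mean:

# drot, irot = dodirot(0., 90., 350., 60.)
# print(drot, irot)  # approximately (350.0, 60.0), i.e. the desired mean itself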
def get_bond_symmetry(site_symmetry,
lattice,
positions,
atom_center,
atom_disp,
symprec=1e-5):
"""
Bond symmetry is the symmetry operations that keep the symmetry
of the cell containing two fixed atoms.
"""
bond_sym = []
pos = positions
for rot in site_symmetry:
rot_pos = (np.dot(pos[atom_disp] - pos[atom_center], rot.T) +
pos[atom_center])
diff = pos[atom_disp] - rot_pos
diff -= np.rint(diff)
dist = np.linalg.norm(np.dot(lattice, diff))
if dist < symprec:
bond_sym.append(rot)
return np.array(bond_sym)
|
Bond symmetry is the symmetry operations that keep the symmetry
of the cell containing two fixed atoms.
|
def in_coord_list_pbc(fcoord_list, fcoord, atol=1e-8):
"""
Tests if a particular fractional coord is within a fractional coord_list.
Args:
fcoord_list: List of fractional coords to test
fcoord: A specific fractional coord to test.
atol: Absolute tolerance. Defaults to 1e-8.
Returns:
True if coord is in the coord list.
"""
return len(find_in_coord_list_pbc(fcoord_list, fcoord, atol=atol)) > 0
|
Tests if a particular fractional coord is within a fractional coord_list.
Args:
fcoord_list: List of fractional coords to test
fcoord: A specific fractional coord to test.
atol: Absolute tolerance. Defaults to 1e-8.
Returns:
True if coord is in the coord list.
|
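A hedged example for in_coord_list_pbc above (find_in_coord_list_pbc from the
host library is assumed available). Fractional coordinates differing by whole
lattice translations match under periodic boundary conditions:

# in_coord_list_pbc([[0.1, 0.2, 0.3]], [1.1, -0.8, 0.3])   # True: differs by (1, -1, 0)
# in_coord_list_pbc([[0.1, 0.2, 0.3]], [0.1, 0.25, 0.3])   # False at the default atol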
def serialize_relations(pid):
"""Serialize the relations for given PID."""
data = {}
relations = PIDRelation.get_child_relations(pid).all()
for relation in relations:
rel_cfg = resolve_relation_type_config(relation.relation_type)
dump_relation(rel_cfg.api(relation.parent),
rel_cfg, pid, data)
parent_relations = PIDRelation.get_parent_relations(pid).all()
    rel_cfgs = set([resolve_relation_type_config(p.relation_type)
                    for p in parent_relations])
for rel_cfg in rel_cfgs:
dump_relation(rel_cfg.api(pid), rel_cfg, pid, data)
return data
|
Serialize the relations for given PID.
|
def show_feature_destibution(self, data=None):
    """!
    @brief Shows feature distribution.
    @details Only features in 1D, 2D, 3D space can be visualized.
    @param[in] data (list): List of points used for visualization; if it is not specified then only features are displayed.
    """
    visualizer = cluster_visualizer()
    print("amount of nodes: ", self.__amount_nodes)
    if data is not None:
        visualizer.append_cluster(data, marker='x')
    for level in range(0, self.height):
        level_nodes = self.get_level_nodes(level)
        centers = [node.feature.get_centroid() for node in level_nodes]
        visualizer.append_cluster(centers, None, markersize=(self.height - level + 1) * 5)
    visualizer.show()
|
!
@brief Shows feature distribution.
@details Only features in 1D, 2D, 3D space can be visualized.
@param[in] data (list): List of points used for visualization; if it is not specified then only features are displayed.
|
def to_json(self):
"""
Serialises a HucitWork to a JSON formatted string.
"""
titles = self.get_titles()
return json.dumps({
"uri" : self.subject
, "urn" : str(self.get_urn())
, "titles" : [{"language":lang, "label":label} for lang, label in titles]
, "title_abbreviations" : self.get_abbreviations()
}, indent=2)
|
Serialises a HucitWork to a JSON formatted string.
|
def set_info_page(self):
"""Set current info_page."""
if self.info_page is not None:
self.infowidget.setHtml(
self.info_page,
QUrl.fromLocalFile(self.css_path)
)
|
Set current info_page.
|
def _expand_user(filepath_or_buffer):
"""Return the argument with an initial component of ~ or ~user
replaced by that user's home directory.
Parameters
----------
filepath_or_buffer : object to be converted if possible
Returns
-------
expanded_filepath_or_buffer : an expanded filepath or the
input if not expandable
"""
if isinstance(filepath_or_buffer, str):
return os.path.expanduser(filepath_or_buffer)
return filepath_or_buffer
|
Return the argument with an initial component of ~ or ~user
replaced by that user's home directory.
Parameters
----------
filepath_or_buffer : object to be converted if possible
Returns
-------
expanded_filepath_or_buffer : an expanded filepath or the
input if not expandable
|
def add(args):
"""
Add a new feed
"""
session = c.Session(args)
if args["name"] in session.feeds.sections():
sys.exit("You already have a feed with that name.")
if args["name"] in ["all", "DEFAULT"]:
sys.exit(
("greg uses ""{}"" for a special purpose."
"Please choose another name for your feed.").format(args["name"]))
entry = {}
for key, value in args.items():
if value is not None and key != "func" and key != "name":
entry[key] = value
session.feeds[args["name"]] = entry
with open(session.data_filename, 'w') as configfile:
session.feeds.write(configfile)
|
Add a new feed
|
def get_branch(self):
"""
:return:
"""
if self.repo.head.is_detached:
if os.getenv('GIT_BRANCH'):
branch = os.getenv('GIT_BRANCH')
elif os.getenv('BRANCH_NAME'):
branch = os.getenv('BRANCH_NAME')
elif os.getenv('TRAVIS_BRANCH'):
branch = os.getenv('TRAVIS_BRANCH')
else:
branch = "HEAD"
else:
branch = str(self.repo.active_branch)
return branch.replace("/", "_")
|
:return: the current branch name with '/' replaced by '_'; when HEAD is detached, falls back to the GIT_BRANCH, BRANCH_NAME or TRAVIS_BRANCH environment variables, or "HEAD".
|
def cmd_alt(self, args):
'''show altitude'''
print("Altitude: %.1f" % self.status.altitude)
qnh_pressure = self.get_mav_param('AFS_QNH_PRESSURE', None)
if qnh_pressure is not None and qnh_pressure > 0:
ground_temp = self.get_mav_param('GND_TEMP', 21)
pressure = self.master.field('SCALED_PRESSURE', 'press_abs', 0)
qnh_alt = self.altitude_difference(qnh_pressure, pressure, ground_temp)
print("QNH Alt: %u meters %u feet for QNH pressure %.1f" % (qnh_alt, qnh_alt*3.2808, qnh_pressure))
print("QNH Estimate: %.1f millibars" % self.qnh_estimate())
|
show altitude
|
def get_list(self, key, fallback=None, split=","):
"""
Retrieve a value in list form.
The interpolated value will be split on some key (by default, ',') and
the resulting list will be returned.
Arguments:
key - the key to return
fallback - The result to return if key isn't in the component. By
default, this will be an empty list.
    split - The separator to split the value on. By default, a comma (,).
"""
fallback = fallback or []
raw = self.get(key, None)
if raw:
return [value.strip() for value in raw.split(split)]
return fallback
|
Retrieve a value in list form.
The interpolated value will be split on some key (by default, ',') and
the resulting list will be returned.
Arguments:
key - the key to return
fallback - The result to return if key isn't in the component. By
default, this will be an empty list.
split - The key to split the value on. By default, a comma (,).
|
def get_kbd_values_json(kbname, searchwith=""):
"""Return values from searching a dynamic kb as a json-formatted string.
This IS probably the method you want.
:param kbname: name of the knowledge base
:param searchwith: a term to search with
"""
res = get_kbd_values(kbname, searchwith)
return json.dumps(res)
|
Return values from searching a dynamic kb as a json-formatted string.
This IS probably the method you want.
:param kbname: name of the knowledge base
:param searchwith: a term to search with
|
def extract_rar (archive, compression, cmd, verbosity, interactive, outdir):
"""Extract a RAR archive."""
cmdlist = [cmd, 'x']
if not interactive:
cmdlist.extend(['-p-', '-y'])
cmdlist.extend(['--', os.path.abspath(archive)])
return (cmdlist, {'cwd': outdir})
|
Extract a RAR archive.
|
async def _seed2did(self) -> str:
"""
Derive DID, as per indy-sdk, from seed.
:return: DID
"""
rv = None
dids_with_meta = json.loads(await did.list_my_dids_with_meta(self.handle)) # list
if dids_with_meta:
for did_with_meta in dids_with_meta: # dict
if 'metadata' in did_with_meta:
try:
meta = json.loads(did_with_meta['metadata'])
if isinstance(meta, dict) and meta.get('seed', None) == self._seed:
rv = did_with_meta.get('did')
except json.decoder.JSONDecodeError:
continue # it's not one of ours, carry on
if not rv: # seed not in metadata, generate did again on temp wallet
temp_wallet = await Wallet(
self._seed,
'{}.seed2did'.format(self.name),
None,
{'auto-remove': True}).create()
rv = temp_wallet.did
await temp_wallet.remove()
return rv
|
Derive DID, as per indy-sdk, from seed.
:return: DID
|
def draw_mask(self, image_shape, size_lines=1, size_points=0,
raise_if_out_of_image=False):
"""
Draw this line segment as a binary image mask.
Parameters
----------
image_shape : tuple of int
The shape of the image onto which to draw the line mask.
size_lines : int, optional
Thickness of the line segments.
size_points : int, optional
Size of the points in pixels.
raise_if_out_of_image : bool, optional
Whether to raise an error if the line string is fully
outside of the image. If set to False, no error will be raised and
only the parts inside the image will be drawn.
Returns
-------
ndarray
Boolean line mask of shape `image_shape` (no channel axis).
"""
heatmap = self.draw_heatmap_array(
image_shape,
alpha_lines=1.0, alpha_points=1.0,
size_lines=size_lines, size_points=size_points,
antialiased=False,
raise_if_out_of_image=raise_if_out_of_image)
return heatmap > 0.5
|
Draw this line segment as a binary image mask.
Parameters
----------
image_shape : tuple of int
The shape of the image onto which to draw the line mask.
size_lines : int, optional
Thickness of the line segments.
size_points : int, optional
Size of the points in pixels.
raise_if_out_of_image : bool, optional
Whether to raise an error if the line string is fully
outside of the image. If set to False, no error will be raised and
only the parts inside the image will be drawn.
Returns
-------
ndarray
Boolean line mask of shape `image_shape` (no channel axis).
|
async def collect_wallets(self, uid):
"""
        Asynchronous generator yielding one wallet document per configured
        coin id for the given user id.
"""
logging.debug(self.types)
logging.debug(uid)
for coinid in self.types:
logging.debug(coinid)
await asyncio.sleep(0.5)
# Connect to appropriate database
database = self.client[self.collection]
logging.debug(database)
collection = database[coinid]
logging.debug(collection)
# Get wallets
wallet = await collection.find_one({"uid":int(uid)})
logging.debug(wallet)
wallet["amount_active"] = int(wallet["amount_active"])
wallet["amount_frozen"] = int(wallet["amount_frozen"])
del wallet["_id"]
yield wallet
|
Asynchronous generator yielding one wallet document per configured coin id for the given user id.
|
def get_version_status(
package_descriptors, targets, repos_data,
strip_version=False, strip_os_code_name=False):
"""
For each package and target check if it is affected by a sync.
This is the case when the package version in the testing repo is different
from the version in the main repo.
:return: a dict indexed by package names containing
dicts indexed by targets containing
a list of status strings (one for each repo)
"""
status = {}
for package_descriptor in package_descriptors.values():
pkg_name = package_descriptor.pkg_name
debian_pkg_name = package_descriptor.debian_pkg_name
ref_version = package_descriptor.version
if strip_version:
ref_version = _strip_version_suffix(ref_version)
status[pkg_name] = {}
for target in targets:
statuses = []
for repo_data in repos_data:
version = repo_data.get(target, {}).get(debian_pkg_name, None)
if strip_version:
version = _strip_version_suffix(version)
if strip_os_code_name:
version = _strip_os_code_name_suffix(
version, target.os_code_name)
if ref_version:
if not version:
statuses.append('missing')
elif version.startswith(ref_version): # including equal
statuses.append('equal')
else:
if _version_is_gt_other(version, ref_version):
statuses.append('higher')
else:
statuses.append('lower')
else:
if not version:
statuses.append('ignore')
else:
statuses.append('obsolete')
status[pkg_name][target] = statuses
return status
|
For each package and target check if it is affected by a sync.
This is the case when the package version in the testing repo is different
from the version in the main repo.
:return: a dict indexed by package names containing
dicts indexed by targets containing
a list of status strings (one for each repo)
|
def kmcop(args):
"""
%prog kmcop *.kmc_suf
Intersect or union kmc indices.
"""
p = OptionParser(kmcop.__doc__)
p.add_option("--action", choices=("union", "intersect"),
default="union", help="Action")
p.add_option("-o", default="results", help="Output name")
opts, args = p.parse_args(args)
if len(args) < 2:
sys.exit(not p.print_help())
indices = args
ku = KMCComplex(indices)
ku.write(opts.o, action=opts.action)
|
%prog kmcop *.kmc_suf
Intersect or union kmc indices.
|
def predict(fqdn, result, *argl, **argd):
"""Analyzes the result of a generic predict operation performed by
`sklearn`.
Args:
        fqdn (str): fully-qualified name of the method that was called.
result: result of calling the method with `fqdn`.
argl (tuple): positional arguments passed to the method call.
argd (dict): keyword arguments passed to the method call.
"""
#Check the arguments to see what kind of data we are working with, then
#choose the appropriate function below to return the analysis dictionary.
out = None
if len(argl) > 0:
machine = argl[0]
if isclassifier(machine):
out = classify_predict(fqdn, result, None, *argl, **argd)
elif isregressor(machine):
out = regress_predict(fqdn, result, None, *argl, **argd)
return out
|
Analyzes the result of a generic predict operation performed by
`sklearn`.
Args:
fqdn (str): fully-qualified name of the method that was called.
result: result of calling the method with `fqdn`.
argl (tuple): positional arguments passed to the method call.
argd (dict): keyword arguments passed to the method call.
|
def draw_rect(self, color, world_rect, thickness=0):
"""Draw a rectangle using world coordinates."""
tl = self.world_to_surf.fwd_pt(world_rect.tl).round()
br = self.world_to_surf.fwd_pt(world_rect.br).round()
rect = pygame.Rect(tl, br - tl)
pygame.draw.rect(self.surf, color, rect, thickness)
|
Draw a rectangle using world coordinates.
|
def package_releases(request, package_name, show_hidden=False):
"""
Retrieve a list of the releases registered for the given package_name.
    Returns a list with all version strings; the show_hidden flag is accepted
    for API compatibility but is not honored by the current implementation."""
session = DBSession()
package = Package.by_name(session, package_name)
return [rel.version for rel in package.sorted_releases]
|
Retrieve a list of the releases registered for the given package_name.
Returns a list with all version strings; the show_hidden flag is accepted
for API compatibility but is not honored by the current implementation.
|
async def add_participant(self, display_name: str = None, username: str = None, email: str = None, seed: int = 0, misc: str = None, **params):
""" add a participant to the tournament
|methcoro|
Args:
display_name: The name displayed in the bracket/schedule - not required if email or challonge_username is provided. Must be unique per tournament.
username: Provide this if the participant has a Challonge account. He or she will be invited to the tournament.
email: Providing this will first search for a matching Challonge account. If one is found, this will have the same effect as the "challonge_username" attribute. If one is not found, the "new-user-email" attribute will be set, and the user will be invited via email to create an account.
seed: The participant's new seed. Must be between 1 and the current number of participants (including the new record). Overwriting an existing seed will automatically bump other participants as you would expect.
misc: Max: 255 characters. Multi-purpose field that is only visible via the API and handy for site integration (e.g. key to your users table)
params: optional params (see http://api.challonge.com/v1/documents/participants/create)
Returns:
Participant: newly created participant
Raises:
APIException
"""
    assert_or_raise((display_name is None) ^ (username is None),
                    ValueError,
                    'Exactly one of display_name or username must be provided')
params.update({
'name': display_name or '',
'challonge_username': username or '',
})
if email is not None:
params.update({'email': email})
if seed != 0:
params.update({'seed': seed})
if misc is not None:
params.update({'misc': misc})
res = await self.connection('POST',
'tournaments/{}/participants'.format(self._id),
'participant',
**params)
new_p = self._create_participant(res)
self._add_participant(new_p)
return new_p
|
add a participant to the tournament
|methcoro|
Args:
display_name: The name displayed in the bracket/schedule - not required if email or challonge_username is provided. Must be unique per tournament.
username: Provide this if the participant has a Challonge account. He or she will be invited to the tournament.
email: Providing this will first search for a matching Challonge account. If one is found, this will have the same effect as the "challonge_username" attribute. If one is not found, the "new-user-email" attribute will be set, and the user will be invited via email to create an account.
seed: The participant's new seed. Must be between 1 and the current number of participants (including the new record). Overwriting an existing seed will automatically bump other participants as you would expect.
misc: Max: 255 characters. Multi-purpose field that is only visible via the API and handy for site integration (e.g. key to your users table)
params: optional params (see http://api.challonge.com/v1/documents/participants/create)
Returns:
Participant: newly created participant
Raises:
APIException
|
def percentile(values=None, percentile=None):
"""Calculates a simplified weighted average percentile
"""
if values in [None, tuple(), []] or len(values) < 1:
raise InsufficientData(
"Expected a sequence of at least 1 integers, got {0!r}".format(values))
if percentile is None:
raise ValueError("Expected a percentile choice, got {0}".format(percentile))
sorted_values = sorted(values)
rank = len(values) * percentile / 100
if rank > 0:
index = rank - 1
if index < 0:
return sorted_values[0]
else:
index = rank
if index % 1 == 0:
return sorted_values[int(index)]
else:
fractional = index % 1
integer = int(index - fractional)
lower = sorted_values[integer]
higher = sorted_values[integer + 1]
return lower + fractional * (higher - lower)
|
Calculates a simplified weighted average percentile
|
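Worked examples for percentile above, tracing the interpolation rule (assuming
Python 3 true division in len(values) * percentile / 100):

print(percentile(values=[15, 20, 35, 40, 50], percentile=40))  # rank=2.0, index=1.0 -> 20
print(percentile(values=[1, 2, 3, 4], percentile=75))          # rank=3.0, index=2.0 -> 3
print(percentile(values=[1, 2, 3, 4], percentile=60))          # rank=2.4 -> 2 + 0.4*(3-2), approx 2.4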
def flowwrite(flow, filename, quantize=False, concat_axis=0, *args, **kwargs):
"""Write optical flow to file.
If the flow is not quantized, it will be saved as a .flo file losslessly,
otherwise a jpeg image which is lossy but of much smaller size. (dx and dy
will be concatenated horizontally into a single image if quantize is True.)
Args:
flow (ndarray): (h, w, 2) array of optical flow.
filename (str): Output filepath.
quantize (bool): Whether to quantize the flow and save it to 2 jpeg
images. If set to True, remaining args will be passed to
:func:`quantize_flow`.
concat_axis (int): The axis that dx and dy are concatenated,
can be either 0 or 1. Ignored if quantize is False.
"""
if not quantize:
with open(filename, 'wb') as f:
f.write('PIEH'.encode('utf-8'))
np.array([flow.shape[1], flow.shape[0]], dtype=np.int32).tofile(f)
flow = flow.astype(np.float32)
flow.tofile(f)
f.flush()
else:
assert concat_axis in [0, 1]
dx, dy = quantize_flow(flow, *args, **kwargs)
dxdy = np.concatenate((dx, dy), axis=concat_axis)
imwrite(dxdy, filename)
|
Write optical flow to file.
If the flow is not quantized, it will be saved as a .flo file losslessly,
otherwise a jpeg image which is lossy but of much smaller size. (dx and dy
will be concatenated horizontally into a single image if quantize is True.)
Args:
flow (ndarray): (h, w, 2) array of optical flow.
filename (str): Output filepath.
quantize (bool): Whether to quantize the flow and save it to 2 jpeg
images. If set to True, remaining args will be passed to
:func:`quantize_flow`.
concat_axis (int): The axis that dx and dy are concatenated,
can be either 0 or 1. Ignored if quantize is False.
|
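A minimal companion sketch (an assumption, not part of the original library)
that reads back what flowwrite above produces, illustrating the .flo layout:
a 4-byte 'PIEH' magic, two int32 values (width, height), then h*w*2 float32s:

import numpy as np

def read_flo(filename):
    """Read a .flo file back into an (h, w, 2) float32 array."""
    with open(filename, 'rb') as f:
        assert f.read(4) == b'PIEH', 'invalid .flo magic'
        w, h = np.fromfile(f, dtype=np.int32, count=2)
        flow = np.fromfile(f, dtype=np.float32, count=int(w) * int(h) * 2)
    return flow.reshape(int(h), int(w), 2)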
def create_context(self, state_hash, base_contexts, inputs, outputs):
"""Create a ExecutionContext to run a transaction against.
Args:
state_hash: (str): Merkle root to base state on.
base_contexts (list of str): Context ids of contexts that will
have their state applied to make this context.
inputs (list of str): Addresses that can be read from.
outputs (list of str): Addresses that can be written to.
Returns:
context_id (str): the unique context_id of the session
"""
for address in inputs:
if not self.namespace_is_valid(address):
raise CreateContextException(
"Address or namespace {} listed in inputs is not "
"valid".format(address))
for address in outputs:
if not self.namespace_is_valid(address):
raise CreateContextException(
"Address or namespace {} listed in outputs is not "
"valid".format(address))
addresses_to_find = [add for add in inputs if len(add) == 70]
address_values, reads = self._find_address_values_in_chain(
base_contexts=base_contexts,
addresses_to_find=addresses_to_find)
context = ExecutionContext(
state_hash=state_hash,
read_list=inputs,
write_list=outputs,
base_context_ids=base_contexts)
contexts_asked_not_found = [cid for cid in base_contexts
if cid not in self._contexts]
if contexts_asked_not_found:
raise KeyError(
"Basing a new context off of context ids {} "
"that are not in context manager".format(
contexts_asked_not_found))
context.create_initial(address_values)
self._contexts[context.session_id] = context
if reads:
context.create_prefetch(reads)
self._address_queue.put_nowait(
(context.session_id, state_hash, reads))
return context.session_id
|
Create an ExecutionContext to run a transaction against.
Args:
state_hash: (str): Merkle root to base state on.
base_contexts (list of str): Context ids of contexts that will
have their state applied to make this context.
inputs (list of str): Addresses that can be read from.
outputs (list of str): Addresses that can be written to.
Returns:
context_id (str): the unique context_id of the session
|
def date(self):
"""Convert instant to a date.
>>> instant(2014).date
datetime.date(2014, 1, 1)
>>> instant('2014-2').date
datetime.date(2014, 2, 1)
>>> instant('2014-2-3').date
datetime.date(2014, 2, 3)
"""
instant_date = date_by_instant_cache.get(self)
if instant_date is None:
date_by_instant_cache[self] = instant_date = datetime.date(*self)
return instant_date
|
Convert instant to a date.
>>> instant(2014).date
datetime.date(2014, 1, 1)
>>> instant('2014-2').date
datetime.date(2014, 2, 1)
>>> instant('2014-2-3').date
datetime.date(2014, 2, 3)
|
def _get_wms_request(self, bbox, size_x, size_y):
"""
Returns WMS request.
"""
bbox_3857 = transform_bbox(bbox, CRS.POP_WEB)
return GeopediaWmsRequest(layer=self.layer,
theme=self.theme,
bbox=bbox_3857,
width=size_x,
height=size_y,
image_format=self.image_format,
custom_url_params={CustomUrlParam.TRANSPARENT: True})
|
Returns WMS request.
|
def set_zone(timezone):
'''
Sets the timezone using the tzutil.
Args:
timezone (str): A valid timezone
Returns:
bool: ``True`` if successful, otherwise ``False``
Raises:
CommandExecutionError: If invalid timezone is passed
CLI Example:
.. code-block:: bash
salt '*' timezone.set_zone 'America/Denver'
'''
# if it's one of the key's just use it
if timezone.lower() in mapper.win_to_unix:
win_zone = timezone
elif timezone.lower() in mapper.unix_to_win:
# if it's one of the values, use the key
win_zone = mapper.get_win(timezone)
else:
# Raise error because it's neither key nor value
raise CommandExecutionError('Invalid timezone passed: {0}'.format(timezone))
# Set the value
cmd = ['tzutil', '/s', win_zone]
res = __salt__['cmd.run_all'](cmd, python_shell=False)
if res['retcode']:
raise CommandExecutionError('tzutil encountered an error setting '
'timezone: {0}'.format(timezone),
info=res)
return zone_compare(timezone)
|
Sets the timezone using the tzutil.
Args:
timezone (str): A valid timezone
Returns:
bool: ``True`` if successful, otherwise ``False``
Raises:
CommandExecutionError: If invalid timezone is passed
CLI Example:
.. code-block:: bash
salt '*' timezone.set_zone 'America/Denver'
|
def update_edges(self, elev_fn, dem_proc):
"""
After finishing a calculation, this will update the neighbors and the
todo for that tile
"""
interp = self.build_interpolator(dem_proc)
self.update_edge_todo(elev_fn, dem_proc)
self.set_neighbor_data(elev_fn, dem_proc, interp)
|
After finishing a calculation, this will update the neighbors and the
todo for that tile
|
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'match') and self.match is not None:
_dict['match'] = self.match
return _dict
|
Return a json dictionary representing this model.
|
def handle_json_GET_neareststops(self, params):
"""Return a list of the nearest 'limit' stops to 'lat', 'lon'"""
schedule = self.server.schedule
lat = float(params.get('lat'))
lon = float(params.get('lon'))
limit = int(params.get('limit'))
stops = schedule.GetNearestStops(lat=lat, lon=lon, n=limit)
return [StopToTuple(s) for s in stops]
|
Return a list of the nearest 'limit' stops to 'lat', 'lon'
|
def update_thumbnail(api_key, api_secret, video_key, position=7.0, **kwargs):
"""
Function which updates the thumbnail for an EXISTING video utilizing position parameter.
    This function is useful for selecting a new thumbnail from within the already existing video content.
    Instead of the position parameter, the user may opt to utilize the thumbnail_index parameter.
    Please see the documentation for further information.
:param api_key: <string> JWPlatform api-key
:param api_secret: <string> JWPlatform shared-secret
:param video_key: <string> Video's object ID. Can be found within JWPlayer Dashboard.
:param position: <float> Represents seconds into the duration of a video, for thumbnail extraction.
:param kwargs: Arguments conforming to standards found @ https://developer.jwplayer.com/jw-platform/reference/v1/methods/videos/thumbnails/update.html
:return: <dict> Dict which represents the JSON response.
"""
jwplatform_client = jwplatform.Client(api_key, api_secret)
logging.info("Updating video thumbnail.")
try:
response = jwplatform_client.videos.thumbnails.update(
video_key=video_key,
position=position, # Parameter which specifies seconds into video to extract thumbnail from.
**kwargs)
except jwplatform.errors.JWPlatformError as e:
logging.error("Encountered an error updating thumbnail.\n{}".format(e))
sys.exit(e.message)
return response
|
Function which updates the thumbnail for an EXISTING video utilizing position parameter.
This function is useful for selecting a new thumbnail from within the already existing video content.
Instead of the position parameter, the user may opt to utilize the thumbnail_index parameter.
Please see the documentation for further information.
:param api_key: <string> JWPlatform api-key
:param api_secret: <string> JWPlatform shared-secret
:param video_key: <string> Video's object ID. Can be found within JWPlayer Dashboard.
:param position: <float> Represents seconds into the duration of a video, for thumbnail extraction.
:param kwargs: Arguments conforming to standards found @ https://developer.jwplayer.com/jw-platform/reference/v1/methods/videos/thumbnails/update.html
:return: <dict> Dict which represents the JSON response.
|
def _get_domain_id(self, domain_text_element): # pylint: disable=no-self-use
"""Return the easyname id of the domain."""
try:
# Hierarchy: TR > TD > SPAN > Domain Text
tr_anchor = domain_text_element.parent.parent.parent
td_anchor = tr_anchor.find('td', {'class': 'td_2'})
link = td_anchor.find('a')['href']
domain_id = link.rsplit('/', 1)[-1]
return domain_id
except Exception as error:
            errmsg = ('Cannot get the domain id even though the domain seems '
                      'to exist ({}).'.format(error))
LOGGER.warning(errmsg)
raise AssertionError(errmsg)
|
Return the easyname id of the domain.
|
def download_url(self, timeout=60, name=None):
"""
Trigger a browse download
:param timeout: int - Time in seconds to expire the download
:param name: str - for LOCAL only, to rename the file being downloaded
:return: str
"""
if "local" in self.driver.name.lower():
return url_for(SERVER_ENDPOINT,
object_name=self.name,
dl=1,
name=name,
_external=True)
else:
driver_name = self.driver.name.lower()
expires = (datetime.datetime.now()
+ datetime.timedelta(seconds=timeout)).strftime("%s")
if 's3' in driver_name or 'google' in driver_name:
s2s = "GET\n\n\n{expires}\n/{object_name}"\
.format(expires=expires, object_name=self.path)
h = hmac.new(self.driver.secret.encode('utf-8'), s2s.encode('utf-8'), hashlib.sha1)
s = base64.encodestring(h.digest()).strip()
_keyIdName = "AWSAccessKeyId" if "s3" in driver_name else "GoogleAccessId"
params = {
_keyIdName: self.driver.key,
"Expires": expires,
"Signature": s
}
urlkv = urlencode(params)
return "%s?%s" % (self.secure_url, urlkv)
elif 'cloudfiles' in driver_name:
return self.driver.ex_get_object_temp_url(self._obj,
method="GET",
timeout=expires)
else:
raise NotImplemented("This provider '%s' doesn't support or "
"doesn't have a signed url "
"implemented yet" % self.provider_name)
|
Trigger a browse download
:param timeout: int - Time in seconds to expire the download
:param name: str - for LOCAL only, to rename the file being downloaded
:return: str
|
def validate(self, value):
"""Validate field value."""
if value is not None:
if not isinstance(value, list):
raise ValidationError("field must be a list")
for index, element in enumerate(value):
try:
self.inner.validate(element)
except ValidationError as error:
raise ValidationError("invalid element {}: {}".format(
index,
error.args[0],
))
super().validate(value)
|
Validate field value.
|
def tree_adj_to_prec(graph, root=0):
"""Transforms a tree given as adjacency list into predecessor table form.
if graph is not a tree: will return a DFS spanning tree
:param graph: directed graph in listlist or listdict format
:returns: tree in predecessor table representation
:complexity: linear
"""
prec = [None] * len(graph)
prec[root] = root # mark to visit root only once
to_visit = [root]
while to_visit: # DFS
node = to_visit.pop()
for neighbor in graph[node]:
if prec[neighbor] is None:
prec[neighbor] = node
to_visit.append(neighbor)
prec[root] = None # put the standard mark for root
return prec
|
Transforms a tree given as adjacency list into predecessor table form.
if graph is not a tree: will return a DFS spanning tree
:param graph: directed graph in listlist or listdict format
:returns: tree in predecessor table representation
:complexity: linear
|
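A quick self-check for tree_adj_to_prec above, on a 4-node tree rooted at 0
with edges 0-1, 0-2 and 2-3:

graph = [[1, 2], [0], [0, 3], [2]]
print(tree_adj_to_prec(graph))  # [None, 0, 0, 2]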
def list_object_versions(Bucket, Delimiter=None, EncodingType=None, Prefix=None,
region=None, key=None, keyid=None, profile=None):
'''
    List object versions in a given S3 bucket.
    Returns a dict with 'Versions' and 'DeleteMarkers' lists.
CLI Example:
.. code-block:: bash
salt myminion boto_s3_bucket.list_object_versions mybucket
'''
try:
Versions = []
DeleteMarkers = []
args = {'Bucket': Bucket}
args.update({'Delimiter': Delimiter}) if Delimiter else None
        args.update({'EncodingType': EncodingType}) if EncodingType else None
args.update({'Prefix': Prefix}) if Prefix else None
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
IsTruncated = True
while IsTruncated:
ret = conn.list_object_versions(**args)
IsTruncated = ret.get('IsTruncated', False)
if IsTruncated in ('True', 'true', True):
args['KeyMarker'] = ret['NextKeyMarker']
args['VersionIdMarker'] = ret['NextVersionIdMarker']
Versions += ret.get('Versions', [])
DeleteMarkers += ret.get('DeleteMarkers', [])
return {'Versions': Versions, 'DeleteMarkers': DeleteMarkers}
except ClientError as e:
return {'error': __utils__['boto3.get_error'](e)}
|
List object versions in a given S3 bucket.
Returns a dict with 'Versions' and 'DeleteMarkers' lists.
CLI Example:
.. code-block:: bash
salt myminion boto_s3_bucket.list_object_versions mybucket
|
def redraw(self):
"""
Redraw the Vispy canvas
"""
if self._multiscat is not None:
self._multiscat._update()
self.vispy_widget.canvas.update()
|
Redraw the Vispy canvas
|
def connect(tenant=None, user=None, password=None, token=None, is_public=False):
"""
Authenticates user and returns new platform to user.
This is an entry point to start working with Qubell Api.
:rtype: QubellPlatform
:param str tenant: url to tenant, default taken from 'QUBELL_TENANT'
:param str user: user email, default taken from 'QUBELL_USER'
:param str password: user password, default taken from 'QUBELL_PASSWORD'
:param str token: session token, default taken from 'QUBELL_TOKEN'
    :param bool is_public: whether to use the public or private api (public is not fully supported; use with caution)
:return: New Platform instance
"""
if not is_public:
router = PrivatePath(tenant)
else:
router = PublicPath(tenant)
router.public_api_in_use = is_public
if token or (user and password):
router.connect(user, password, token)
return QubellPlatform().init_router(router)
|
Authenticates user and returns new platform to user.
This is an entry point to start working with Qubell Api.
:rtype: QubellPlatform
:param str tenant: url to tenant, default taken from 'QUBELL_TENANT'
:param str user: user email, default taken from 'QUBELL_USER'
:param str password: user password, default taken from 'QUBELL_PASSWORD'
:param str token: session token, default taken from 'QUBELL_TOKEN'
:param bool is_public: whether to use the public or private api (public is not fully supported; use with caution)
:return: New Platform instance
|
def unmount(self, remove_rw=False, allow_lazy=False):
"""Removes all ties of this disk to the filesystem, so the image can be unmounted successfully.
:raises SubsystemError: when one of the underlying commands fails. Some are swallowed.
:raises CleanupError: when actual cleanup fails. Some are swallowed.
"""
for m in list(sorted(self.volumes, key=lambda v: v.mountpoint or "", reverse=True)):
try:
m.unmount(allow_lazy=allow_lazy)
except ImageMounterError:
logger.warning("Error unmounting volume {0}".format(m.mountpoint))
if self._paths.get('nbd'):
_util.clean_unmount(['qemu-nbd', '-d'], self._paths['nbd'], rmdir=False)
if self.mountpoint:
try:
_util.clean_unmount(['fusermount', '-u'], self.mountpoint)
except SubsystemError:
if not allow_lazy:
raise
_util.clean_unmount(['fusermount', '-uz'], self.mountpoint)
if self._paths.get('avfs'):
try:
_util.clean_unmount(['fusermount', '-u'], self._paths['avfs'])
except SubsystemError:
if not allow_lazy:
raise
_util.clean_unmount(['fusermount', '-uz'], self._paths['avfs'])
if self.rw_active() and remove_rw:
os.remove(self.rwpath)
self.is_mounted = False
|
Removes all ties of this disk to the filesystem, so the image can be unmounted successfully.
:raises SubsystemError: when one of the underlying commands fails. Some are swallowed.
:raises CleanupError: when actual cleanup fails. Some are swallowed.
|
def Run(self, unused_args):
"""Estimate the install date of this system."""
# Don't use winreg.KEY_WOW64_64KEY since it breaks on Windows 2000
subkey = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
"Software\\Microsoft\\Windows NT\\CurrentVersion",
0, winreg.KEY_READ)
install_date = winreg.QueryValueEx(subkey, "InstallDate")
self.SendReply(rdfvalue.RDFDatetime.FromSecondsSinceEpoch(install_date[0]))
|
Estimate the install date of this system.
|
def walk(filesystem, top, topdown=True, onerror=None, followlinks=False):
"""Perform an os.walk operation over the fake filesystem.
Args:
filesystem: The fake filesystem used for implementation
top: The root directory from which to begin walk.
topdown: Determines whether to return the tuples with the root as
the first entry (`True`) or as the last, after all the child
directory tuples (`False`).
onerror: If not `None`, function which will be called to handle the
`os.error` instance provided when `os.listdir()` fails.
followlinks: If `True`, symbolic links are followed.
Yields:
(path, directories, nondirectories) for top and each of its
subdirectories. See the documentation for the builtin os module
for further details.
"""
def do_walk(top_dir, top_most=False):
top_dir = filesystem.normpath(top_dir)
if not top_most and not followlinks and filesystem.islink(top_dir):
return
try:
top_contents = _classify_directory_contents(filesystem, top_dir)
except OSError as exc:
top_contents = None
if onerror is not None:
onerror(exc)
if top_contents is not None:
if topdown:
yield top_contents
for directory in top_contents[1]:
if not followlinks and filesystem.islink(directory):
continue
for contents in do_walk(filesystem.joinpaths(top_dir,
directory)):
yield contents
if not topdown:
yield top_contents
return do_walk(top, top_most=True)
|
Perform an os.walk operation over the fake filesystem.
Args:
filesystem: The fake filesystem used for implementation
top: The root directory from which to begin walk.
topdown: Determines whether to return the tuples with the root as
the first entry (`True`) or as the last, after all the child
directory tuples (`False`).
onerror: If not `None`, function which will be called to handle the
`os.error` instance provided when `os.listdir()` fails.
followlinks: If `True`, symbolic links are followed.
Yields:
(path, directories, nondirectories) for top and each of its
subdirectories. See the documentation for the builtin os module
for further details.
|
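A hedged usage sketch for walk above, assuming pyfakefs' FakeFilesystem as the
filesystem argument (paths are illustrative):

# from pyfakefs.fake_filesystem import FakeFilesystem
# fs = FakeFilesystem()
# fs.create_file('/top/sub/f.txt')
# for root, dirs, files in walk(fs, '/top'):
#     print(root, dirs, files)
# Expected: ('/top', ['sub'], []) then ('/top/sub', [], ['f.txt'])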
def handle_request(self, environ, start_response):
"""Retrieves the route handler and calls the handler returning its the response
:param dict environ: The WSGI environment dictionary for the request
:param start_response:
:return: The WbResponse for the request
:rtype: WbResponse
"""
urls = self.url_map.bind_to_environ(environ)
try:
endpoint, args = urls.match()
# store original script_name (original prefix) before modifications are made
environ['pywb.app_prefix'] = environ.get('SCRIPT_NAME')
response = endpoint(environ, **args)
return response(environ, start_response)
except HTTPException as e:
redir = self._check_refer_redirect(environ)
if redir:
return redir(environ, start_response)
return e(environ, start_response)
except Exception as e:
if self.debug:
traceback.print_exc()
response = self.rewriterapp._error_response(environ, 'Internal Error: ' + str(e), '500 Server Error')
return response(environ, start_response)
|
Retrieves the route handler and calls it, returning its response
:param dict environ: The WSGI environment dictionary for the request
:param start_response:
:return: The WbResponse for the request
:rtype: WbResponse
|
def vm_cputime(vm_=None):
'''
Return cputime used by the vms on this hyper in a
list of dicts:
.. code-block:: python
        [
            {'your-vm': {
                'cputime': <int>,
                'cputime_percent': <int>,
            }},
            ...
        ]
If you pass a VM name in as an argument then it will return info
for just the named VM, otherwise it will return all VMs.
CLI Example:
.. code-block:: bash
salt '*' virt.vm_cputime
'''
with _get_xapi_session() as xapi:
        def _info(vm_):
            host_rec = _get_record_by_label(xapi, 'VM', vm_)
            if host_rec is False:
                return False
            host_cpus = len(host_rec['host_CPUs'])
host_metrics = _get_metrics_record(xapi, 'VM', host_rec)
vcpus = int(host_metrics['VCPUs_number'])
cputime = int(host_metrics['VCPUs_utilisation']['0'])
cputime_percent = 0
if cputime:
# Divide by vcpus to always return a number between 0 and 100
cputime_percent = (1.0e-7 * cputime / host_cpus) / vcpus
return {'cputime': int(cputime),
'cputime_percent': int('{0:.0f}'.format(cputime_percent))}
info = {}
if vm_:
info[vm_] = _info(vm_)
return info
for vm_ in list_domains():
info[vm_] = _info(vm_)
return info
|
Return cputime used by the vms on this hyper in a
list of dicts:
.. code-block:: python
[
    {'your-vm': {
        'cputime': <int>,
        'cputime_percent': <int>,
    }},
    ...
]
If you pass a VM name in as an argument then it will return info
for just the named VM, otherwise it will return all VMs.
CLI Example:
.. code-block:: bash
salt '*' virt.vm_cputime
|
def select_authors_by_geo(query):
"""Pass exact name (case insensitive) of geography name, return ordered set
of author ids.
"""
for geo, ids in AUTHOR_GEO.items():
if geo.casefold() == query.casefold():
return set(ids)
|
Pass the exact name (case-insensitive) of a geography, return the set
of author ids.
|
def get_pltdotstr(self, **kws_usr):
"""Plot one GO header group in Grouper."""
dotstrs = self.get_pltdotstrs(**kws_usr)
assert len(dotstrs) == 1
return dotstrs[0]
|
Plot one GO header group in Grouper.
|
def to_json_file(self, json_file_path):
""" Save this instance to a json file."""
with open(json_file_path, "w", encoding='utf-8') as writer:
writer.write(self.to_json_string())
|
Save this instance to a json file.
|
def recv(self, maxsize=None):
'''
Receive data from the terminal as a (``stdout``, ``stderr``) tuple. If
any of those is ``None`` we can no longer communicate with the
terminal's child process.
'''
if maxsize is None:
maxsize = 1024
elif maxsize < 1:
maxsize = 1
return self._recv(maxsize)
|
Receive data from the terminal as a (``stdout``, ``stderr``) tuple. If
any of those is ``None`` we can no longer communicate with the
terminal's child process.
|
def calc_next_run(self):
"""Calculate next run time of this task"""
base_time = self.last_run
if self.last_run == HAS_NOT_RUN:
if self.wait_for_schedule is False:
self.next_run = timezone.now()
self.wait_for_schedule = False # reset so we don't run on every clock tick
self.save()
return
else:
base_time = timezone.now()
self.next_run = croniter(self.schedule, base_time).get_next(datetime)
self.save()
|
Calculate next run time of this task
|
def make_error_block(ec_info, data_block):
"""\
Creates the error code words for the provided data block.
:param ec_info: ECC information (number of blocks, number of code words etc.)
:param data_block: Iterable of (integer) code words.
"""
num_error_words = ec_info.num_total - ec_info.num_data
error_block = bytearray(data_block)
error_block.extend([0] * num_error_words)
gen = consts.GEN_POLY[num_error_words]
gen_log = consts.GALIOS_LOG
gen_exp = consts.GALIOS_EXP
len_data = len(data_block)
# Extended synthetic division, see http://research.swtch.com/field
for i in range(len_data):
coef = error_block[i]
if coef != 0: # log(0) is undefined
lcoef = gen_log[coef]
for j in range(num_error_words):
error_block[i + j + 1] ^= gen_exp[lcoef + gen[j]]
return error_block[len_data:]
|
Creates the error code words for the provided data block.
:param ec_info: ECC information (number of blocks, number of code words etc.)
:param data_block: Iterable of (integer) code words.
|
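A hedged usage sketch for make_error_block above; ec_info and the consts tables
come from the host library, so the figures are stated as assumptions (a version
1-M QR symbol has 26 total and 16 data code words, i.e. 10 error words):

# ecc = make_error_block(ec_info, data_block)               # data_block: 16 code words
# assert len(ecc) == ec_info.num_total - ec_info.num_data   # -> 10 error words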
def figure_out_build_file(absolute_path, local_path=None):
"""
    Try to figure out the build file (Dockerfile or just a container.yaml) from the
    provided path, and optionally from a relative local path. This is meant to be
    used with a git repo: absolute_path is the path to the git repo, local_path is
    the path to the dockerfile within the git repo.
:param absolute_path:
:param local_path:
:return: tuple, (dockerfile_path, dir_with_dockerfile_path)
"""
logger.info("searching for dockerfile in '%s' (local path %s)", absolute_path, local_path)
logger.debug("abs path = '%s', local path = '%s'", absolute_path, local_path)
if local_path:
if local_path.endswith(DOCKERFILE_FILENAME) or local_path.endswith(REPO_CONTAINER_CONFIG):
git_build_file_dir = os.path.dirname(local_path)
build_file_dir = os.path.abspath(os.path.join(absolute_path, git_build_file_dir))
else:
build_file_dir = os.path.abspath(os.path.join(absolute_path, local_path))
else:
build_file_dir = os.path.abspath(absolute_path)
if not os.path.isdir(build_file_dir):
raise IOError("Directory '%s' doesn't exist." % build_file_dir)
build_file_path = os.path.join(build_file_dir, DOCKERFILE_FILENAME)
if os.path.isfile(build_file_path):
logger.debug("Dockerfile found: '%s'", build_file_path)
return build_file_path, build_file_dir
build_file_path = os.path.join(build_file_dir, REPO_CONTAINER_CONFIG)
if os.path.isfile(build_file_path):
logger.debug("container.yaml found: '%s'", build_file_path)
# Without this check, there would be a confusing 'Dockerfile has not yet been generated'
# exception later.
with open(build_file_path) as f:
data = yaml.safe_load(f)
if data is None or 'flatpak' not in data:
raise RuntimeError("container.yaml found, but no accompanying Dockerfile")
return build_file_path, build_file_dir
raise IOError("Dockerfile '%s' doesn't exist." % os.path.join(build_file_dir,
DOCKERFILE_FILENAME))
|
Try to figure out the build file (Dockerfile or just a container.yaml) from the
provided path, and optionally from a relative local path. This is meant to be
used with a git repo: absolute_path is the path to the git repo, local_path is
the path to the dockerfile within the git repo.
:param absolute_path:
:param local_path:
:return: tuple, (dockerfile_path, dir_with_dockerfile_path)
|
def reraise(self, cause_cls_finder=None):
"""Re-raise captured exception (possibly trying to recreate)."""
if self._exc_info:
six.reraise(*self._exc_info)
else:
# Attempt to regenerate the full chain (and then raise
# from the root); without a traceback, oh well...
root = None
parent = None
for cause in itertools.chain([self], self.iter_causes()):
if cause_cls_finder is not None:
cause_cls = cause_cls_finder(cause)
else:
cause_cls = None
if cause_cls is None:
# Unable to find where this cause came from, give up...
raise WrappedFailure([self])
exc = cause_cls(
*cause.exception_args, **cause.exception_kwargs)
# Saving this will ensure that if this same exception
# is serialized again that we will extract the traceback
# from it directly (thus proxying along the original
# traceback as much as we can).
exc.__traceback_str__ = cause.traceback_str
if root is None:
root = exc
if parent is not None:
parent.__cause__ = exc
parent = exc
six.reraise(type(root), root, tb=None)
|
Re-raise captured exception (possibly trying to recreate).
|
def get_days_since_last_modified(filename):
"""
:param filename: Absolute file path
:return: Number of days since filename's last modified time
"""
now = datetime.now()
last_modified = datetime.fromtimestamp(os.path.getmtime(filename))
return (now - last_modified).days
|
:param filename: Absolute file path
:return: Number of days since filename's last modified time
|
def indexAt(self, point):
"""Returns the index of the component at *point* relative to view coordinates.
        If there is none, an empty index is returned. :qtdoc:`Re-implemented<QAbstractItemView.indexAt>`
:param point: the point, in view coordinates, to find an index for
:type point: :qtdoc:`QPoint`
:returns: :qtdoc:`QModelIndex`
"""
# Transform the view coordinates into contents widget coordinates.
wx = point.x() + self.horizontalScrollBar().value()
wy = point.y() + self.verticalScrollBar().value()
self._calculateRects()
# naive search
for row in range(self.model().rowCount(self.rootIndex())):
for col in range(self.model().columnCountForRow(row)):
if self._rects[row][col].contains(wx, wy):
return self.model().index(row, col, self.rootIndex())
return QtCore.QModelIndex()
|
Returns the index of the component at *point* relative to view coordinates.
If there is none, an empty index is returned. :qtdoc:`Re-implemented<QAbstractItemView.indexAt>`
:param point: the point, in view coordinates, to find an index for
:type point: :qtdoc:`QPoint`
:returns: :qtdoc:`QModelIndex`
|
def add_deformation(chn_names, data):
"""From circularity, compute the deformation
This method is useful for RT-DC data sets that contain
the circularity but not the deformation.
"""
if "deformation" not in chn_names:
for ii, ch in enumerate(chn_names):
if ch == "circularity":
chn_names.append("deformation")
data.append(1-data[ii])
return chn_names, data
|
From circularity, compute the deformation
This method is useful for RT-DC data sets that contain
the circularity but not the deformation.
|
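A worked example for add_deformation above; scalar channel data is used for
brevity (real data sets hold arrays):

chn, dat = add_deformation(["circularity"], [0.9])
print(chn)  # ['circularity', 'deformation']
print(dat)  # [0.9, 0.1] (up to float rounding)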
def register_view(self, view):
""" register_view will create the needed structure
in order to be able to sent all data to Prometheus
"""
v_name = get_view_name(self.options.namespace, view)
if v_name not in self.registered_views:
desc = {'name': v_name,
'documentation': view.description,
'labels': list(map(sanitize, view.columns))}
self.registered_views[v_name] = desc
self.registry.register(self)
|
register_view will create the needed structure
in order to be able to send all data to Prometheus
|
def cloudata(site):
""" Returns a dictionary with all the tag clouds related to a site.
"""
# XXX: this looks like it can be done via ORM
tagdata = getquery("""
SELECT feedjack_post.feed_id, feedjack_tag.name, COUNT(*)
FROM feedjack_post, feedjack_subscriber, feedjack_tag,
feedjack_post_tags
WHERE feedjack_post.feed_id=feedjack_subscriber.feed_id AND
feedjack_post_tags.tag_id=feedjack_tag.id AND
feedjack_post_tags.post_id=feedjack_post.id AND
feedjack_subscriber.site_id=%d
GROUP BY feedjack_post.feed_id, feedjack_tag.name
ORDER BY feedjack_post.feed_id, feedjack_tag.name""" % site.id)
tagdict = {}
globaldict = {}
cloudict = {}
for feed_id, tagname, tagcount in tagdata:
if feed_id not in tagdict:
tagdict[feed_id] = []
tagdict[feed_id].append((tagname, tagcount))
try:
globaldict[tagname] += tagcount
except KeyError:
globaldict[tagname] = tagcount
tagdict[0] = globaldict.items()
for key, val in tagdict.items():
cloudict[key] = build(site, val)
return cloudict
|
Returns a dictionary with all the tag clouds related to a site.
|
def pldist(point, start, end):
"""
Calculates the distance from ``point`` to the line given
by the points ``start`` and ``end``.
:param point: a point
:type point: numpy array
:param start: a point of the line
:type start: numpy array
:param end: another point of the line
:type end: numpy array
"""
if np.all(np.equal(start, end)):
return np.linalg.norm(point - start)
return np.divide(
np.abs(np.linalg.norm(np.cross(end - start, start - point))),
np.linalg.norm(end - start))
|
Calculates the distance from ``point`` to the line given
by the points ``start`` and ``end``.
:param point: a point
:type point: numpy array
:param start: a point of the line
:type start: numpy array
:param end: another point of the line
:type end: numpy array
|
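Worked examples for pldist above: the distance from (1, 1) to the line through
(0, 0) and (2, 0) is 1, and a degenerate (zero-length) line falls back to the
point-to-point distance:

import numpy as np
print(pldist(np.array([1., 1.]), np.array([0., 0.]), np.array([2., 0.])))  # 1.0
print(pldist(np.array([3., 4.]), np.array([0., 0.]), np.array([0., 0.])))  # 5.0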
def should_retry_on_error(self, error):
"""rules for retry
:param error:
ProtocolException that returns from Server
"""
if self.is_streaming_request:
# not retry for streaming request
return False
retry_flag = self.headers.get('re', retry.DEFAULT)
if retry_flag == retry.NEVER:
return False
if isinstance(error, StreamClosedError):
return True
if error.code in [ErrorCode.bad_request, ErrorCode.cancelled,
ErrorCode.unhealthy]:
return False
elif error.code in [ErrorCode.busy, ErrorCode.declined]:
return True
elif error.code is ErrorCode.timeout:
return retry_flag is not retry.CONNECTION_ERROR
elif error.code in [ErrorCode.network_error,
ErrorCode.fatal,
ErrorCode.unexpected]:
return retry_flag is not retry.TIMEOUT
else:
return False
|
rules for retry
:param error:
ProtocolException that returns from Server
|
def main_inject(args):
"""
mapped to pout.inject on the command line, makes it easy to make pout global
without having to actually import it in your python environment
.. since:: 2018-08-13
:param args: Namespace, the parsed CLI arguments passed into the application
:returns: int, the return code of the CLI
"""
ret = 0
try:
filepath = SiteCustomizeFile()
if filepath.is_injected():
logger.info("Pout has already been injected into {}".format(filepath))
else:
if filepath.inject():
logger.info("Injected pout into {}".format(filepath))
else:
logger.info("Failed to inject pout into {}".format(filepath))
except IOError as e:
ret = 1
logger.info(str(e))
return ret
|
mapped to pout.inject on the command line, makes it easy to make pout global
without having to actually import it in your python environment
.. since:: 2018-08-13
:param args: Namespace, the parsed CLI arguments passed into the application
:returns: int, the return code of the CLI
|