code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def load_spacy_rule(file_path: str) -> Dict:
    """Load a spaCy rule set from a JSON file.

    Args:
        file_path (str): path to a JSON file containing spaCy rule sets.

    Returns:
        Dict: the parsed representation of the spaCy rules.
    """
    with open(file_path) as rule_file:
        return json.loads(rule_file.read())
|
A spacy rule file is a json file.
Args:
file_path (str): path to a text file containing a spacy rule sets.
Returns: Dict as the representation of spacy rules
|
def _generate_union_cstor_funcs(self, union):
    """Emit one Objective-C constructor per union variant.

    For each field (variant) of ``union``, emits an ``instancetype``
    constructor that sets ``_tag`` to the variant's enum constant and,
    for non-void variants, stores the payload in the matching ivar.

    Args:
        union: union definition whose ``all_fields`` are iterated.
    """
    for field in union.all_fields:
        # Enum constant identifying this variant in the generated tag enum.
        enum_field_name = fmt_enum_name(field.name, union)
        # Void variants carry no payload, so their constructor takes no args.
        func_args = [] if is_void_type(
            field.data_type) else fmt_func_args_from_fields([field])
        with self.block_func(
                func=self._cstor_name_from_field(field),
                args=func_args,
                return_type='instancetype'):
            self.emit('self = [super init];')
            with self.block_init():
                self.emit('_tag = {};'.format(enum_field_name))
                if not is_void_type(field.data_type):
                    # Ivar name mirrors the formatted field name.
                    self.emit('_{} = {};'.format(
                        fmt_var(field.name), fmt_var(field.name)))
        # Blank line between generated constructors.
        self.emit()
|
Emits standard union constructor.
|
def pick_config_ids(device_type, os, navigator):
    """
    Select one random triple (device_type, os_id, navigator_id) from
    all possible combinations matching the given os and
    navigator filters.

    :param os: allowed os(es)
    :type os: string or list/tuple or None
    :param navigator: allowed browser engine(s)
    :type navigator: string or list/tuple or None
    :param device_type: limit possible oses by device type
    :type device_type: list/tuple or None, possible values:
        "desktop", "smartphone", "tablet", "all"
    """
    # When no os filter is given, restrict the default device pool to desktop.
    if os is None:
        default_dev_types = ['desktop']
    else:
        default_dev_types = list(DEVICE_TYPE_OS.keys())
    dev_type_choices = get_option_choices(
        'device_type', device_type, default_dev_types,
        list(DEVICE_TYPE_OS.keys())
    )
    os_choices = get_option_choices(
        'os', os, list(OS_NAVIGATOR.keys()), list(OS_NAVIGATOR.keys())
    )
    nav_choices = get_option_choices(
        'navigator', navigator, list(NAVIGATOR_OS.keys()),
        list(NAVIGATOR_OS.keys())
    )
    # Keep only mutually-compatible (device, os, navigator) combinations.
    variants = [
        (dev_id, os_id, nav_id)
        for dev_id, os_id, nav_id in product(dev_type_choices, os_choices,
                                             nav_choices)
        if (os_id in DEVICE_TYPE_OS[dev_id]
            and nav_id in DEVICE_TYPE_NAVIGATOR[dev_id]
            and nav_id in OS_NAVIGATOR[os_id])
    ]
    if not variants:
        raise InvalidOption('Options device_type, os and navigator'
                            ' conflicts with each other')
    device_type, os_id, navigator_id = choice(variants)
    assert os_id in OS_PLATFORM
    assert navigator_id in NAVIGATOR_OS
    assert device_type in DEVICE_TYPE_OS
    return device_type, os_id, navigator_id
|
Select one random triple (device_type, os_id, navigator_id) from
all possible combinations matching the given os and
navigator filters.
:param os: allowed os(es)
:type os: string or list/tuple or None
:param navigator: allowed browser engine(s)
:type navigator: string or list/tuple or None
:param device_type: limit possible oses by device type
:type device_type: list/tuple or None, possible values:
"desktop", "smartphone", "tablet", "all"
|
def gsea_compute(data, gmt, n, weighted_score_type, permutation_type,
                 method, pheno_pos, pheno_neg, classes, ascending,
                 processes=1, seed=None, single=False, scale=False):
    """compute enrichment scores and enrichment nulls.

    :param data: preprocessed expression dataframe or a pre-ranked file if prerank=True.
    :param dict gmt: all gene sets in .gmt file. need to call load_gmt() to get results.
    :param int n: permutation number. default: 1000.
    :param str permutation_type: "phenotype" permutes class labels;
        any other value uses gene-set permutation (prerank/ssGSEA/GSEA).
    :param str method: ranking_metric method. see above.
    :param str pheno_pos: one of labels of phenotype's names.
    :param str pheno_neg: one of labels of phenotype's names.
    :param list classes: a list of phenotype labels, to specify which column of dataframe belongs to what category of phenotype.
    :param float weighted_score_type: default:1
    :param bool ascending: sorting order of rankings. Default: False.
    :param int processes: number of worker processes used in the
        gene-set permutation branch. Default: 1.
    :param seed: random seed. Default: np.random.RandomState()
    :param bool single: passed through to enrichment_score in the
        gene-set permutation branch (ssGSEA single-sample mode).
    :param bool scale: if true, scale es by gene number.

    :return: a tuple contains::

        | zipped results of es, nes, pval, fdr.
        | nested list of hit indices of input gene_list.
        | nested list of ranked enrichment score of each input gene_sets.
        | list of enriched terms
    """
    w = weighted_score_type
    # Gene-set names in deterministic (sorted) order; esnull is indexed to match.
    subsets = sorted(gmt.keys())
    es = []
    RES = []
    hit_ind = []
    esnull = [[] for a in range(len(subsets))]
    logging.debug("Start to compute enrichment scores......................")
    if permutation_type == "phenotype":
        logging.debug("Start to permutate classes..............................")
        # shuffling classes and generate random correlation rankings
        rs = np.random.RandomState(seed)
        genes_mat, cor_mat = ranking_metric_tensor(exprs=data, method=method,
                                                   permutation_num=n,
                                                   pos=pheno_pos, neg=pheno_neg,
                                                   classes=classes,
                                                   ascending=ascending, rs=rs)
        # compute es, esnulls. hits, RES
        logging.debug("Start to compute enrichment nulls.......................")
        es, esnull, hit_ind, RES = enrichment_score_tensor(gene_mat=genes_mat,
                                                           cor_mat=cor_mat,
                                                           gene_sets=gmt,
                                                           weighted_score_type=w,
                                                           nperm=n, rs=rs,
                                                           single=False, scale=False,)
    else:
        # Prerank, ssGSEA, GSEA with gene_set permutation
        gl, cor_vec = data.index.values, data.values
        logging.debug("Start to compute es and esnulls........................")
        # split large array into smaller blocks to avoid memory overflow
        temp_esnu = []
        pool_esnu = Pool(processes=processes)
        for subset in subsets:
            # you have to reseed, or all your processes are sharing the same seed value
            # NOTE(review): every subset reseeds with the *same* seed, so all
            # workers draw identical RNG streams -- presumably intentional for
            # reproducibility across gene sets; confirm.
            rs = np.random.RandomState(seed)
            temp_esnu.append(pool_esnu.apply_async(enrichment_score,
                                                   args=(gl, cor_vec, gmt.get(subset), w,
                                                         n, rs, single, scale)))
        pool_esnu.close()
        pool_esnu.join()
        # Collect async results in submission order so indices align with subsets.
        for si, temp in enumerate(temp_esnu):
            e, enu, hit, rune = temp.get()
            esnull[si] = enu
            es.append(e)
            RES.append(rune)
            hit_ind.append(hit)
    return gsea_significance(es, esnull), hit_ind, RES, subsets
|
compute enrichment scores and enrichment nulls.
:param data: preprocessed expression dataframe or a pre-ranked file if prerank=True.
:param dict gmt: all gene sets in .gmt file. need to call load_gmt() to get results.
:param int n: permutation number. default: 1000.
:param str method: ranking_metric method. see above.
:param str pheno_pos: one of labels of phenotype's names.
:param str pheno_neg: one of labels of phenotype's names.
:param list classes: a list of phenotype labels, to specify which column of dataframe belongs to what category of phenotype.
:param float weighted_score_type: default:1
:param bool ascending: sorting order of rankings. Default: False.
:param seed: random seed. Default: np.random.RandomState()
:param bool scale: if true, scale es by gene number.
:return: a tuple contains::
| zipped results of es, nes, pval, fdr.
| nested list of hit indices of input gene_list.
| nested list of ranked enrichment score of each input gene_sets.
| list of enriched terms
|
def count(cls, slug):
    """get the number of objects in the cache for a given slug

    :param slug: cache key
    :return: `int`
    """
    from .models import Content

    # Fast path: serve the count from the in-memory cache when present.
    cached = cls._cache.get(slug)
    if cached is not None:
        return cached
    # Cache miss: query the search backend and memoize the result.
    total = Content.search_objects.search(tags=[slug]).count()
    cls._cache[slug] = total
    return total
|
get the number of objects in the cache for a given slug
:param slug: cache key
:return: `int`
|
def find_sanitizer(self, name):
    """
    Searches for a sanitizer function with given name. The name should
    contain two parts separated from each other with a dot, the first
    part being the module name while the second being name of the function
    contained in the module, when it's being prefixed with "sanitize_".

    The lookup process consists from three attempts, which are:

    1. First package to look the module will be top level package called
       "sanitizers".
    2. Module will be looked under the "addon" packages, if they have been
       defined.
    3. Finally the sanitation function will be looked from the builtin
       sanitizers located in "database_sanitizer.sanitizers" package.

    If none of these provide any results, ConfigurationError will be
    thrown.

    :param name: "Full name" of the sanitation function containing name
        of the module as well as name of the function.
    :type name: str

    :return: First function which can be imported with the given name.
    :rtype: callable
    """
    # Split the sanitizer name into two parts, one containing the Python
    # module name, while second containing portion of the function name
    # we are looking for.
    name_parts = name.split(".")
    if len(name_parts) < 2:
        raise ConfigurationError(
            "Unable to separate module name from function name in '%s'" % (
                name,
            ),
        )
    module_name_suffix = ".".join(name_parts[:-1])
    function_name = "sanitize_%s" % (name_parts[-1],)
    # Candidate packages, searched in priority order:
    # 1. top-level "sanitizers", 2. configured addon packages,
    # 3. builtin "database_sanitizer.sanitizers".
    package_names = ["sanitizers"]
    package_names.extend(self.addon_packages)
    package_names.append("database_sanitizer.sanitizers")
    for package_name in package_names:
        callback = self.find_sanitizer_from_module(
            module_name="%s.%s" % (package_name, module_name_suffix),
            function_name=function_name,
        )
        if callback:
            return callback
    # Give up.
    raise ConfigurationError("Unable to find sanitizer called '%s'" % (
        name,
    ))
|
Searches for a sanitizer function with given name. The name should
contain two parts separated from each other with a dot, the first
part being the module name while the second being name of the function
contained in the module, when it's being prefixed with "sanitize_".
The lookup process consists from three attempts, which are:
1. First package to look the module will be top level package called
"sanitizers".
2. Module will be looked under the "addon" packages, if they have been
defined.
3. Finally the sanitation function will be looked from the builtin
sanitizers located in "database_sanitizer.sanitizers" package.
If none of these provide any results, ConfigurationError will be
thrown.
:param name: "Full name" of the sanitation function containing name
of the module as well as name of the function.
:type name: str
:return: First function which can be imported with the given name.
:rtype: callable
|
def GetList(self):
    """Get Info on Current List

    This is run in __init__ so you don't have to run it again.
    Access from self.schema

    Populates ``self.fields``, ``self.regional_settings`` and
    ``self.server_settings`` from the SOAP ``GetList`` response.
    Raises ``Exception`` on any non-200 HTTP response.
    """
    # Build Request
    soap_request = soap('GetList')
    soap_request.add_parameter('listName', self.listName)
    self.last_request = str(soap_request)
    # Send Request
    response = self._session.post(url=self._url('Lists'),
                                  headers=self._headers('GetList'),
                                  data=str(soap_request),
                                  verify=self._verify_ssl,
                                  timeout=self.timeout)
    # Parse Response
    if response.status_code != 200:
        raise Exception("ERROR:", response.status_code, response.text)
    envelope = etree.fromstring(response.text.encode('utf-8'),
                                parser=etree.XMLParser(huge_tree=self.huge_tree))
    _list = envelope[0][0][0][0]
    # NOTE(review): `info` is built but not used in this method -- presumably
    # consumed elsewhere or dead code; confirm before removing.
    info = {key: value for (key, value) in _list.items()}
    for row in _list[0].getchildren():
        self.fields.append({key: value for (key, value) in row.items()})
    for setting in _list[1].getchildren():
        # BUGFIX: the previous code used str.strip('{namespace}') which treats
        # its argument as a *set of characters* and could mangle tag names
        # whose leading/trailing letters appear in the namespace URI
        # (e.g. 'Name' -> 'Nam').  Split off the '{...}' prefix instead.
        self.regional_settings[setting.tag.split('}', 1)[-1]] = setting.text
    for setting in _list[2].getchildren():
        self.server_settings[setting.tag.split('}', 1)[-1]] = setting.text
|
Get Info on Current List
This is run in __init__ so you don't
have to run it again.
Access from self.schema
|
def com_google_fonts_check_name_postscriptname(ttFont, style, familyname):
    """ Check name table: POSTSCRIPT_NAME entries. """
    from fontbakery.utils import name_entry_id
    failed = False
    for name in ttFont['name'].names:
        if name.nameID == NameID.POSTSCRIPT_NAME:
            # PostScript names are conventionally "<Family>-<Style>".
            expected_value = f"{familyname}-{style}"
            string = name.string.decode(name.getEncoding()).strip()
            if string != expected_value:
                failed = True
                yield FAIL, ("Entry {} on the 'name' table: "
                             "Expected '{}' "
                             "but got '{}'.").format(name_entry_id(name),
                                                     expected_value,
                                                     string)
    if not failed:
        # BUGFIX: message previously misspelled "POSTCRIPT_NAME".
        yield PASS, "POSTSCRIPT_NAME entries are all good."
|
Check name table: POSTSCRIPT_NAME entries.
|
def hydrate(self, values):
    """Convert PackStream values into native values.

    Structures are dispatched through ``self.hydration_functions`` by tag;
    lists and dicts are converted recursively; everything else passes
    through unchanged.
    """
    def convert(value):
        if isinstance(value, Structure):
            try:
                hydration_fn = self.hydration_functions[value.tag]
            except KeyError:
                # Unrecognised structure types pass through untouched.
                return value
            return hydration_fn(*[convert(field) for field in value.fields])
        if isinstance(value, list):
            return [convert(item) for item in value]
        if isinstance(value, dict):
            return {key: convert(item) for key, item in value.items()}
        return value

    return tuple(convert(value) for value in values)
|
Convert PackStream values into native values.
|
def get_alias(self,
              alias=None,
              manifest=None,
              verify=True,
              sizes=False,
              dcd=None):
    # pylint: disable=too-many-arguments
    """
    Return the blob hashes assigned to an alias.

    :param alias: Alias name; usually the only argument you need to pass.
    :type alias: str

    :param manifest: A previously-obtained manifest to use instead of
        ``alias``; rarely needed.
    :type manifest: str

    :param verify: (v1 schema only) Verify the integrity of the alias
        definition in the registry itself. Leave as the default (``True``)
        unless you know otherwise.
    :type verify: bool

    :param sizes: Return blob sizes along with their hashes.
    :type sizes: bool

    :param dcd: (when ``manifest`` is given) The Docker-Content-Digest
        header returned when getting the manifest. If present, this is
        checked against the manifest.
    :type dcd: str

    :rtype: list
    :returns: If ``sizes`` is falsey, a list of blob hashes (strings)
        assigned to the alias; if truthy, a list of (hash, size) tuples.
    """
    # Delegate to the shared implementation; the final positional flag
    # selects the plain (non-digest) lookup mode.
    return self._get_alias(alias, manifest, verify, sizes, dcd, False)
|
Get the blob hashes assigned to an alias.
:param alias: Alias name. You almost definitely will only need to pass this argument.
:type alias: str
:param manifest: If you previously obtained a manifest, specify it here instead of ``alias``. You almost definitely won't need to do this.
:type manifest: str
:param verify: (v1 schema only) Whether to verify the integrity of the alias definition in the registry itself. You almost definitely won't need to change this from the default (``True``).
:type verify: bool
:param sizes: Whether to return sizes of the blobs along with their hashes
:type sizes: bool
:param dcd: (if ``manifest`` is specified) The Docker-Content-Digest header returned when getting the manifest. If present, this is checked against the manifest.
:type dcd: str
:rtype: list
:returns: If ``sizes`` is falsey, a list of blob hashes (strings) which are assigned to the alias. If ``sizes`` is truthy, a list of (hash,size) tuples for each blob.
|
def parse_value(self, tup_tree):
    """
    Parse a VALUE element and return its text content as a unicode string.
    Whitespace is preserved.

    The conversion of the text representation of the value to a CIM data
    type object requires CIM type information which is not available on the
    VALUE element and therefore will be done when parsing higher level
    elements that have that information.

    ::

        <!ELEMENT VALUE (#PCDATA)>
    """
    # VALUE carries no attributes or child elements -- PCDATA only.
    self.check_node(tup_tree, 'VALUE', (), (), (), allow_pcdata=True)
    text = self.pcdata(tup_tree)
    return text
|
Parse a VALUE element and return its text content as a unicode string.
Whitespace is preserved.
The conversion of the text representation of the value to a CIM data
type object requires CIM type information which is not available on the
VALUE element and therefore will be done when parsing higher level
elements that have that information.
::
<!ELEMENT VALUE (#PCDATA)>
|
def html(self):
    """ Create an ``lxml``-based HTML DOM from the response. The tree
    will not have a root, so all queries need to be relative
    (i.e. start with a dot).

    Raises DependencyException when lxml is not installed.
    """
    try:
        # Keep the try body minimal: only the optional-dependency import
        # belongs here, so genuine parse errors from fromstring() are not
        # conflated with a missing lxml installation.
        from lxml import html
    except ImportError as ie:
        raise DependencyException(ie)
    return html.fromstring(self.content)
|
Create an ``lxml``-based HTML DOM from the response. The tree
will not have a root, so all queries need to be relative
(i.e. start with a dot).
|
def _execute_callback(self, status, message, job, res, err, stacktrace):
    """Execute the callback.

    :param status: Job status. Possible values are "invalid" (job could not
        be deserialized or was malformed), "failure" (job raised an error),
        "timeout" (job timed out), or "success" (job finished successfully
        and returned a result).
    :type status: str
    :param message: Kafka message.
    :type message: :doc:`kq.Message <message>`
    :param job: Job object, or None if **status** was "invalid".
    :type job: kq.Job
    :param res: Job result, or None if an exception was raised.
    :type res: object | None
    :param err: Exception raised by job, or None if there was none.
    :type err: Exception | None
    :param stacktrace: Exception traceback, or None if there was none.
    :type stacktrace: str | None
    """
    # No callback configured: nothing to do.
    if self._callback is None:
        return
    try:
        self._logger.info('Executing callback ...')
        self._callback(status, message, job, res, err, stacktrace)
    except Exception as e:
        # A misbehaving callback must never crash the worker; log and move on.
        self._logger.exception(
            'Callback raised an exception: {}'.format(e))
|
Execute the callback.
:param status: Job status. Possible values are "invalid" (job could not
be deserialized or was malformed), "failure" (job raised an error),
"timeout" (job timed out), or "success" (job finished successfully
and returned a result).
:type status: str
:param message: Kafka message.
:type message: :doc:`kq.Message <message>`
:param job: Job object, or None if **status** was "invalid".
:type job: kq.Job
:param res: Job result, or None if an exception was raised.
:type res: object | None
:param err: Exception raised by job, or None if there was none.
:type err: Exception | None
:param stacktrace: Exception traceback, or None if there was none.
:type stacktrace: str | None
|
def parse(self):
    """
    Parses everything into a datastructure that looks like:

        result = [{
            'origin_filename': '',
            'result_filename': '',
            'origin_lines': [], // all lines of the original file
            'result_lines': [], // all lines of the newest file
            'added_lines': [], // all lines added to the result file
            'removed_lines': [], // all lines removed from the result file
        }, ...]
    """
    result = []
    # z is the Entry currently being built; None until a diff header is seen.
    z = None
    before_line_number, after_line_number = 0, 0
    # position counts lines within the current file's diff (hunk-relative).
    position = 0
    for line in self.diff_text.splitlines():
        # New File
        match = re.search(r'diff .*a/(?P<origin_filename>.*) '
                          r'b/(?P<result_filename>.*)', line)
        if match is not None:
            # Flush the previous file's entry before starting a new one.
            if z is not None:
                result.append(z)
            z = Entry(match.group('origin_filename'),
                      match.group('result_filename'))
            position = 0
            continue
        if self.should_skip_line(line):
            continue
        # Hunk header: reset the before/after line counters from "@@ ... @@".
        header = diff_re.search(line)
        if header is not None:
            before_line_number = int(header.group('removed_start'))
            after_line_number = int(header.group('added_start'))
            position += 1
            continue
        # removed line
        if line.startswith('-'):
            z.new_removed(Line(before_line_number, position, line[1:]))
            z.new_origin(Line(before_line_number, position, line[1:]))
            before_line_number += 1
        # added line
        elif line.startswith('+'):
            z.new_added(Line(after_line_number, position, line[1:]))
            z.new_result(Line(after_line_number, position, line[1:]))
            after_line_number += 1
        # untouched context line.
        else:
            z.new_origin(Line(before_line_number, position, line[1:]))
            z.new_result(Line(after_line_number, position, line[1:]))
            before_line_number += 1
            after_line_number += 1
        position += 1
    # Flush the final entry, if any content was parsed.
    if z is not None:
        result.append(z)
    return result
|
Parses everything into a datastructure that looks like:
result = [{
'origin_filename': '',
'result_filename': '',
'origin_lines': [], // all lines of the original file
'result_lines': [], // all lines of the newest file
'added_lines': [], // all lines added to the result file
'removed_lines': [], // all lines removed from the result file
}, ...]
|
def check_tweet(tweet, validation_checking=False):
    """
    Ensures a tweet is valid and determines the type of format for the tweet.

    Args:
        tweet (dict/Tweet): the tweet payload
        validation_checking (bool): check for valid key structure in a tweet.

    Returns:
        bool: True when the payload is in original format, False when it
        is in activity-streams format.

    Raises:
        NotATweetError: when the payload has no 'id' key.
    """
    if "id" not in tweet:
        raise NotATweetError("This text has no 'id' key")
    original_format = is_original_format(tweet)
    # Dispatch to the format-specific validator.
    validator = (_check_original_format_tweet if original_format
                 else _check_activity_streams_tweet)
    validator(tweet, validation_checking=validation_checking)
    return original_format
|
Ensures a tweet is valid and determines the type of format for the tweet.
Args:
tweet (dict/Tweet): the tweet payload
validation_checking (bool): check for valid key structure in a tweet.
|
def _marshall_value(value):
    """
    Recursively transform `value` into an AttributeValue `dict`

    :param mixed value: The value to encode
    :rtype: dict
    :raises ValueError: for unsupported types

    Return the value as dict indicating the data type and transform or
    recursively process the value if required.
    """
    # NOTE: branch order matters.  bool is tested before (int, float)
    # because bool is an int subclass; str handling differs between
    # Python 2 (byte strings) and Python 3 (text).
    if PYTHON3 and isinstance(value, bytes):
        return {'B': base64.b64encode(value).decode('ascii')}
    elif PYTHON3 and isinstance(value, str):
        return {'S': value}
    elif not PYTHON3 and isinstance(value, str):
        # Python 2 str may hold binary data; sniff before tagging.
        if is_binary(value):
            return {'B': base64.b64encode(value).decode('ascii')}
        return {'S': value}
    elif not PYTHON3 and isinstance(value, unicode):
        # Python 2 unicode text is stored UTF-8 encoded.
        return {'S': value.encode('utf-8')}
    elif isinstance(value, dict):
        return {'M': marshall(value)}
    elif isinstance(value, bool):
        return {'BOOL': value}
    elif isinstance(value, (int, float)):
        return {'N': str(value)}
    elif isinstance(value, datetime.datetime):
        # Datetimes serialize as ISO-8601 strings.
        return {'S': value.isoformat()}
    elif isinstance(value, uuid.UUID):
        return {'S': str(value)}
    elif isinstance(value, list):
        return {'L': [_marshall_value(v) for v in value]}
    elif isinstance(value, set):
        # Sets map to the typed set attributes (BS/SS/NS); mixed-type
        # sets are rejected below.
        if PYTHON3 and all([isinstance(v, bytes) for v in value]):
            return {'BS': _encode_binary_set(value)}
        elif PYTHON3 and all([isinstance(v, str) for v in value]):
            return {'SS': sorted(list(value))}
        elif all([isinstance(v, (int, float)) for v in value]):
            return {'NS': sorted([str(v) for v in value])}
        elif not PYTHON3 and all([isinstance(v, str) for v in value]) and \
                all([is_binary(v) for v in value]):
            return {'BS': _encode_binary_set(value)}
        elif not PYTHON3 and all([isinstance(v, str) for v in value]) and \
                all([is_binary(v) is False for v in value]):
            return {'SS': sorted(list(value))}
        else:
            raise ValueError('Can not mix types in a set')
    elif value is None:
        return {'NULL': True}
    raise ValueError('Unsupported type: %s' % type(value))
|
Recursively transform `value` into an AttributeValue `dict`
:param mixed value: The value to encode
:rtype: dict
:raises ValueError: for unsupported types
Return the value as dict indicating the data type and transform or
recursively process the value if required.
|
def resourceprep(string, allow_unassigned=False):
    """
    Process the given `string` using the Resourceprep (`RFC 6122`_) profile. In
    the error cases defined in `RFC 3454`_ (stringprep), a :class:`ValueError`
    is raised.

    :param string: The resource-part string to prepare.
    :param allow_unassigned: When true, skip rejection of code points
        unassigned in the stringprep tables (table A.1).
    :return: The prepared string.
    """
    chars = list(string)
    # Apply the Resourceprep mapping step.
    _resourceprep_do_mapping(chars)
    # Unicode normalization (presumably NFKC per RFC 3454 -- confirm in
    # do_normalization).
    do_normalization(chars)
    # Reject prohibited output: stringprep tables C.1.2, C.2.1, C.2.2
    # and C.3 through C.9.
    check_prohibited_output(
        chars,
        (
            stringprep.in_table_c12,
            stringprep.in_table_c21,
            stringprep.in_table_c22,
            stringprep.in_table_c3,
            stringprep.in_table_c4,
            stringprep.in_table_c5,
            stringprep.in_table_c6,
            stringprep.in_table_c7,
            stringprep.in_table_c8,
            stringprep.in_table_c9,
        ))
    # Enforce bidirectional-text constraints.
    check_bidi(chars)
    if not allow_unassigned:
        # Reject code points unassigned in Unicode (table A.1).
        check_unassigned(
            chars,
            (
                stringprep.in_table_a1,
            )
        )
    return "".join(chars)
|
Process the given `string` using the Resourceprep (`RFC 6122`_) profile. In
the error cases defined in `RFC 3454`_ (stringprep), a :class:`ValueError`
is raised.
|
def _client_wrapper(attr, *args, **kwargs):
    '''
    Common functionality for running low-level API calls

    attr -- name of the docker-py client method to invoke; remaining
    args/kwargs are forwarded to it.  The special kwarg
    ``catch_api_errors`` (default True) is popped first: when True,
    docker APIErrors are re-raised as CommandExecutionError.

    Raises SaltInvocationError for unknown or non-callable attributes and
    CommandExecutionError when the underlying call fails.
    '''
    catch_api_errors = kwargs.pop('catch_api_errors', True)
    func = getattr(__context__['docker.client'], attr, None)
    if func is None or not hasattr(func, '__call__'):
        raise SaltInvocationError('Invalid client action \'{0}\''.format(attr))
    if attr in ('push', 'pull'):
        try:
            # Refresh auth config from config.json
            __context__['docker.client'].reload_config()
        except AttributeError:
            # Presumably older docker-py clients lack reload_config -- best
            # effort only; confirm.
            pass
    err = ''
    try:
        log.debug(
            'Attempting to run docker-py\'s "%s" function '
            'with args=%s and kwargs=%s', attr, args, kwargs
        )
        ret = func(*args, **kwargs)
    except docker.errors.APIError as exc:
        if catch_api_errors:
            # Generic handling of Docker API errors
            raise CommandExecutionError(
                'Error {0}: {1}'.format(exc.response.status_code,
                                        exc.explanation)
            )
        else:
            # Allow API errors to be caught further up the stack
            raise
    except docker.errors.DockerException as exc:
        # More general docker exception (catches InvalidVersion, etc.)
        raise CommandExecutionError(exc.__str__())
    except Exception as exc:
        # Record the message; fall through to the generic failure below.
        err = exc.__str__()
    else:
        return ret
    # If we're here, it's because an exception was caught earlier, and the
    # API command failed.
    msg = 'Unable to perform {0}'.format(attr)
    if err:
        msg += ': {0}'.format(err)
    raise CommandExecutionError(msg)
|
Common functionality for running low-level API calls
|
def __clear_bp(self, aProcess):
    """
    Restores the original permissions of the target pages.

    Clears the PAGE_GUARD modifier from the page protection flags,
    effectively disarming the guard-page breakpoint.

    @type  aProcess: L{Process}
    @param aProcess: Process object.
    """
    lpAddress = self.get_address()
    # Current protection flags of the watched region.
    flNewProtect = aProcess.mquery(lpAddress).Protect
    # Clear the PAGE_GUARD bit while preserving all other flags.
    flNewProtect = flNewProtect & (0xFFFFFFFF ^ win32.PAGE_GUARD)  # DWORD
    aProcess.mprotect(lpAddress, self.get_size(), flNewProtect)
|
Restores the original permissions of the target pages.
@type aProcess: L{Process}
@param aProcess: Process object.
|
def cache_url(url, model_dir=None, progress=True):
    r"""Loads the Torch serialized object at the given URL.

    If the object is already present in `model_dir`, it's deserialized and
    returned. The filename part of the URL should follow the naming convention
    ``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more
    digits of the SHA256 hash of the contents of the file. The hash is used to
    ensure unique names and to verify the contents of the file.

    The default value of `model_dir` is ``$TORCH_HOME/models`` where
    ``$TORCH_HOME`` defaults to ``~/.torch``. The default directory can be
    overridden with the ``$TORCH_MODEL_ZOO`` environment variable.

    Args:
        url (string): URL of the object to download
        model_dir (string, optional): directory in which to save the object
        progress (bool, optional): whether or not to display a progress bar to stderr

    Returns:
        str: local filesystem path of the cached file.

    Example:
        >>> cached_file = maskrcnn_benchmark.utils.model_zoo.cache_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth')
    """
    if model_dir is None:
        # Resolve cache dir: $TORCH_MODEL_ZOO wins, else $TORCH_HOME/models.
        torch_home = os.path.expanduser(os.getenv('TORCH_HOME', '~/.torch'))
        model_dir = os.getenv('TORCH_MODEL_ZOO', os.path.join(torch_home, 'models'))
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    parts = urlparse(url)
    filename = os.path.basename(parts.path)
    if filename == "model_final.pkl":
        # workaround as pre-trained Caffe2 models from Detectron have all the same filename
        # so make the full path the filename by replacing / with _
        filename = parts.path.replace("/", "_")
    cached_file = os.path.join(model_dir, filename)
    # Only the main process downloads; the others wait at the synchronize()
    # barrier below (presumably for distributed training -- confirm).
    if not os.path.exists(cached_file) and is_main_process():
        sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
        hash_prefix = HASH_REGEX.search(filename)
        if hash_prefix is not None:
            hash_prefix = hash_prefix.group(1)
            # workaround: Caffe2 models don't have a hash, but follow the R-50 convention,
            # which matches the hash PyTorch uses. So we skip the hash matching
            # if the hash_prefix is less than 6 characters
            if len(hash_prefix) < 6:
                hash_prefix = None
        _download_url_to_file(url, cached_file, hash_prefix, progress=progress)
    synchronize()
    return cached_file
|
r"""Loads the Torch serialized object at the given URL.
If the object is already present in `model_dir`, it's deserialized and
returned. The filename part of the URL should follow the naming convention
``filename-<sha256>.ext`` where ``<sha256>`` is the first eight or more
digits of the SHA256 hash of the contents of the file. The hash is used to
ensure unique names and to verify the contents of the file.
The default value of `model_dir` is ``$TORCH_HOME/models`` where
``$TORCH_HOME`` defaults to ``~/.torch``. The default directory can be
overridden with the ``$TORCH_MODEL_ZOO`` environment variable.
Args:
url (string): URL of the object to download
model_dir (string, optional): directory in which to save the object
progress (bool, optional): whether or not to display a progress bar to stderr
Example:
>>> cached_file = maskrcnn_benchmark.utils.model_zoo.cache_url('https://s3.amazonaws.com/pytorch/models/resnet18-5c106cde.pth')
|
def extract(self, member, path="", set_attrs=True):
    """Extract a member from the archive to the current working directory,
    using its full name. Its file information is extracted as accurately
    as possible. `member' may be a filename or a TarInfo object. You can
    specify a different directory using `path'. File attributes (owner,
    mtime, mode) are set unless `set_attrs' is False.
    """
    # Extraction requires the archive to be open in read mode.
    self._check("r")
    if isinstance(member, str):
        tarinfo = self.getmember(member)
    else:
        tarinfo = member
    # Prepare the link target for makelink().
    if tarinfo.islnk():
        tarinfo._link_target = os.path.join(path, tarinfo.linkname)
    try:
        self._extract_member(tarinfo, os.path.join(path, tarinfo.name),
                             set_attrs=set_attrs)
    except EnvironmentError as e:
        # errorlevel > 0: propagate OS errors; otherwise just log them.
        if self.errorlevel > 0:
            raise
        else:
            if e.filename is None:
                self._dbg(1, "tarfile: %s" % e.strerror)
            else:
                self._dbg(1, "tarfile: %s %r" % (e.strerror, e.filename))
    except ExtractError as e:
        # errorlevel > 1: propagate non-fatal extraction errors too.
        if self.errorlevel > 1:
            raise
        else:
            self._dbg(1, "tarfile: %s" % e)
|
Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a TarInfo object. You can
specify a different directory using `path'. File attributes (owner,
mtime, mode) are set unless `set_attrs' is False.
|
def is_compatible_assembly_level(self, ncbi_assembly_level):
    """Return True when ``ncbi_assembly_level`` matches one of the
    configured assembly levels (after mapping them to NCBI strings)."""
    allowed_ncbi_strings = (self._LEVELS[level]
                            for level in self.assembly_level)
    return ncbi_assembly_level in allowed_ncbi_strings
|
Check if a given ncbi assembly level string matches the configured assembly levels.
|
def add(self, game_object: Hashable, tags: Iterable[Hashable] = ()) -> None:
    """
    Add a game_object to the container.

    game_object: Any Hashable object. The item to be added.
    tags: An iterable of Hashable objects. Values that can be used to
          retrieve a group containing the game_object.

    Examples:
        container.add(MyObject())
        container.add(MyObject(), tags=("red", "blue")
    """
    # A bare string iterates character-by-character, which is almost
    # certainly not what the caller meant -- reject it loudly.
    if isinstance(tags, (str, bytes)):
        raise TypeError("You passed a string instead of an iterable, this probably isn't what you intended.\n\nTry making it a tuple.")
    self.all.add(game_object)
    # Index the object under every class in its MRO so kind lookups match
    # base classes too.
    for klass in type(game_object).mro():
        self.kinds[klass].add(game_object)
    for tag in tags:
        self.tags[tag].add(game_object)
|
Add a game_object to the container.
game_object: Any Hashable object. The item to be added.
tags: An iterable of Hashable objects. Values that can be used to
retrieve a group containing the game_object.
Examples:
container.add(MyObject())
container.add(MyObject(), tags=("red", "blue")
|
def spkcov(spk, idcode, cover=None):
    """
    Find the coverage window for a specified ephemeris object in a
    specified SPK file.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkcov_c.html

    :param spk: Name of SPK file.
    :type spk: str
    :param idcode: ID code of ephemeris object.
    :type idcode: int
    :param cover: Optional SPICE Window giving coverage in "spk" for "idcode".
    :type cover: spiceypy.utils.support_types.SpiceCell
    """
    spk_p = stypes.stringToCharP(spk)
    idcode_c = ctypes.c_int(idcode)
    if cover is None:
        # No window supplied: allocate a fresh double cell for the result.
        cover = stypes.SPICEDOUBLE_CELL(2000)
    else:
        assert isinstance(cover, stypes.SpiceCell)
        assert cover.is_double()
    libspice.spkcov_c(spk_p, idcode_c, ctypes.byref(cover))
    return cover
|
Find the coverage window for a specified ephemeris object in a
specified SPK file.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkcov_c.html
:param spk: Name of SPK file.
:type spk: str
:param idcode: ID code of ephemeris object.
:type idcode: int
:param cover: Optional SPICE Window giving coverage in "spk" for "idcode".
:type cover: spiceypy.utils.support_types.SpiceCell
|
def preTranslate(self, tx, ty):
    """Apply a pre-translation by (tx, ty) to this matrix in place.

    Only the translation components (e, f) change; returns self so
    calls can be chained.
    """
    shift_x = tx * self.a + ty * self.c
    shift_y = tx * self.b + ty * self.d
    self.e += shift_x
    self.f += shift_y
    return self
|
Calculate pre translation and replace current matrix.
|
def _quantile_function(self, alpha=0.5, smallest_count=None):
    """Return a function that returns the quantile values for this
    histogram.

    The histogram (a mapping of value -> count) is treated as a
    piecewise-linear inverse CDF.  ``beta = alpha * smallest_count``
    shifts each knot inward so extreme quantiles do not collapse onto
    the exact observed min/max.

    :param alpha: scale factor for the interpolation offset ``beta``.
    :param smallest_count: optional value used when computing ``beta``;
        the effective value is capped by the smallest observed count.
    :return: callable ``f(q)`` mapping ``0 <= q <= 1`` to a float;
        raises ValueError for ``q`` outside [0, 1].
    """
    total = float(self.total())
    smallest_observed_count = min(itervalues(self))
    if smallest_count is None:
        smallest_count = smallest_observed_count
    else:
        smallest_count = min(smallest_count, smallest_observed_count)
    beta = alpha * smallest_count
    debug_plot = []
    cumulative_sum = 0.0
    # inverse maps quantile position -> value; each value contributes two
    # knots, shifted inward by beta.
    inverse = sortedcontainers.SortedDict()
    for value, count in iteritems(self):
        debug_plot.append((cumulative_sum / total, value))
        inverse[(cumulative_sum + beta) / total] = value
        cumulative_sum += count
        inverse[(cumulative_sum - beta) / total] = value
        debug_plot.append((cumulative_sum / total, value))
    # get maximum and minimum q values
    q_min = inverse.iloc[0]
    q_max = inverse.iloc[-1]
    # this stuff if helpful for debugging -- keep it in here
    # for i, j in debug_plot:
    #     print i, j
    # print ''
    # for i, j in inverse.iteritems():
    #     print i, j
    # print ''
    def function(q):
        if q < 0.0 or q > 1.0:
            msg = 'invalid quantile %s, need `0 <= q <= 1`' % q
            raise ValueError(msg)
        elif q < q_min:
            q = q_min
        elif q > q_max:
            q = q_max
        # with beta > 0 interpolate linearly between surrounding knots;
        # with beta == 0 the knots coincide, so average or take directly.
        if beta > 0:
            if q in inverse:
                result = inverse[q]
            else:
                previous_index = inverse.bisect_left(q) - 1
                x1 = inverse.iloc[previous_index]
                x2 = inverse.iloc[previous_index + 1]
                y1 = inverse[x1]
                y2 = inverse[x2]
                result = (y2 - y1) * (q - x1) / float(x2 - x1) + y1
        else:
            if q in inverse:
                previous_index = inverse.bisect_left(q) - 1
                x1 = inverse.iloc[previous_index]
                x2 = inverse.iloc[previous_index + 1]
                y1 = inverse[x1]
                y2 = inverse[x2]
                result = 0.5 * (y1 + y2)
            else:
                previous_index = inverse.bisect_left(q) - 1
                x1 = inverse.iloc[previous_index]
                result = inverse[x1]
        return float(result)
    return function
|
Return a function that returns the quantile values for this
histogram.
|
def catch(ignore=None,
          was_doing="something important",
          helpfull_tips="you should use a debugger",
          gbc=None):
    """
    Catch, prepare and log the currently handled exception.

    Must be called from inside an ``except`` block, since it reads
    ``sys.exc_info()``.

    :param ignore: iterable of exception classes to skip; when the caught
        class is in it, the function only logs and returns early.
    :param was_doing: human-readable description of the failed activity.
    :param helpfull_tips: tips printed to help fix the problem.
    :param gbc: optional logger-like object with ``say``/``cry`` methods;
        when None, logging through it is skipped.
    :return: 'exception in ignorelist' when the exception was ignored,
        otherwise None.
    """
    exc_cls, exc, tb = sys.exc_info()
    # `ignore` defaults to None (not a mutable default list); treat None
    # the same as an empty ignore list.
    if ignore and exc_cls in ignore:
        if gbc is not None:
            gbc.say('ignoring caught:' + str(exc_cls))
        return 'exception in ignorelist'
    ex_message = traceback.format_exception_only(exc_cls, exc)[-1].strip()
    # Walk to the innermost frame to report where the error happened.
    error_frame = tb
    while error_frame.tb_next is not None:
        error_frame = error_frame.tb_next
    file = error_frame.tb_frame.f_code.co_filename
    line = error_frame.tb_lineno
    formated_stack = [{'file': summary[0],
                       'line': summary[1],
                       'func': summary[2],
                       'text': summary[3]}
                      for summary in traceback.extract_tb(tb)]
    event = {
        'was_doing': was_doing,
        'message': ex_message,
        'errorLocation': {
            'file': file,
            'line': line,
            'full': file + ' -> ' + str(line)
        },
        'stack': formated_stack
    }
    try:
        # Guard the gbc call: with the default gbc=None the original code
        # raised AttributeError here and never printed the tips.
        if gbc is not None:
            gbc.cry('caught:' + pformat(event))
        print('Bubble3: written error to log')
        print('Bubble3: tips for fixing this:')
        print(helpfull_tips)
    except Exception as e:
        print('Bubble3: cant log error cause of %s' % e)
|
Catch, prepare and log error
:param exc_cls: error class
:param exc: exception
:param tb: exception traceback
|
def list_resources_with_long_filters(list_method,
                                     filter_attr, filter_values, **params):
    """List neutron resources with handling RequestURITooLong exception.
    If filter parameters are long, list resources API request leads to
    414 error (URL is too long). For such case, this method split
    list parameters specified by a list_field argument into chunks
    and call the specified list_method repeatedly.
    :param list_method: Method used to retrieve resource list.
    :param filter_attr: attribute name to be filtered. The value corresponding
        to this attribute is specified by "filter_values".
        If you want to specify more attributes for a filter condition,
        pass them as keyword arguments like "attr2=values2".
    :param filter_values: values of "filter_attr" to be filtered.
        If filter_values are too long and the total URI length exceed the
        maximum length supported by the neutron server, filter_values will
        be split into sub lists if filter_values is a list.
    :param params: parameters to pass a specified listing API call
        without any changes. You can specify more filter conditions
        in addition to a pair of filter_attr and filter_values.
    """
    try:
        params[filter_attr] = filter_values
        return list_method(**params)
    except neutron_exc.RequestURITooLong as uri_len_exc:
        # The URI is too long because of too many filter values.
        # Use the excess attribute of the exception to know how many
        # filter values can be inserted into a single request.
        # We consider only the filter condition from (filter_attr,
        # filter_values) and do not consider other filter conditions
        # which may be specified in **params.
        if not isinstance(filter_values, (list, tuple, set, frozenset)):
            filter_values = [filter_values]
        # Length of each query filter is:
        # <key>=<value>& (e.g., id=<uuid>)
        # The length will be key_len + value_maxlen + 2
        all_filter_len = sum(len(filter_attr) + len(val) + 2
                             for val in filter_values)
        allowed_filter_len = all_filter_len - uri_len_exc.excess
        val_maxlen = max(len(val) for val in filter_values)
        filter_maxlen = len(filter_attr) + val_maxlen + 2
        chunk_size = allowed_filter_len // filter_maxlen
        # NOTE(review): if even a single filter value exceeds the allowed
        # length, chunk_size can be 0 and range() below raises ValueError
        # -- confirm callers guarantee shorter values.
        resources = []
        for i in range(0, len(filter_values), chunk_size):
            params[filter_attr] = filter_values[i:i + chunk_size]
            resources.extend(list_method(**params))
        return resources
|
List neutron resources with handling RequestURITooLong exception.
If filter parameters are long, list resources API request leads to
414 error (URL is too long). For such case, this method split
list parameters specified by a list_field argument into chunks
and call the specified list_method repeatedly.
:param list_method: Method used to retrieve resource list.
:param filter_attr: attribute name to be filtered. The value corresponding
to this attribute is specified by "filter_values".
If you want to specify more attributes for a filter condition,
pass them as keyword arguments like "attr2=values2".
:param filter_values: values of "filter_attr" to be filtered.
If filter_values are too long and the total URI length exceed the
maximum length supported by the neutron server, filter_values will
be split into sub lists if filter_values is a list.
:param params: parameters to pass a specified listing API call
without any changes. You can specify more filter conditions
in addition to a pair of filter_attr and filter_values.
|
def all_resource_urls(query):
    """Collect the URLs of every resource from a paginated API.

    Follows the 'next' link in each JSON response until it is empty.

    :param query: URL of the first page of results.
    :return: list of the 'url' field of every resource found.
    """
    urls = []
    # Renamed the loop flag: the original shadowed the builtin `next`.
    while query:
        response = requests.get(query)
        payload = json.loads(response.content)
        urls.extend(resource['url'] for resource in payload['results'])
        # 'next' is falsy (None/empty) on the last page.
        query = payload['next'] if payload['next'] else None
    return urls
|
Get all the URLs for every resource
|
def _create(cls, repo, path, resolve, reference, force, logmsg=None):
    """internal method used to create a new symbolic reference.
    If resolve is False, the reference will be taken as is, creating
    a proper symbolic reference. Otherwise it will be resolved to the
    corresponding object and a detached symbolic reference will be created
    instead

    :param repo: repository the reference belongs to
    :param path: reference path below the git directory
    :param force: when False and the reference file already exists with
        different content, an OSError is raised
    :param logmsg: optional message passed on to ``set_reference``
    :return: the newly created reference instance"""
    git_dir = _git_dir(repo, path)
    full_ref_path = cls.to_full_path(path)
    abs_ref_path = osp.join(git_dir, full_ref_path)
    # figure out target data: either the raw reference, or (when
    # resolving) the object it points to.
    target = reference
    if resolve:
        target = repo.rev_parse(str(reference))
    if not force and osp.isfile(abs_ref_path):
        # Without force, only tolerate an existing file whose content
        # already matches what we would write.
        target_data = str(target)
        if isinstance(target, SymbolicReference):
            target_data = target.path
        if not resolve:
            target_data = "ref: " + target_data
        with open(abs_ref_path, 'rb') as fd:
            existing_data = fd.read().decode(defenc).strip()
        if existing_data != target_data:
            raise OSError("Reference at %r does already exist, pointing to %r, requested was %r" %
                          (full_ref_path, existing_data, target_data))
    # END no force handling
    ref = cls(repo, full_ref_path)
    ref.set_reference(target, logmsg)
    return ref
|
internal method used to create a new symbolic reference.
If resolve is False, the reference will be taken as is, creating
a proper symbolic reference. Otherwise it will be resolved to the
corresponding object and a detached symbolic reference will be created
instead
|
def _conf(cls, opts):
    """Setup logging via ini-file from logging_conf_file option.

    :return: True when a configuration file was found and applied,
        False when the 'logging_conf_file' option is unset.
    :raises OSError: when the configured path does not exist.
    """
    conf_path = cls.config.get('core', 'logging_conf_file', None)
    if conf_path is None:
        return False
    if not os.path.exists(conf_path):
        # FileNotFoundError added only in Python 3.3
        # https://docs.python.org/3/whatsnew/3.3.html#pep-3151-reworking-the-os-and-io-exception-hierarchy
        raise OSError("Error: Unable to locate specified logging configuration file!")
    logging.config.fileConfig(conf_path)
    return True
|
Setup logging via ini-file from logging_conf_file option.
|
def add_term(self, term_obj):
    """
    Adds a term to the term layer, creating the layer on first use.

    @type term_obj: L{Cterm}
    @param term_obj: the term object
    """
    layer = self.term_layer
    if layer is None:
        # Lazily create the layer and attach its node to the root element.
        layer = Cterms(type=self.type)
        self.term_layer = layer
        self.root.append(layer.get_node())
    layer.add_term(term_obj)
|
Adds a term to the term layer
@type term_obj: L{Cterm}
@param term_obj: the term object
|
def get_active(cls, database, conditions=""):
    """
    Gets active data from system.parts table

    :param database: A database object to fetch data from.
    :param conditions: WHERE clause conditions. Database and active conditions are added automatically
    :return: A list of SystemPart objects
    """
    # Append the 'active' predicate, joining with AND when the caller
    # supplied their own conditions.
    prefix = conditions + ' AND ' if conditions else ''
    return SystemPart.get(database, conditions=prefix + 'active')
|
Gets active data from system.parts table
:param database: A database object to fetch data from.
:param conditions: WHERE clause conditions. Database and active conditions are added automatically
:return: A list of SystemPart objects
|
def read_range(self, begin: str, end: str) -> bool:
    """
    Consume the head byte if it is >= begin and <= end, else return False.

    Same as 'a'..'z' in BNF.

    :param begin: inclusive lower bound character.
    :param end: inclusive upper bound character.
    :return: True when a character was consumed, False otherwise.
        (Annotation fixed: the function returns bool, not int.)
    """
    if self.read_eof():
        return False
    c = self._stream.peek_char
    if begin <= c <= end:
        self._stream.incpos()
        return True
    return False
|
Consume head byte if it is >= begin and <= end else return false
Same as 'a'..'z' in BNF
|
def wait_for_close(
        raiden: 'RaidenService',
        payment_network_id: PaymentNetworkID,
        token_address: TokenAddress,
        channel_ids: List[ChannelID],
        retry_timeout: float,
) -> None:
    """Wait until all channels are closed.

    Thin wrapper around ``wait_for_channel_in_states`` using the set of
    post-close channel states as the target.

    Note:
        This does not time out, use gevent.Timeout.
    """
    return wait_for_channel_in_states(
        raiden=raiden,
        payment_network_id=payment_network_id,
        token_address=token_address,
        channel_ids=channel_ids,
        retry_timeout=retry_timeout,
        target_states=CHANNEL_AFTER_CLOSE_STATES,
    )
|
Wait until all channels are closed.
Note:
This does not time out, use gevent.Timeout.
|
def delete_comment(self, project, work_item_id, comment_id):
    """DeleteComment.
    [Preview API] Delete a comment on a work item.
    :param str project: Project ID or project name
    :param int work_item_id: Id of a work item.
    :param int comment_id:
    """
    # Map python arguments onto their REST route placeholders;
    # None-valued arguments are simply omitted from the route.
    arg_table = (
        ('project', 'project', project, 'str'),
        ('workItemId', 'work_item_id', work_item_id, 'int'),
        ('commentId', 'comment_id', comment_id, 'int'),
    )
    route_values = {}
    for route_key, arg_name, value, kind in arg_table:
        if value is not None:
            route_values[route_key] = self._serialize.url(arg_name, value, kind)
    self._send(http_method='DELETE',
               location_id='608aac0a-32e1-4493-a863-b9cf4566d257',
               version='5.1-preview.3',
               route_values=route_values)
|
DeleteComment.
[Preview API] Delete a comment on a work item.
:param str project: Project ID or project name
:param int work_item_id: Id of a work item.
:param int comment_id:
|
def build_def_use(graph, lparams):
    """
    Builds the Def-Use and Use-Def (DU/UD) chains of the variables of the
    method.

    :param graph: control-flow graph of the method (iterated via
        ``graph.rpo``).
    :param lparams: method parameters, handed to the reaching-definition
        analysis.
    :return: tuple ``(UD, DU)`` where ``UD[(var, loc)]`` lists the
        locations whose definition of ``var`` reaches the use at ``loc``,
        and ``DU[(var, loc)]`` is the inverse mapping.
    """
    analysis = reach_def_analysis(graph, lparams)
    UD = defaultdict(list)
    for node in graph.rpo:
        for i, ins in node.get_loc_with_ins():
            for var in ins.get_used_vars():
                # var not in analysis.def_to_loc: test that the register
                # exists. It is possible that it is not the case, when a
                # variable is of a type which is stored on multiple registers
                # e.g: a 'double' stored in v3 is also present in v4, so a call
                # to foo(v3), will in fact call foo(v3, v4).
                if var not in analysis.def_to_loc:
                    continue
                ldefs = analysis.defs[node]
                # Prefer the latest definition of var located before this
                # use within the same node.
                prior_def = -1
                for v in ldefs.get(var, set()):
                    if prior_def < v < i:
                        prior_def = v
                if prior_def >= 0:
                    UD[var, i].append(prior_def)
                else:
                    # No local definition: fall back to the definitions
                    # reaching this node per the analysis.
                    intersect = analysis.def_to_loc[var].intersection(
                        analysis.R[node])
                    UD[var, i].extend(intersect)
    DU = defaultdict(list)
    # Invert the UD chains to obtain the DU chains.
    for var_loc, defs_loc in UD.items():
        var, loc = var_loc
        for def_loc in defs_loc:
            DU[var, def_loc].append(loc)
    return UD, DU
|
Builds the Def-Use and Use-Def (DU/UD) chains of the variables of the
method.
|
def ConvertValues(default_metadata, values, token=None, options=None):
    """Converts a set of RDFValues into a set of export-friendly RDFValues.

    Args:
      default_metadata: export.ExportedMetadata instance with basic information
        about where the values come from. This metadata will be passed to
        exporters.
      values: Values to convert. They should be of the same type.
      token: Security token.
      options: rdfvalue.ExportOptions instance that will be passed to
        ExportConverters.

    Returns:
      Converted values. Converted values may be of different types
      (unlike the source values which are all of the same type). This is due to
      the fact that multiple ExportConverters may be applied to the same value
      thus generating multiple converted values of different types.

    Raises:
      NoConverterFound: in case no suitable converters were found for the
        values.
    """
    # Pair every value with the shared metadata and delegate the actual
    # conversion to the batch helper.
    paired = [(default_metadata, value) for value in values]
    return ConvertValuesWithMetadata(paired, token=token, options=options)
|
Converts a set of RDFValues into a set of export-friendly RDFValues.
Args:
default_metadata: export.ExportedMetadata instance with basic information
about where the values come from. This metadata will be passed to
exporters.
values: Values to convert. They should be of the same type.
token: Security token.
options: rdfvalue.ExportOptions instance that will be passed to
ExportConverters.
Returns:
Converted values. Converted values may be of different types
(unlike the source values which are all of the same type). This is due to
the fact that multiple ExportConverters may be applied to the same value
thus generating multiple converted values of different types.
Raises:
NoConverterFound: in case no suitable converters were found for the values.
|
def login(self):
    """
    View function to log a user in. Supports html and json requests.

    On a valid POST the user is logged in and either a JSON token
    payload or a redirect is returned; on failure the form carries a
    single top-level '_error' entry and the login page (or a 401 JSON
    response) is rendered.
    """
    form = self._get_form('SECURITY_LOGIN_FORM')
    if form.validate_on_submit():
        try:
            self.security_service.login_user(form.user, form.remember.data)
        except AuthenticationError as e:
            # Surface the failure as a single top-level form error.
            form._errors = {'_error': [str(e)]}
        else:
            # Persist the session only once this request finishes.
            self.after_this_request(self._commit)
            if request.is_json:
                return self.jsonify({'token': form.user.get_auth_token(),
                                     'user': form.user})
            self.flash(_('flask_unchained.bundles.security:flash.login'),
                       category='success')
            return self.redirect('SECURITY_POST_LOGIN_REDIRECT_ENDPOINT')
    else:
        # FIXME-identity
        identity_attrs = app.config.SECURITY_USER_IDENTITY_ATTRIBUTES
        msg = f"Invalid {', '.join(identity_attrs)} and/or password."
        # we just want a single top-level form error
        form._errors = {'_error': [msg]}
        for field in form._fields.values():
            field.errors = None
    if form.errors and request.is_json:
        return self.jsonify({'error': form.errors.get('_error')[0]},
                            code=HTTPStatus.UNAUTHORIZED)
    return self.render('login',
                       login_user_form=form,
                       **self.security.run_ctx_processor('login'))
|
View function to log a user in. Supports html and json requests.
|
def split(*items):
    """Split samples into all possible genomes for alignment.

    Samples whose configuration requests disambiguation are emitted once
    for the base genome plus once per disambiguation organism, each time
    as a deep copy with the genome build swapped in.

    :param items: one-element lists wrapping sample dictionaries.
    :return: list of one-element lists of (possibly copied) samples.
    """
    out = []
    for data in [x[0] for x in items]:
        dis_orgs = data["config"]["algorithm"].get("disambiguate")
        if dis_orgs:
            # Mark the original sample as the disambiguation base.
            if not data.get("disambiguate", None):
                data["disambiguate"] = {"genome_build": data["genome_build"],
                                        "base": True}
            out.append([data])
            # handle the instance where a single organism is disambiguated
            if isinstance(dis_orgs, six.string_types):
                dis_orgs = [dis_orgs]
            for dis_org in dis_orgs:
                dis_data = copy.deepcopy(data)
                dis_data["disambiguate"] = {"genome_build": dis_org}
                dis_data["genome_build"] = dis_org
                # Effect prediction is disabled for the extra genomes.
                dis_data["config"]["algorithm"]["effects"] = False
                dis_data = run_info.add_reference_resources(dis_data)
                out.append([dis_data])
        else:
            out.append([data])
    return out
|
Split samples into all possible genomes for alignment.
|
def refresh_rooms(self):
    """Calls GET /joined_rooms and rebuilds the cached room objects."""
    joined = self.user_api.get_joined_rooms()["joined_rooms"]
    # Overwrite (or insert) an entry for each currently joined room.
    self._rooms.update(
        (room_id, MatrixRoom(room_id, self.user_api)) for room_id in joined)
|
Calls GET /joined_rooms to refresh rooms list.
|
def __get_average_inter_cluster_distance(self, entry):
    """!
    @brief Calculates average inter cluster distance between current and specified clusters.

    @param[in] entry (cfentry): Clustering feature to which distance should be obtained.

    @return (double) Average inter cluster distance.
    """
    cross_term = sum(list_math_multiplication(self.linear_sum, entry.linear_sum))
    numerator = (entry.number_points * self.square_sum
                 - 2.0 * cross_term
                 + self.number_points * entry.square_sum)
    return (numerator / (self.number_points * entry.number_points)) ** 0.5
|
!
@brief Calculates average inter cluster distance between current and specified clusters.
@param[in] entry (cfentry): Clustering feature to which distance should be obtained.
@return (double) Average inter cluster distance.
|
def close(self):
    """Close the policy instance.

    Marks the policy as closed; calling close() on an already-closed
    policy only logs a warning.
    """
    self._logger.info("Closing")
    if not self._opened:
        # Fixed typo in the log message ("alredy" -> "already").
        self._logger.warning(
            "close() called, but connection policy was already closed")
        return
    self._opened = False
|
Close the policy instance.
|
def setex(self, key, seconds, value):
    """Set the value and expiration of a key.

    If seconds is float it will be multiplied by 1000
    coerced to int and passed to `psetex` method.

    :raises TypeError: if seconds is neither int nor float
    """
    if isinstance(seconds, float):
        # Sub-second resolution is delegated to PSETEX (milliseconds).
        return self.psetex(key, int(seconds * 1000), value)
    if not isinstance(seconds, int):
        # Fixed error message: the argument here is seconds, not
        # milliseconds (copy-paste from psetex).
        raise TypeError("seconds argument must be int")
    fut = self.execute(b'SETEX', key, seconds, value)
    return wait_ok(fut)
|
Set the value and expiration of a key.
If seconds is float it will be multiplied by 1000
coerced to int and passed to `psetex` method.
:raises TypeError: if seconds is neither int nor float
|
def process_xlsx(content):
    """
    Turn Excel file contents into Tarbell worksheet data.

    :param content: raw bytes of an .xlsx workbook.
    :return: dict mapping slugified worksheet names to worksheet data.
    :raises MergedCellError: if a worksheet contains merged cells.
    """
    data = {}
    workbook = xlrd.open_workbook(file_contents=content)
    # Worksheets whose name starts with '_' are private and skipped.
    # (The redundant re-check inside the loop was dead code and removed.)
    worksheets = [w for w in workbook.sheet_names() if not w.startswith('_')]
    for worksheet_name in worksheets:
        worksheet = workbook.sheet_by_name(worksheet_name)
        merged_cells = worksheet.merged_cells
        if len(merged_cells):
            raise MergedCellError(worksheet.name, merged_cells)
        worksheet.name = slughifi(worksheet.name)
        headers = make_headers(worksheet)
        worksheet_data = make_worksheet_data(headers, worksheet)
        data[worksheet.name] = worksheet_data
    return data
|
Turn Excel file contents into Tarbell worksheet data
|
def _get_ignore_from_manifest_lines(lines):
    """Gather the various ignore patterns from a MANIFEST.in.
    'lines' should be a list of strings with comments removed
    and continuation lines joined.
    Handled commands: exclude, global-exclude, recursive-exclude, prune.
    Returns a list of standard ignore patterns and a list of regular
    expressions to ignore.
    """
    ignore = []
    ignore_regexps = []
    for line in lines:
        try:
            cmd, rest = line.split(None, 1)
        except ValueError:
            # no whitespace, so not interesting
            continue
        for part in rest.split():
            # distutils enforces these warnings on Windows only
            if part.startswith('/'):
                warning("ERROR: Leading slashes are not allowed in MANIFEST.in on Windows: %s" % part)
            if part.endswith('/'):
                warning("ERROR: Trailing slashes are not allowed in MANIFEST.in on Windows: %s" % part)
        if cmd == 'exclude':
            # An exclude of 'dirname/*css' can match 'dirname/foo.css'
            # but not 'dirname/subdir/bar.css'. We need a regular
            # expression for that, since fnmatch doesn't pay attention to
            # directory separators.
            for pat in rest.split():
                if '*' in pat or '?' in pat or '[!' in pat:
                    ignore_regexps.append(_glob_to_regexp(pat))
                else:
                    # No need for special handling.
                    ignore.append(pat)
        elif cmd == 'global-exclude':
            ignore.extend(rest.split())
        elif cmd == 'recursive-exclude':
            try:
                dirname, patterns = rest.split(None, 1)
            except ValueError:
                # Wrong MANIFEST.in line.
                warning("You have a wrong line in MANIFEST.in: %r\n"
                        "'recursive-exclude' expects <dir> <pattern1> "
                        "<pattern2> ..." % line)
                continue
            # Strip path separator for clarity.
            dirname = dirname.rstrip(os.path.sep)
            for pattern in patterns.split():
                if pattern.startswith('*'):
                    ignore.append(dirname + os.path.sep + pattern)
                else:
                    # 'recursive-exclude plone metadata.xml' should
                    # exclude plone/metadata.xml and
                    # plone/*/metadata.xml, where * can be any number
                    # of sub directories. We could use a regexp, but
                    # two ignores seems easier.
                    ignore.append(dirname + os.path.sep + pattern)
                    ignore.append(
                        dirname + os.path.sep + '*' + os.path.sep + pattern)
        elif cmd == 'prune':
            # rest is considered to be a directory name. It should
            # not contain a path separator, as it actually has no
            # effect in that case, but that could differ per python
            # version. We strip it here to avoid double separators.
            # XXX: mg: I'm not 100% sure the above is correct, AFAICS
            # all pythons from 2.6 complain if the path has a leading or
            # trailing slash -- on Windows, that is.
            rest = rest.rstrip('/\\')
            ignore.append(rest)
            ignore.append(rest + os.path.sep + '*')
    return ignore, ignore_regexps
|
Gather the various ignore patterns from a MANIFEST.in.
'lines' should be a list of strings with comments removed
and continuation lines joined.
Returns a list of standard ignore patterns and a list of regular
expressions to ignore.
|
def save_scenario(self, scenario_file_path=None):
    """Save current scenario to a text file.

    You can use the saved scenario with the batch runner.

    :param scenario_file_path: A path to the scenario file. When None,
        the user is asked for a destination via a file dialog.
    :type scenario_file_path: str
    """
    # Validate Input
    warning_title = tr('InaSAFE Save Scenario Warning')
    is_valid, warning_message = self.validate_input()
    if not is_valid:
        # noinspection PyCallByClass,PyTypeChecker,PyArgumentList
        QMessageBox.warning(self, warning_title, warning_message)
        return
    # Make extent to look like:
    # 109.829170982, -8.13333290561, 111.005344795, -7.49226294379
    # Added in 2.2 to support user defined analysis extents
    if self.dock.extent.user_extent is not None \
            and self.dock.extent.crs is not None:
        # In V4.0, user_extent is QgsGeometry.
        user_extent = self.dock.extent.user_extent.boundingBox()
        extent = extent_to_array(user_extent, self.dock.extent.crs)
    else:
        extent = viewport_geo_array(self.iface.mapCanvas())
    extent_string = ', '.join(('%f' % x) for x in extent)
    exposure_path = self.exposure_layer.source()
    hazard_path = self.hazard_layer.source()
    title = self.keyword_io.read_keywords(self.hazard_layer, 'title')
    title = tr(title)
    default_filename = title.replace(
        ' ', '_').replace('(', '').replace(')', '')
    # Popup a dialog to request the filename if scenario_file_path = None
    dialog_title = tr('Save Scenario')
    if scenario_file_path is None:
        # noinspection PyCallByClass,PyTypeChecker
        scenario_file_path, __ = QFileDialog.getSaveFileName(
            self,
            dialog_title,
            os.path.join(self.output_directory, default_filename + '.txt'),
            "Text files (*.txt)")
    if scenario_file_path is None or scenario_file_path == '':
        return
    self.output_directory = os.path.dirname(scenario_file_path)
    # Write to file
    parser = ConfigParser()
    parser.add_section(title)
    # Relative path is not recognized by the batch runner, so we use
    # absolute path.
    parser.set(title, 'exposure', exposure_path)
    parser.set(title, 'hazard', hazard_path)
    parser.set(title, 'extent', extent_string)
    if self.dock.extent.crs is None:
        parser.set(title, 'extent_crs', 'EPSG:4326')
    else:
        parser.set(
            title,
            'extent_crs',
            self.dock.extent.crs.authid())
    if self.aggregation_layer is not None:
        aggregation_path = self.aggregation_layer.source()
        relative_aggregation_path = self.relative_path(
            scenario_file_path, aggregation_path)
        parser.set(title, 'aggregation', relative_aggregation_path)
    # Append to the scenario file. Using a context manager fixes the
    # previous open()/close() pair whose `finally: of.close()` raised
    # NameError when open() itself failed, and double-closed on success.
    # noinspection PyBroadException
    try:
        with open(scenario_file_path, 'a') as scenario_file:
            parser.write(scenario_file)
    except Exception as e:
        # noinspection PyTypeChecker,PyCallByClass,PyArgumentList
        QMessageBox.warning(
            self,
            'InaSAFE',
            tr(
                'Failed to save scenario to {path}, exception '
                '{exception}').format(
                path=scenario_file_path, exception=str(e)))
    # Save State
    self.save_state()
|
Save current scenario to a text file.
You can use the saved scenario with the batch runner.
:param scenario_file_path: A path to the scenario file.
:type scenario_file_path: str
|
def __create_core_and_model_object_copies(self, selection, smart_selection_adaption):
    """Copy all elements of a selection.

    The method copies all objects and modifies the selection before copying the elements if the smart flag is true.
    The smart selection adaption is by default enabled. In any case the selection is reduced to have one parent
    state that is used as the root of copy, except a root state it self is selected.

    :param Selection selection: an arbitrary selection, whose elements should be copied
    :param bool smart_selection_adaption: flag to enable smart selection adaptation mode
    :return: dictionary of selected models copied, parent model of copy
    """
    all_models_selected = selection.get_all()
    if not all_models_selected:
        logger.warning("Nothing to copy because state machine selection is empty.")
        return
    parent_m = self.do_selection_reduction_to_one_parent(selection)
    # Remember the parent the copy was rooted at (None for a root state).
    self.copy_parent_state_id = parent_m.state.state_id if parent_m else None
    if smart_selection_adaption:
        self.do_smart_selection_adaption(selection, parent_m)
    # store all lists of selection
    selected_models_dict = {}
    for state_element_attr in ContainerState.state_element_attrs:
        selected_models_dict[state_element_attr] = list(getattr(selection, state_element_attr))
    # delete old models
    self.destroy_all_models_in_dict(self.model_copies)
    # copy all selected elements
    self.model_copies = deepcopy(selected_models_dict)
    new_content_of_clipboard = ', '.join(["{0} {1}".format(len(elems), key if len(elems) > 1 else key[:-1])
                                          for key, elems in self.model_copies.items() if elems])
    logger.info("The new content is {0}".format(new_content_of_clipboard.replace('_', ' ')))
    return selected_models_dict, parent_m
|
Copy all elements of a selection.
The method copies all objects and modifies the selection before copying the elements if the smart flag is true.
The smart selection adaption is by default enabled. In any case the selection is reduced to have one parent
state that is used as the root of copy, except a root state it self is selected.
:param Selection selection: an arbitrary selection, whose elements should be copied
.param bool smart_selection_adaption: flag to enable smart selection adaptation mode
:return: dictionary of selected models copied, parent model of copy
|
def validate(collection, onerror: Callable[[str, List], None] = None):
    """Validate BioC data structure.

    :param collection: the BioC collection to check.
    :param onerror: optional callback invoked on validation errors.
    """
    validator = BioCValidator(onerror)
    validator.validate(collection)
|
Validate BioC data structure.
|
def fromPy(cls, val, typeObj, vldMask=None):
    """
    Build a value of this HDL type from a python bool (or None).

    :param val: value of python type bool or None
    :param typeObj: instance of HdlType
    :param vldMask: None means the mask is derived from val;
        0 forces the value to be invalid,
        1 requires the value to be valid
    """
    if val is None:
        # An unknown value must not claim to be valid.
        assert vldMask is None or vldMask == 0
        return cls(False, typeObj, 0)
    if vldMask == 0:
        # Explicitly invalidated: the payload is forced to False.
        return cls(False, typeObj, 0)
    return cls(bool(val), typeObj, 1)
|
:param val: value of python type bool or None
:param typeObj: instance of HdlType
:param vldMask: None vldMask is resolved from val,
if is 0 value is invalidated
if is 1 value has to be valid
|
def bind_env(self, action, env):
    """Bind an environment variable to an argument action.

    The env value will traditionally be something uppercase like
    `MYAPP_FOO_ARG`.  Note that the ENV value is assigned using
    `set_defaults()` and as such it will be overridden if the argument
    is set via `parse_args()`.

    :raises ValueError: when `env` is already bound to another action.
    """
    registered = self._env_actions
    if env in registered:
        raise ValueError('Duplicate ENV variable: %s' % env)
    registered[env] = action
    action.env = env
|
Bind an environment variable to an argument action. The env
value will traditionally be something uppercase like `MYAPP_FOO_ARG`.
Note that the ENV value is assigned using `set_defaults()` and as such
it will be overridden if the argument is set via `parse_args()`
|
def str_dict_keys(a_dict):
    """return a modified dict where all the keys that are anything but str get
    converted to str.
    E.g.

    >>> result = str_dict_keys({u'name': u'Peter', u'age': 99, 1: 2})
    >>> # can't compare whole dicts in doctests
    >>> result['name']
    u'Peter'
    >>> result['age']
    99
    >>> result[1]
    2

    The reason for this is that in Python <= 2.6.4 doing
    ``MyClass(**{u'name': u'Peter'})`` would raise a TypeError

    Note that only unicode types are converted to str types.
    The reason for that is you might have a class that looks like this::

        class Option(object):
            def __init__(self, foo=None, bar=None, **kwargs):
                ...

    And it's being used like this::

        Option(**{u'foo':1, u'bar':2, 3:4})

    Then you don't want to change that {3:4} part which becomes part of
    `**kwargs` inside the __init__ method.
    Using integers as parameter keys is a silly example but the point is that
    due to the python 2.6.4 bug only unicode keys are converted to str.
    """
    converted = {}
    for key, value in a_dict.items():
        # Only Python 2 unicode keys need the str() coercion.
        if six.PY2 and isinstance(key, six.text_type):
            converted[str(key)] = value
        else:
            converted[key] = value
    return converted
|
return a modified dict where all the keys that are anything but str get
converted to str.
E.g.
>>> result = str_dict_keys({u'name': u'Peter', u'age': 99, 1: 2})
>>> # can't compare whole dicts in doctests
>>> result['name']
u'Peter'
>>> result['age']
99
>>> result[1]
2
The reason for this is that in Python <= 2.6.4 doing
``MyClass(**{u'name': u'Peter'})`` would raise a TypeError
Note that only unicode types are converted to str types.
The reason for that is you might have a class that looks like this::
class Option(object):
def __init__(self, foo=None, bar=None, **kwargs):
...
And it's being used like this::
Option(**{u'foo':1, u'bar':2, 3:4})
Then you don't want to change that {3:4} part which becomes part of
`**kwargs` inside the __init__ method.
Using integers as parameter keys is a silly example but the point is that
due to the python 2.6.4 bug only unicode keys are converted to str.
|
def handle_timeouts(self):
    """Expire timed-out operations.

    Raise timed-out operations with an OperationTimeout in the associated
    coroutine (if the coroutine is still alive and the operation hasn't
    actually completed successfully) or, if the operation has a
    weak_timeout flag, push the timeout point forward and add the operation
    back in the heapq.

    weak_timeout notes:

    * weak_timeout means a last_update attribute is updated with a
      timestamp of the last activity in the operation - for example, an
      operation may receive new data and still not complete (not enough
      data, etc.)
    * if there was activity since the last time we checked this timeout,
      we push it back in the heapq with a new timeout value so we'll check
      it again later

    Also, we call a cleanup on the op; only if cleanup returns true do we
    raise the timeout (finalized isn't enough to check if the op has
    completed since finalized is set when the operation gets back in the
    coro - and it might still be in the Scheduler.active queue when we get
    to this timeout - well, this is certainly a problem magnet:
    TODO: fix_finalized)
    """
    now = getnow()
    #~ print '>to:', self.timeouts, self.timeouts and self.timeouts[0].timeout <= now
    # self.timeouts is a heapq ordered by .timeout, so the earliest
    # deadline is always at index 0; pop every op whose deadline passed.
    while self.timeouts and self.timeouts[0].timeout <= now:
        op = heapq.heappop(self.timeouts)
        coro = op.coro
        if op.weak_timeout and hasattr(op, 'last_update'):
            if op.last_update > op.last_checkpoint:
                # Activity occurred since our last check: extend the
                # deadline by op.delta and re-queue instead of expiring.
                op.last_checkpoint = op.last_update
                op.timeout = op.last_checkpoint + op.delta
                heapq.heappush(self.timeouts, op)
                continue
        # Only deliver the timeout if the op is still pending, the coro is
        # alive, and op.cleanup() confirms the op hasn't completed.
        if op.state is events.RUNNING and coro and coro.running and \
                op.cleanup(self, coro):
            self.active.append((
                CoroutineException(
                    events.OperationTimeout,
                    events.OperationTimeout(op)
                ),
                coro
            ))
|
Handle timeouts. Raise timed-out operations with an OperationTimeout
in the associated coroutine (if they are still alive and the operation
hasn't actually successfully completed) or, if the operation has a
weak_timeout flag, update the timeout point and add it back in the
heapq.
weak_timeout notes:
* weak_timeout means a last_update attribute is updated with
a timestamp of the last activity in the operation - for example, an
operation may receive new data and not complete (not enough data, etc.)
* if there was activity since the last time we checked this
timeout, we push it back in the heapq with a new timeout value and check
it again later
Also, we call a cleanup on the op, only if cleanup return true we raise
the timeout (finalized isn't enough to check if the op has completed
since finalized is set when the operation gets back in the coro - and
it might still be in the Scheduler.active queue when we get to this
timeout - well, this is certainly a problem magnet: TODO: fix_finalized)
|
def _chk_type(recdef, rec):
"""Checks if type of `rec` matches `recdef`
:param recdef: instance of RecordDef
:param rec: instance of Record
:raises: `TypeError`
"""
if len(recdef) != len(rec):
raise TypeError("Number of columns (%d) is different from RecordDef (%d)" % (len(rec), len(recdef)))
for i in xrange(len(recdef)):
try:
def_type = recdef[i].type
col_type = Type.equivalent_relshell_type(rec[i])
if col_type != def_type:
raise TypeError("Column %d has mismatched type: Got '%s' [%s] ; Expected [%s]" %
(i, rec[i], col_type, def_type))
except AttributeError as e:
# recdef[i].type is not defined, then any relshell type is allowed
try:
Type.equivalent_relshell_type(rec[i])
except NotImplementedError as e:
raise TypeError("%s" % (e))
|
Checks if type of `rec` matches `recdef`
:param recdef: instance of RecordDef
:param rec: instance of Record
:raises: `TypeError`
|
def result(self, wait=0):
    """Fetch the full, unsorted list of results for this task.

    :param int wait: how many milliseconds to wait for a result
    :return: an unsorted list of results, or ``None`` when the task has
        not been started yet
    """
    if not self.started:
        return None
    return result(self.id, wait=wait, cached=self.cached)
|
return the full list of results.
:param int wait: how many milliseconds to wait for a result
:return: an unsorted list of results
|
def _checkResponseWriteData(payload, writedata):
    """Verify that the response payload echoes the commanded write data.

    The bytes 2 and 3 (zero based counting) in the payload hold the write data.

    Args:
        * payload (string): The payload
        * writedata (string): The data to write, length should be 2 bytes.

    Raises:
        TypeError, ValueError
    """
    _checkString(payload, minlength=4, description='payload')
    _checkString(writedata, minlength=2, maxlength=2, description='writedata')
    # Bytes 2-3 of the payload echo the write data of the request.
    echoedWritedata = payload[2:4]
    if echoedWritedata != writedata:
        raise ValueError('Wrong write data in the response: {0!r}, but commanded is {1!r}. The data payload is: {2!r}'.format(
            echoedWritedata, writedata, payload))
|
Check that the write data as given in the response is correct.
The bytes 2 and 3 (zero based counting) in the payload holds the write data.
Args:
* payload (string): The payload
* writedata (string): The data to write, length should be 2 bytes.
Raises:
TypeError, ValueError
|
def stop_recording(self):
    """Stop recording from the audio source.

    Signals the capture loop to exit via the ``_stop_recording`` event, then
    stops the underlying source while holding ``_source_lock`` so the stop
    cannot race with concurrent access to the source.
    """
    self._stop_recording.set()
    with self._source_lock:
        self._source.stop()
        self._recording = False
|
Stop recording from the audio source.
|
def add_episode(self, text, text_format, title, author, summary=None,
                publish_date=None, synthesizer='watson', synth_args=None, sentence_break='. '):
    """
    Create a new :class:`Episode` and register it under its title.

    :param text:
        See :meth:`Episode`.
    :param text_format:
        See :meth:`Episode`.
    :param title:
        See :meth:`Episode`. Must be unique within this podcast.
    :param author:
        See :meth:`Episode`.
    :param summary:
        See :meth:`Episode`.
    :param publish_date:
        See :meth:`Episode`.
    :param synthesizer:
        See :meth:`typecaster.utils.text_to_speech`.
    :param synth_args:
        See :meth:`typecaster.utils.text_to_speech`.
    :param sentence_break:
        See :meth:`typecaster.utils.text_to_speech`.
    """
    if title in self.episodes:
        raise ValueError('"' + title + '" already exists as an episode title.')
    # Derive the mp3 link from a slugified title under the output path.
    slug = title.replace(' ', '_').lower()
    link = '{0}/{1}.mp3'.format(self.output_path, slug)
    ssml_text = convert_to_ssml(text, text_format)
    self.episodes[title] = Episode(ssml_text, text_format, title, author, link,
                                   summary, publish_date, synthesizer,
                                   synth_args, sentence_break)
|
Add a new episode to the podcast.
:param text:
See :meth:`Episode`.
:param text_format:
See :meth:`Episode`.
:param title:
See :meth:`Episode`.
:param author:
See :meth:`Episode`.
:param summary:
See :meth:`Episode`.
:param publish_date:
See :meth:`Episode`.
:param synthesizer:
See :meth:`typecaster.utils.text_to_speech`.
:param synth_args:
See :meth:`typecaster.utils.text_to_speech`.
:param sentence_break:
See :meth:`typecaster.utils.text_to_speech`.
|
def ls(ctx, name):
    """List EMR instances (clusters in any active state)."""
    session = create_session(ctx.obj['AWS_PROFILE_NAME'])
    emr = session.client('emr')
    active_states = ['RUNNING', 'STARTING', 'BOOTSTRAPPING', 'WAITING']
    response = emr.list_clusters(ClusterStates=active_states)
    for cluster in response['Clusters']:
        click.echo("{0}\t{1}\t{2}".format(
            cluster['Id'], cluster['Name'], cluster['Status']['State']))
|
List EMR instances
|
def apply_vcc(self, vcc):
    """
    Applies velocity contrast curve constraint to each population.

    See :func:`vespa.stars.StarPopulation.apply_vcc`;
    all arguments passed to that function for each population.

    :param vcc: velocity contrast curve constraint forwarded to every
        non-specific population in ``self.poplist``.
    """
    if 'secondary spectrum' not in self.constraints:
        self.constraints.append('secondary spectrum')
    for pop in self.poplist:
        if not pop.is_specific:
            try:
                pop.apply_vcc(vcc)
            except Exception:
                # A failing population shouldn't abort the others.  Narrowed
                # the bare ``except:`` so KeyboardInterrupt/SystemExit still
                # propagate; use lazy %-args for the log call.
                logging.info('VCC constraint not applied to %s model', pop.model)
|
Applies velocity contrast curve constraint to each population
See :func:`vespa.stars.StarPopulation.apply_vcc`;
all arguments passed to that function for each population.
|
def _validate_importers(importers):
    """Validate importer ``(priority, callable)`` pairs and wrap each
    callable with our output formatter.

    Returns ``None`` when no importers were supplied, otherwise a tuple of
    ``(priority, wrapped_callable)`` tuples.
    """
    # Having no importers at all is perfectly fine.
    if importers is None:
        return None
    wrapped = []
    for priority, func in importers:
        assert isinstance(priority, int), priority
        assert callable(func), func
        wrapped.append((priority, _importer_callback_wrapper(func)))
    # Downstream code assumes a tuple of tuples.
    return tuple(wrapped)
|
Validates the importers and decorates the callables with our output
formatter.
|
def GetEntries(
    self, parser_mediator, cookie_data=None, url=None, **kwargs):
    """Extracts event objects from the cookie.

    The cookie value (Google Analytics 'utmb') is a '.'-separated string
    with either 1 field (a timestamp) or 4 fields (domain hash, pages
    viewed, a mode field and a timestamp).  Any other field count is
    reported as an extraction warning and no event is produced.

    Args:
      parser_mediator (ParserMediator): parser mediator.
      cookie_data (bytes): cookie data.
      url (str): URL or path where the cookie got set.
    """
    fields = cookie_data.split('.')
    number_of_fields = len(fields)
    if number_of_fields not in (1, 4):
        parser_mediator.ProduceExtractionWarning(
            'unsupported number of fields: {0:d} in cookie: {1:s}'.format(
                number_of_fields, self.COOKIE_NAME))
        return
    if number_of_fields == 1:
        domain_hash = None
        try:
            # TODO: fix that we're losing precision here use dfdatetime.
            last_visit_posix_time = int(fields[0], 10) / 10000000
        except ValueError:
            last_visit_posix_time = None
        number_of_pages_viewed = None
    elif number_of_fields == 4:
        domain_hash = fields[0]
        try:
            number_of_pages_viewed = int(fields[1], 10)
        except ValueError:
            number_of_pages_viewed = None
        try:
            # Field 2 values '8' and '9' indicate a millisecond timestamp
            # in field 3; otherwise the value is taken as-is.
            if fields[2] in ('8', '9'):
                # TODO: fix that we're losing precision here use dfdatetime.
                last_visit_posix_time = int(fields[3], 10) / 1000
            else:
                last_visit_posix_time = int(fields[3], 10)
        except ValueError:
            last_visit_posix_time = None
    if last_visit_posix_time is not None:
        date_time = dfdatetime_posix_time.PosixTime(
            timestamp=last_visit_posix_time)
        timestamp_description = definitions.TIME_DESCRIPTION_LAST_VISITED
    else:
        # Timestamp could not be parsed: emit a placeholder "Not set" time.
        date_time = dfdatetime_semantic_time.SemanticTime('Not set')
        timestamp_description = definitions.TIME_DESCRIPTION_NOT_A_TIME
    event_data = GoogleAnalyticsEventData('utmb')
    event_data.cookie_name = self.COOKIE_NAME
    event_data.domain_hash = domain_hash
    event_data.pages_viewed = number_of_pages_viewed
    event_data.url = url
    event = time_events.DateTimeValuesEvent(date_time, timestamp_description)
    parser_mediator.ProduceEventWithEventData(event, event_data)
|
Extracts event objects from the cookie.
Args:
parser_mediator (ParserMediator): parser mediator.
cookie_data (bytes): cookie data.
url (str): URL or path where the cookie got set.
|
def hide(self):
    """Overrides Qt Method.

    Hide the replace widgets and the pane itself, notify listeners via
    ``visibility_changed``, hand keyboard focus back to the attached editor
    and clear any highlighted matches.
    """
    for widget in self.replace_widgets:
        widget.hide()
    # Call the base implementation (unbound) to actually hide this widget.
    QWidget.hide(self)
    self.visibility_changed.emit(False)
    if self.editor is not None:
        # Return focus to the editor the search pane was attached to.
        self.editor.setFocus()
    self.clear_matches()
|
Overrides Qt Method
|
def get_groups_by_userid(cls, userid, request):
    """Return group identifiers of the user with id `userid`.

    Used by Ticket-based auth as the `callback` kwarg.  Returns ``None``
    when the user can't be cached or has no groups.
    """
    try:
        cache_request_user(cls, request, userid)
    except Exception as ex:
        # On any failure, drop the auth ticket and report no groups.
        log.error(str(ex))
        forget(request)
        return None
    user = request._user
    if user:
        return ['g:%s' % group for group in user.groups]
|
Return group identifiers of user with id :userid:
Used by Ticket-based auth as `callback` kwarg.
|
def get_root_url(url, warn=True):
    """
    Derive the API root URL ("<scheme>://<host>/api/v4") for a URL, as
    described in the LuminosoClient documentation.

    Raises ValueError when the URL has no scheme; optionally warns when the
    given path was not already under ``/api/v4``.
    """
    parsed = urlparse(url)
    # A relative URL has no scheme; we need a complete one to find the host.
    if not parsed.scheme:
        raise ValueError('Please supply a full URL, beginning with http:// '
                         'or https:// .')
    root_url = '%s://%s/api/v4' % (parsed.scheme, parsed.netloc)
    if warn and not parsed.path.startswith('/api/v4'):
        logger.warning('Using %s as the root url' % root_url)
    return root_url
|
Get the "root URL" for a URL, as described in the LuminosoClient
documentation.
|
def seek(self, offset, whence=os.SEEK_SET):
    """Seek to position in stream, see file.seek.

    Positions are confined to this chunk's window
    [self.offset, self.offset + self.len] within the parent file.
    """
    if whence == os.SEEK_SET:
        target = self.offset + offset
    elif whence == os.SEEK_CUR:
        target = self.tell() + offset
    elif whence == os.SEEK_END:
        target = self.offset + self.len + offset
    else:
        raise ValueError("invalid whence {}".format(whence))
    # Reject positions outside the chunk's window in the parent file.
    if not (self.offset <= target <= self.offset + self.len):
        raise ValueError("seek position beyond chunk area")
    self.parent_fd.seek(target, os.SEEK_SET)
|
Seek to position in stream, see file.seek
|
def as_string(self, default_from=None):
    """Creates the email.

    Builds a MIME message from this message's body, optional HTML part and
    attachments, fills in the standard headers and returns the serialized
    message as a string.

    :param default_from: sender address used when ``self.sender`` is unset.
    :return: the full message as a string.
    """
    encoding = self.charset or 'utf-8'
    attachments = self.attachments or []
    if len(attachments) == 0 and not self.html:
        # No html content and zero attachments means plain text
        msg = self._mimetext(self.body)
    elif len(attachments) > 0 and not self.html:
        # No html and at least one attachment means multipart
        msg = MIMEMultipart()
        msg.attach(self._mimetext(self.body))
    else:
        # Anything else: multipart with a plain/html "alternative" part.
        msg = MIMEMultipart()
        alternative = MIMEMultipart('alternative')
        alternative.attach(self._mimetext(self.body, 'plain'))
        alternative.attach(self._mimetext(self.html, 'html'))
        msg.attach(alternative)
    if self.charset:
        # Explicit charset: wrap the subject so it gets encoded correctly.
        msg['Subject'] = Header(self.subject, encoding)
    else:
        msg['Subject'] = self.subject
    sender = self.sender or default_from
    if sender is not None:
        msg['From'] = sanitize_address(sender, encoding)
    # set() deduplicates recipient addresses before joining.
    msg['To'] = ', '.join(list(set(sanitize_addresses(self.recipients, encoding))))
    msg['Date'] = formatdate(self.date, localtime=True)
    # see RFC 5322 section 3.6.4.
    msg['Message-ID'] = self.msgId
    if self.cc:
        msg['Cc'] = ', '.join(list(set(sanitize_addresses(self.cc, encoding))))
    if self.reply_to:
        msg['Reply-To'] = sanitize_address(self.reply_to, encoding)
    if self.extra_headers:
        for k, v in self.extra_headers.items():
            msg[k] = v
    for attachment in attachments:
        f = MIMEBase(*attachment.content_type.split('/'))
        f.set_payload(attachment.data)
        encode_base64(f)
        try:
            # Probe whether the filename is pure ASCII; non-ASCII names
            # need the (charset, language, value) header form below.
            attachment.filename and attachment.filename.encode('ascii')
        except UnicodeEncodeError:
            filename = attachment.filename
            if not PY3:
                filename = filename.encode('utf8')
            f.add_header('Content-Disposition', attachment.disposition,
                         filename=('UTF8', '', filename))
        else:
            f.add_header('Content-Disposition', '%s;filename=%s' %
                         (attachment.disposition, attachment.filename))
        for key, value in attachment.headers:
            f.add_header(key, value)
        msg.attach(f)
    return msg.as_string()
|
Creates the email
|
def td_waveform_to_fd_waveform(waveform, out=None, length=None,
                               buffer_length=100):
    """ Convert a time domain into a frequency domain waveform by FFT.

    As a waveform is assumed to "wrap" in the time domain one must be
    careful to ensure the waveform goes to 0 at both "boundaries". To
    ensure this is done correctly the waveform must have the epoch set such
    the merger time is at t=0 and the length of the waveform should be
    shorter than the desired length of the FrequencySeries (times 2 - 1)
    so that zeroes can be suitably pre- and post-pended before FFTing.
    If given, out is a memory array to be used as the output of the FFT.
    If not given memory is allocated internally.
    If present the length of the returned FrequencySeries is determined
    from the length out. If out is not given the length can be provided
    explicitly, or it will be chosen as the nearest power of 2. If choosing
    length explicitly the waveform length + buffer_length is used when
    choosing the nearest binary number so that some zero padding is always
    added.

    Returns the FrequencySeries, with ``length_in_time`` and
    ``chirp_length`` attributes attached.
    """
    # Figure out lengths and set out if needed
    if out is None:
        if length is None:
            N = pnutils.nearest_larger_binary_number(len(waveform) + \
                buffer_length)
            n = int(N//2) + 1
        else:
            n = length
            N = (n-1)*2
        out = zeros(n, dtype=complex_same_precision_as(waveform))
    else:
        n = len(out)
        N = (n-1)*2
    delta_f = 1. / (N * waveform.delta_t)
    # total duration of the waveform
    tmplt_length = len(waveform) * waveform.delta_t
    if len(waveform) > N:
        err_msg = "The time domain template is longer than the intended "
        err_msg += "duration in the frequency domain. This situation is "
        err_msg += "not supported in this function. Please shorten the "
        err_msg += "waveform appropriately before calling this function or "
        err_msg += "increase the allowed waveform length. "
        err_msg += "Waveform length (in samples): {}".format(len(waveform))
        err_msg += ". Intended length: {}.".format(N)
        raise ValueError(err_msg)
    # for IMR templates the zero of time is at max amplitude (merger)
    # thus the start time is minus the duration of the template from
    # lower frequency cutoff to merger, i.e. minus the 'chirp time'
    tChirp = - float( waveform.start_time )  # conversion from LIGOTimeGPS
    waveform.resize(N)
    # Roll so the pre-t=0 portion wraps around to the end of the padded
    # array, keeping the merger at sample 0 as the FFT convention requires.
    k_zero = int(waveform.start_time / waveform.delta_t)
    waveform.roll(k_zero)
    htilde = FrequencySeries(out, delta_f=delta_f, copy=False)
    fft(waveform.astype(real_same_precision_as(htilde)), htilde)
    htilde.length_in_time = tmplt_length
    htilde.chirp_length = tChirp
    return htilde
|
Convert a time domain into a frequency domain waveform by FFT.
As a waveform is assumed to "wrap" in the time domain one must be
careful to ensure the waveform goes to 0 at both "boundaries". To
ensure this is done correctly the waveform must have the epoch set such
the merger time is at t=0 and the length of the waveform should be
shorter than the desired length of the FrequencySeries (times 2 - 1)
so that zeroes can be suitably pre- and post-pended before FFTing.
If given, out is a memory array to be used as the output of the FFT.
If not given memory is allocated internally.
If present the length of the returned FrequencySeries is determined
from the length out. If out is not given the length can be provided
explicitly, or it will be chosen as the nearest power of 2. If choosing
length explicitly the waveform length + buffer_length is used when
choosing the nearest binary number so that some zero padding is always
added.
|
def approve(self, peer_jid):
    """
    (Pre-)approve a subscription request from `peer_jid`.

    :param peer_jid: The peer to (pre-)approve.

    A ``"subscribed"`` presence is sent to the peer.  If the peer had
    already asked for a subscription this seals the deal and creates it;
    otherwise the server marks the peer as pre-approved and will confirm a
    future subscription request automatically.

    .. note::

        Pre-approval is an OPTIONAL feature in :rfc:`6121`. It is announced
        as a stream feature.
    """
    approval = stanza.Presence(
        type_=structs.PresenceType.SUBSCRIBED,
        to=peer_jid,
    )
    self.client.enqueue(approval)
|
(Pre-)approve a subscription request from `peer_jid`.
:param peer_jid: The peer to (pre-)approve.
This sends a ``"subscribed"`` presence to the peer; if the peer has
previously asked for a subscription, this will seal the deal and create
the subscription.
If the peer has not requested a subscription (yet), it is marked as
pre-approved by the server. A future subscription request by the peer
will then be confirmed by the server automatically.
.. note::
Pre-approval is an OPTIONAL feature in :rfc:`6121`. It is announced
as a stream feature.
|
def save_state(self, fname=None):
    """Persist the current state to a pickle file.

    When no file name is given, one is derived from the current timestamp
    and the state's energy.
    """
    if not fname:
        stamp = datetime.datetime.now().strftime("%Y-%m-%dT%Hh%Mm%Ss")
        fname = "{0}_energy_{1}.state".format(stamp, self.energy())
    with open(fname, "wb") as handle:
        pickle.dump(self.state, handle)
|
Saves state to pickle
|
def _read(self):
    """Get next packet from transport, skipping over log events.

    Debug "log" events are queued on ``self.log_events`` and reading
    continues until a non-log packet arrives.

    :return: parsed packet in a tuple with message type and payload
    :rtype: :py:class:`collections.namedtuple`
    """
    # Loop instead of recursing: the previous recursive version discarded
    # the inner call's return value, so any log event made this method
    # return None instead of the next real packet (and could also blow the
    # stack under a flood of log events).
    while True:
        raw_response = self.transport.receive()
        response = Packet.parse(raw_response)
        if response.response_type == Packet.EVENT and response.event_type == "log":
            # queue up any debug log messages, and keep reading
            self.log_events.append(response)
        else:
            return response
|
Get next packet from transport.
:return: parsed packet in a tuple with message type and payload
:rtype: :py:class:`collections.namedtuple`
|
def get_paths_from_to(self, goobj_start, goid_end=None, dn0_up1=True):
    """Get a list of paths from goobj_start to either top or goid_end.

    Breadth-first traversal over the GO graph; ``dn0_up1`` selects the
    adjacency direction function from ``self.adjdir``.  A path is recorded
    when it reaches ``goid_end`` (if given) or a node without neighbors in
    the chosen direction (if not).

    :param goobj_start: GO term object the paths start from.
    :param goid_end: GO id terminating a path, or None for top/terminal.
    :param dn0_up1: index into ``self.adjdir`` selecting the direction.
    :return: list of paths, each a list of GO term objects.
    """
    paths = []
    # Queue of terms to be examined (and storage for their paths)
    working_q = cx.deque([[goobj_start]])
    # Loop thru GO terms until we have examined all needed GO terms
    adjfnc = self.adjdir[dn0_up1]
    while working_q:
        #print "WORKING QUEUE LEN({})".format(len(working_q))
        path_curr = working_q.popleft()
        goobj_curr = path_curr[-1]
        go_adjlst = adjfnc(goobj_curr)
        #print 'END', goid_end, goobj_curr
        # If this GO term is the endpoint, Stop. Store path.
        if (goid_end is not None and goobj_curr.id == goid_end) or \
           (goid_end is None and not go_adjlst):
            paths.append(path_curr)
        # Else if this GO term is the not the end, add neighbors to path
        else:
            for go_neighbor in go_adjlst:
                # Skip neighbors already on the path to avoid cycles.
                if go_neighbor not in path_curr:
                    #print "{}'s NEIGHBOR IS {}".format(goobj_curr.id, go_neighbor.id)
                    new_path = path_curr + [go_neighbor]
                    #sys.stdout.write(" {}'s {} {}\n".format(goobj_curr, up_dn, go_neighbor))
                    working_q.append(new_path)
    #self.prt_paths(paths)
    return paths
|
Get a list of paths from goobj_start to either top or goid_end.
|
def has(self, relation, operator=">=", count=1, boolean="and", extra=None):
    """
    Add a relationship count condition to the query.

    :param relation: The relation to count
    :type relation: str

    :param operator: The operator
    :type operator: str

    :param count: The count
    :type count: int

    :param boolean: The boolean value
    :type boolean: str

    :param extra: The extra query
    :type extra: Builder or callable

    :rtype: Builder
    """
    # Dotted relations ("posts.comments") are handled by nesting the
    # has-conditions one level at a time.
    if relation.find(".") >= 0:
        return self._has_nested(relation, operator, count, boolean, extra)
    relation = self._get_has_relation_query(relation)
    query = relation.get_relation_count_query(
        relation.get_related().new_query(), self
    )
    # TODO: extra query
    if extra:
        if callable(extra):
            extra(query)
    return self._add_has_where(
        query.apply_scopes(), relation, operator, count, boolean
    )
|
Add a relationship count condition to the query.
:param relation: The relation to count
:type relation: str
:param operator: The operator
:type operator: str
:param count: The count
:type count: int
:param boolean: The boolean value
:type boolean: str
:param extra: The extra query
:type extra: Builder or callable
:type: Builder
|
def _include_exclude(file_path, include=None, exclude=None):
"""Check if file matches one of include filters and not in exclude filter.
:param file_path: Path to the file.
:param include: Tuple containing patterns to which include from result.
:param exclude: Tuple containing patterns to which exclude from result.
"""
if exclude is not None and exclude:
for pattern in exclude:
if file_path.match(pattern):
return False
if include is not None and include:
for pattern in include:
if file_path.match(pattern):
return True
return False
return True
|
Check if file matches one of include filters and not in exclude filter.
:param file_path: Path to the file.
:param include: Tuple containing patterns to which include from result.
:param exclude: Tuple containing patterns to which exclude from result.
|
def make_links_absolute(self, base_url=None, resolve_base_href=True,
                        handle_failures=None):
    """
    Rewrite every link in the document to an absolute URL.

    ``base_url`` is the full URL where the document came from; when omitted
    the document's own ``.base_url`` is used, and a TypeError is raised if
    neither is available.

    If ``resolve_base_href`` is true, any ``<base href>`` tags in the
    document are used *and* removed from the document; if false, such tags
    are ignored.

    ``handle_failures`` controls what happens when a URL fails to join:
    None (default) lets the error propagate, 'ignore' keeps the original
    href, and 'discard' removes the failing link.
    """
    if base_url is None:
        base_url = self.base_url
        if base_url is None:
            raise TypeError(
                "No base_url given, and the document has no base_url")
    if resolve_base_href:
        self.resolve_base_href()
    # Validate up front so a bad mode fails even when no link ever errors.
    if handle_failures not in (None, 'ignore', 'discard'):
        raise ValueError(
            "unexpected value for handle_failures: %r" % handle_failures)
    if handle_failures is None:
        def link_repl(href):
            return urljoin(base_url, href)
    else:
        keep_original = handle_failures == 'ignore'
        def link_repl(href):
            try:
                return urljoin(base_url, href)
            except ValueError:
                # 'ignore' keeps the href as-is; 'discard' drops the link.
                return href if keep_original else None
    self.rewrite_links(link_repl)
|
Make all links in the document absolute, given the
``base_url`` for the document (the full URL where the document
came from), or if no ``base_url`` is given, then the ``.base_url``
of the document.
If ``resolve_base_href`` is true, then any ``<base href>``
tags in the document are used *and* removed from the document.
If it is false then any such tag is ignored.
If ``handle_failures`` is None (default), a failure to process
a URL will abort the processing. If set to 'ignore', errors
are ignored. If set to 'discard', failing URLs will be removed.
|
def execute_operation(self, method="GET", ops_path="", payload=""):
    """
    Executes a Kubernetes operation using the specified method against a path.

    This is part of the low-level API.

    :Parameters:
       - `method`: The HTTP method to use, defaults to `GET`
       - `ops_path`: The path of the operation, for example, `/api/v1/events` which would result in an overall: `GET http://localhost:8080/api/v1/events`
       - `payload`: The optional payload which is relevant for `POST` or `PUT` methods only
    """
    target_url = "".join([self.api_server, ops_path])
    logging.debug("%s %s" %(method, target_url))
    if payload == "":
        response = requests.request(method, target_url)
    else:
        logging.debug("PAYLOAD:\n%s" %(payload))
        response = requests.request(method, target_url, data=payload)
    logging.debug("RESPONSE:\n%s" %(response.json()))
    return response
|
Executes a Kubernetes operation using the specified method against a path.
This is part of the low-level API.
:Parameters:
- `method`: The HTTP method to use, defaults to `GET`
- `ops_path`: The path of the operation, for example, `/api/v1/events` which would result in an overall: `GET http://localhost:8080/api/v1/events`
- `payload`: The optional payload which is relevant for `POST` or `PUT` methods only
|
def convertToPDF(self, from_page=0, to_page=-1, rotate=0):
    """Convert document to PDF selecting page range and optional rotation. Output bytes object.

    :param from_page: first page to include (0-based).
    :param to_page: last page to include; -1 selects the last page.
    :param rotate: rotation applied to the copied pages.
    :raises ValueError: if the document is closed or encrypted.
    """
    if self.isClosed or self.isEncrypted:
        raise ValueError("operation illegal for closed / encrypted doc")
    return _fitz.Document_convertToPDF(self, from_page, to_page, rotate)
|
Convert document to PDF selecting page range and optional rotation. Output bytes object.
|
def get_pages_from_id_list(id_list):
    '''
    Accepts: list of page ids

    Returns: list of specific page objects; ids that do not resolve to an
    existing page are logged and skipped.
    '''
    pages = []
    for page_id in id_list:
        try:
            page = Page.objects.get(id=page_id).specific
        except ObjectDoesNotExist:
            logging.error(
                "Attempted to fetch non-existent"
                " page with id of {}".format(page_id))
        else:
            pages.append(page)
    return pages
|
Accepts: list of page ids
Returns: list of specific page objects
|
def is_error(self):
    """Return True when both qstat and the status file report an error."""
    qstat_error = self._grep_qstat('error')
    status_error = self._grep_status('error')
    return bool(qstat_error and status_error)
|
Checks to see if the job errored out.
|
def connect(self):
    """
    Connects to Redis, subscribes to the configured channel and starts a
    daemon thread running ``self.listen``.
    """
    logger.info("Connecting to Redis on {host}:{port}...".format(
        host=self.host, port=self.port))
    super(RedisSubscriber, self).connect()
    logger.info("Successfully connected to Redis")
    # Subscribe to channel
    self.pubsub = self.client.pubsub()
    self.pubsub.subscribe(self.channel)
    logger.info("Subscribed to [{channel}] Redis channel".format(
        channel=self.channel))
    # Start listening on a daemon thread so it won't block process exit.
    t = Thread(target=self.listen)
    t.setDaemon(True)
    t.start()
|
Connects to Redis
|
def _to_dict(self):
"""Return a json dictionary representing this model."""
_dict = {}
if hasattr(self, 'id') and self.id is not None:
_dict['id'] = self.id
if hasattr(self, 'metadata') and self.metadata is not None:
_dict['metadata'] = self.metadata
if hasattr(self, 'collection_id') and self.collection_id is not None:
_dict['collection_id'] = self.collection_id
if hasattr(self,
'result_metadata') and self.result_metadata is not None:
_dict['result_metadata'] = self.result_metadata._to_dict()
if hasattr(self, 'title') and self.title is not None:
_dict['title'] = self.title
if hasattr(self, '_additionalProperties'):
for _key in self._additionalProperties:
_value = getattr(self, _key, None)
if _value is not None:
_dict[_key] = _value
return _dict
|
Return a json dictionary representing this model.
|
def collapse_spaces(text):
    """Collapse newlines, tabs and runs of whitespace into single spaces.

    Non-string values are returned unchanged.
    """
    if not isinstance(text, six.string_types):
        return text
    collapsed = COLLAPSE_RE.sub(WS, text)
    return collapsed.strip(WS)
|
Remove newlines, tabs and multiple spaces with single spaces.
|
def _reverse_index(self):
"""
Move the cursor up one row in the same column. If the cursor is at the
first row, create a new row at the top.
"""
if self.y == 0:
# If the cursor is currently at the first row, then scroll the
# screen up.
self.display = [u" " * self.size[1]] + self.display[:-1]
else:
# If the cursor is anywhere other than the first row than just move
# it up by one row.
self.y -= 1
|
Move the cursor up one row in the same column. If the cursor is at the
first row, create a new row at the top.
|
def thumbnail(self, img_url, size, crop=None, bg=None, quality=85,
              storage_type=None, bucket_name=None):
    """
    Return the URL of a thumbnail for ``img_url``, generating it via local
    storage or S3 depending on ``storage_type``/``bucket_name``.

    :param img_url: url img - '/assets/media/summer.jpg'
    :param size: size return thumb - '100x100'
    :param crop: crop return thumb - 'fit' or None
    :param bg: tuple color or None - (255, 255, 255, 0)
    :param quality: JPEG quality 1-100
    :param storage_type: either 's3' or None
    :param bucket_name: s3 bucket name
    :return: :thumb_url:
    :raises ValueError: if ``storage_type`` is set but is not 's3'.
    """
    width, height = [int(x) for x in size.split('x')]
    thumb_size = (width, height)
    # Split the URL into its directory part and the image file name.
    url_path, img_name = os.path.split(img_url)
    name, fm = os.path.splitext(img_name)
    # The thumbnail name encodes all options, so variants don't collide.
    miniature = self._get_name(name, fm, size, crop, bg, quality)
    original_filename = os.path.join(self.app.config['MEDIA_FOLDER'], url_path, img_name)
    thumb_filename = os.path.join(self.app.config['MEDIA_THUMBNAIL_FOLDER'], url_path, miniature)
    thumb_url = os.path.join(self.app.config['MEDIA_THUMBNAIL_URL'], url_path, miniature)
    if not (storage_type and bucket_name):
        # Default path: generate/serve the thumbnail from local storage.
        return self._thumbnail_local(original_filename,
                                     thumb_filename,
                                     thumb_size,
                                     thumb_url,
                                     crop=crop,
                                     bg=bg,
                                     quality=quality)
    else:
        if storage_type != 's3':
            raise ValueError('Storage type "%s" is invalid, the only supported storage type (apart from default local storage) is s3.' % storage_type)
        return self._thumbnail_s3(original_filename,
                                  thumb_filename,
                                  thumb_size,
                                  thumb_url,
                                  bucket_name,
                                  crop=crop,
                                  bg=bg,
                                  quality=quality)
|
:param img_url: url img - '/assets/media/summer.jpg'
:param size: size return thumb - '100x100'
:param crop: crop return thumb - 'fit' or None
:param bg: tuple color or None - (255, 255, 255, 0)
:param quality: JPEG quality 1-100
:param storage_type: either 's3' or None
:param bucket_name: s3 bucket name
:return: :thumb_url:
|
def _parse_variable(s: str, curr_row: int, curr_col: int) -> Tuple:
    '''
    Parse a cell-variable expression into numeric coordinates.

    $A,$2 <- constant col and row
    $row,$2 <- current col, row 2
    $A+1,$2 <- col A + 1 = 2, row 2
    $row+1,$2 <- current col + 1, row 2
    $A,$2-1 <-- col A, row 2 - 1 = 1

    Returns a 1-tuple for a single expression, or a (row, col) 2-tuple for
    a comma-separated "col,row" pair.  Raises ValueError for anything else.

    NOTE(review): the substituted expression is passed to ``eval`` — this is
    only safe when extraction templates come from trusted input.
    '''
    def parse_expression(ss, curr_row, curr_col):
        # Substitute the cursor variables with concrete numbers first.
        ss = ss.replace('$row', str(curr_row))
        ss = ss.replace('$col', str(curr_col))
        # Convert $-prefixed row/column names into their numeric form.
        ss = ExcelExtractor._re_row_identifier.sub(
            lambda x: str(ExcelExtractor._row_name_to_num(x.group()[1:])) if len(x.group()) > 0 else '', ss)
        ss = ExcelExtractor._re_col_identifier.sub(
            lambda x: str(ExcelExtractor._col_name_to_num(x.group()[1:])) if len(x.group()) > 0 else '', ss)
        return eval(ss)
    ss = s.split(',')
    if len(ss) == 1:
        return parse_expression(ss[0], curr_row, curr_col),
    elif len(ss) == 2:
        # Input is "col,row"; swap so the returned tuple is (row, col).
        rr, cc = (ss[1], ss[0])
        return parse_expression(rr, curr_row, curr_col), parse_expression(cc, curr_row, curr_col)
    else:
        raise ValueError('Invalid variable')
|
$A,$2 <- constant col and row
$row,$2 <- current col, row 2
$A+1,$2 <- col A + 1 = 2, row 2
$row+1,$2 <- current col + 1, row 2
$A,$2-1 <-- col A, row 2 - 1 = 1
|
def edit(self, **kwargs):
    """ Edit an object.

    Parameters:
        kwargs (dict): Dict of settings to edit.

    Example:
        {'type': 1,
         'id': movie.ratingKey,
         'collection[0].tag.tag': 'Super',
         'collection.locked': 0}
    """
    # Fill in the object's identity unless the caller provided it.
    if 'id' not in kwargs:
        kwargs['id'] = self.ratingKey
    if 'type' not in kwargs:
        kwargs['type'] = utils.searchType(self.type)
    endpoint = '/library/sections/%s/all?%s' % (self.librarySectionID,
                                                urlencode(kwargs))
    self._server.query(endpoint, method=self._server._session.put)
|
Edit an object.
Parameters:
kwargs (dict): Dict of settings to edit.
Example:
{'type': 1,
'id': movie.ratingKey,
'collection[0].tag.tag': 'Super',
'collection.locked': 0}
|
def extend_course(course, enterprise_customer, request):
    """
    Extend a course with more details needed for the program landing page.

    In particular, we add the following:
    * `course_image_uri`
    * `course_title`
    * `course_level_type`
    * `course_short_description`
    * `course_full_description`
    * `course_effort`
    * `expected_learning_items`
    * `staff`

    Returns a 2-tuple ``(course, error_code)``: ``error_code`` is ``None``
    on success; on failure the first element is an empty dict and
    ``error_code`` is a short code also surfaced to the user.
    """
    # Run-level details are pulled from the first course run only.
    course_run_id = course['course_runs'][0]['key']
    try:
        catalog_api_client = CourseCatalogApiServiceClient(enterprise_customer.site)
    except ImproperlyConfigured:
        # Catalog client misconfiguration: log with a stable error code and
        # show a generic message to the user.
        error_code = 'ENTPEV000'
        LOGGER.error(
            'CourseCatalogApiServiceClient is improperly configured. '
            'Returned error code {error_code} to user {userid} '
            'and enterprise_customer {enterprise_customer} '
            'for course_run_id {course_run_id}'.format(
                error_code=error_code,
                userid=request.user.id,
                enterprise_customer=enterprise_customer.uuid,
                course_run_id=course_run_id,
            )
        )
        messages.add_generic_error_message_with_code(request, error_code)
        return ({}, error_code)
    course_details, course_run_details = catalog_api_client.get_course_and_course_run(course_run_id)
    if not course_details or not course_run_details:
        # The catalog returned no data for this run; report and bail out.
        error_code = 'ENTPEV001'
        LOGGER.error(
            'User {userid} of enterprise customer {enterprise_customer} encountered an error.'
            'No course_details or course_run_details found for '
            'course_run_id {course_run_id}. '
            'The following error code reported to the user: {error_code}'.format(
                userid=request.user.id,
                enterprise_customer=enterprise_customer.uuid,
                course_run_id=course_run_id,
                error_code=error_code,
            )
        )
        messages.add_generic_error_message_with_code(request, error_code)
        return ({}, error_code)
    weeks_to_complete = course_run_details['weeks_to_complete']
    # `image` may be None in catalog data; treat that as an empty dict.
    course_run_image = course_run_details['image'] or {}
    course.update({
        'course_image_uri': course_run_image.get('src', ''),
        'course_title': course_run_details['title'],
        'course_level_type': course_run_details.get('level_type', ''),
        'course_short_description': course_run_details['short_description'] or '',
        'course_full_description': clean_html_for_template_rendering(course_run_details['full_description'] or ''),
        'expected_learning_items': course_details.get('expected_learning_items', []),
        'staff': course_run_details.get('staff', []),
        # ungettext_min_max returns a falsy value when both efforts are
        # missing, hence the trailing `or ''`.
        'course_effort': ungettext_min_max(
            '{} hour per week',
            '{} hours per week',
            '{}-{} hours per week',
            course_run_details['min_effort'] or None,
            course_run_details['max_effort'] or None,
        ) or '',
        'weeks_to_complete': ungettext(
            '{} week',
            '{} weeks',
            weeks_to_complete
        ).format(weeks_to_complete) if weeks_to_complete else '',
    })
    return course, None
|
Extend a course with more details needed for the program landing page.
In particular, we add the following:
* `course_image_uri`
* `course_title`
* `course_level_type`
* `course_short_description`
* `course_full_description`
* `course_effort`
* `expected_learning_items`
* `staff`
|
def render(self, context, instance, placeholder):
    ''' Allows this plugin to use templates designed for a list of locations. '''
    context = super(LocationListPlugin, self).render(context, instance, placeholder)
    active_locations = Location.objects.filter(
        status=Location.StatusChoices.active)
    context['location_list'] = active_locations
    return context
|
Allows this plugin to use templates designed for a list of locations.
|
def make_backups(self, block_id):
    """
    If we're doing backups on a regular basis, then
    carry them out here if it is time to do so.
    This method does nothing otherwise.
    Return None on success
    Abort on failure

    NOTE: Python 2 code (uses the ``except Exception, e`` syntax).
    """
    assert self.setup, "Not set up yet. Call .db_setup() first!"
    # make a backup? Only every `backup_frequency` blocks.
    if self.backup_frequency is not None:
        if (block_id % self.backup_frequency) == 0:
            backup_dir = config.get_backups_directory(self.impl, self.working_dir)
            # Create the backup directory on first use; failure is fatal and
            # aborts the whole process (os.abort dumps core).
            if not os.path.exists(backup_dir):
                try:
                    os.makedirs(backup_dir)
                except Exception, e:
                    log.exception(e)
                    log.error("FATAL: failed to make backup directory '%s'" % backup_dir)
                    traceback.print_stack()
                    os.abort()
            # Back up each existing state file; prefer the SQLite backup
            # helper and fall back to a plain file copy when it fails.
            for p in self.get_state_paths(self.impl, self.working_dir):
                if os.path.exists(p):
                    try:
                        pbase = os.path.basename(p)
                        # Backup name records the previous block height.
                        backup_path = os.path.join(backup_dir, pbase + (".bak.{}".format(block_id - 1)))
                        if not os.path.exists(backup_path):
                            rc = sqlite3_backup(p, backup_path)
                            if not rc:
                                log.warning("Failed to back up as an SQLite db. Falling back to /bin/cp")
                                shutil.copy(p, backup_path)
                        else:
                            # Never clobber an existing backup.
                            log.error("Will not overwrite '%s'" % backup_path)
                    except Exception, e:
                        log.exception(e)
                        log.error("FATAL: failed to back up '%s'" % p)
                        traceback.print_stack()
                        os.abort()
    return
|
If we're doing backups on a regular basis, then
carry them out here if it is time to do so.
This method does nothing otherwise.
Return None on success
Abort on failure
|
def run(self, host=None, port=None, debug=None, use_reloader=None,
        open_browser=False):
    """
    Starts a server to render the README.

    Unspecified host/port/debug/use_reloader values fall back to the
    instance configuration. Blocks until the server shuts down.
    """
    if host is None:
        host = self.config['HOST']
    if port is None:
        port = self.config['PORT']
    if debug is None:
        debug = self.debug
    if use_reloader is None:
        use_reloader = self.config['DEBUG_GRIP']
    # Verify the server is not already running and start
    with self._run_mutex:
        if self._shutdown_event:
            raise AlreadyRunningError()
        self._shutdown_event = threading.Event()
    # Authentication message
    if self.auth and not self.quiet:
        if isinstance(self.auth, tuple):
            username, password = self.auth
            auth_method = ('credentials: {0}'.format(username)
                           if username
                           else 'personal access token')
        else:
            auth_method = type(self.auth).__name__
        print(' * Using', auth_method, file=sys.stderr)
    # Get random port manually when needed ahead of time
    # (bind to port 0, read back the kernel-chosen port, then release it;
    # there is a small race before the server re-binds the port).
    if port == 0 and open_browser:
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.bind(('localhost', 0))
        port = sock.getsockname()[1]
        sock.close()
    # Open browser
    browser_thread = (
        start_browser_when_ready(host, port, self._shutdown_event)
        if open_browser else None)
    # Run local server (blocks until the server stops)
    super(Grip, self).run(host, port, debug=debug,
                          use_reloader=use_reloader,
                          threaded=True)
    # Signal to the polling and browser threads that they should exit
    if not self.quiet:
        print(' * Shutting down...')
    self._shutdown_event.set()
    # Wait for browser thread to finish
    if browser_thread:
        browser_thread.join()
    # Cleanup
    self._shutdown_event = None
|
Starts a server to render the README.
|
def select_inverse(self, name="default", executor=None):
    """Invert the selection: selected rows become deselected and vice versa.

    :param str name: name of the selection to modify
    :param executor: executor used to schedule the operation
    :return:
    """
    self._selection(
        lambda current: selections.SelectionInvert(current),
        name, executor=executor)
|
Invert the selection, i.e. what is selected will not be, and vice versa
:param str name:
:param executor:
:return:
|
def do_set_logical_switch_config(self, line):
    """set_logical_switch_config <peer> <logical switch> <key> <value>
    eg. set_logical_switch_config sw1 running LogicalSwitch7 \
    lost-connection-behavior failStandaloneMode
    """
    def f(p, args):
        # Unpack the four required arguments. A bare `except:` here would
        # also swallow KeyboardInterrupt/SystemExit; only the unpacking
        # failure (wrong argument count) should be reported.
        try:
            target, lsw, key, value = args
        except ValueError:
            print("argument error")
            return
        # get switch id
        o = p.get_config(target)
        capable_switch_id = o.id
        # Build the capable-switch config object; an unknown `key` surfaces
        # as a TypeError from the keyword expansion.
        try:
            capable_switch = ofc.OFCapableSwitchType(
                id=capable_switch_id,
                logical_switches=ofc.OFCapableSwitchLogicalSwitchesType(
                    switch=[ofc.OFLogicalSwitchType(
                        id=lsw,
                        **{key: value}
                    )]
                )
            )
        except TypeError:
            print("argument error")
            return
        # Push the edit; report any server-side failure to the user.
        try:
            p.edit_config(target, capable_switch)
        except Exception as e:
            print(e)
    self._request(line, f)
|
set_logical_switch_config <peer> <logical switch> <key> <value>
eg. set_logical_switch_config sw1 running LogicalSwitch7 \
lost-connection-behavior failStandaloneMode
|
async def load_message(obj, msg_type, msg=None, field_archiver=None):
    """
    Deserialize a message of the given type from the object.

    When ``msg`` is provided, fields are loaded into that existing
    instance; otherwise a fresh ``msg_type()`` is created.
    :param obj: source object to read from
    :param msg_type: message class describing the field specs
    :param msg: optional existing message instance to populate
    :param field_archiver: optional custom field loader
    :return: the populated message
    """
    if msg is None:
        msg = msg_type()
    # Field specs come from the declared type when given, otherwise from
    # the concrete class of the supplied instance.
    spec_source = msg_type if msg_type else msg.__class__
    for field_spec in spec_source.f_specs():
        await load_message_field(obj, msg, field_spec, field_archiver=field_archiver)
    return msg
|
Loads message if the given type from the object.
Supports reading directly to existing message.
:param obj:
:param msg_type:
:param msg:
:param field_archiver:
:return:
|
def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,),
                      retry_message="", cmd_env=None):
    """Run a command and retry until success or max_retries is reached.

    :param: cmd: str: The apt command to run.
    :param: max_retries: int: The number of retries to attempt on a fatal
        command. Defaults to CMD_RETRY_COUNT.
    :param: retry_exitcodes: tuple: Optional additional exit codes to retry.
        Defaults to retry on exit code 1.
    :param: retry_message: str: Optional log prefix emitted during retries.
    :param: cmd_env: dict: Environment variables to add to the command run.
    :raises: subprocess.CalledProcessError: when the command still fails
        after max_retries attempts.
    """
    kwargs = {}
    if cmd_env:
        # Extend (not replace) the inherited environment.
        env = os.environ.copy()
        env.update(cmd_env)
        kwargs['env'] = env
    if not retry_message:
        retry_message = "Failed executing '{}'".format(" ".join(cmd))
    retry_message += ". Will retry in {} seconds".format(CMD_RETRY_DELAY)

    retry_count = 0
    result = None
    # `None` means "not yet run"; any listed exit code triggers a retry.
    retry_results = (None,) + retry_exitcodes
    while result in retry_results:
        try:
            result = subprocess.check_call(cmd, **kwargs)
        except subprocess.CalledProcessError as e:
            retry_count += 1
            if retry_count > max_retries:
                raise
            result = e.returncode
            log(retry_message)
            time.sleep(CMD_RETRY_DELAY)
|
Run a command and retry until success or max_retries is reached.
:param: cmd: str: The apt command to run.
:param: max_retries: int: The number of retries to attempt on a fatal
command. Defaults to CMD_RETRY_COUNT.
:param: retry_exitcodes: tuple: Optional additional exit codes to retry.
Defaults to retry on exit code 1.
:param: retry_message: str: Optional log prefix emitted during retries.
:param: cmd_env: dict: Environment variables to add to the command run.
|
def _set_rsvp(self, v, load=False):
    """
    Setter method for rsvp, mapped from YANG variable /mpls_state/rsvp (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_rsvp is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_rsvp() directly.
    YANG Description: MPLS RSVP Operational Information

    NOTE: auto-generated pyangbind code -- keep structure unchanged.
    """
    # Unwrap values that carry an explicit underlying YANG type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=rsvp.rsvp, is_container='container', presence=False, yang_name="rsvp", rest_name="rsvp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """rsvp must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=rsvp.rsvp, is_container='container', presence=False, yang_name="rsvp", rest_name="rsvp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mpls-rsvp', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='container', is_config=False)""",
        })
    self.__rsvp = t
    # Notify the parent object of the change, if it tracks modifications.
    if hasattr(self, '_set'):
        self._set()
|
Setter method for rsvp, mapped from YANG variable /mpls_state/rsvp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_rsvp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_rsvp() directly.
YANG Description: MPLS RSVP Operational Information
|
def queue(p_queue, host=None):
    '''Construct a path to the queue dir for a queue'''
    # Without a host, the queue dir lives directly under the queue root.
    if host is None:
        return _path(p_queue, _c.FSQ_QUEUE)
    # With a host, root the queue dir under that host's hosts directory.
    return _path(_c.FSQ_QUEUE, root=_path(host, root=hosts(p_queue)))
|
Construct a path to the queue dir for a queue
|
def strace_set_buffer_size(self, size):
    """Sets the STRACE buffer size.
    Args:
      self (JLink): the ``JLink`` instance.
      size (int): the new STRACE buffer size.
    Returns:
      ``None``
    Raises:
      JLinkException: on error.
    """
    size = ctypes.c_uint32(size)
    res = self._dll.JLINK_STRACE_Control(enums.JLinkStraceCommand.SET_BUFFER_SIZE, size)
    # A negative return code signals failure in the J-Link DLL.
    if res < 0:
        raise errors.JLinkException('Failed to set the STRACE buffer size.')
    return None
|
Sets the STRACE buffer size.
Args:
self (JLink): the ``JLink`` instance.
Returns:
``None``
Raises:
JLinkException: on error.
|
def allocate_port():
    """Allocate an unused port.

    Binds a throwaway socket to port 0 so the kernel picks a free port.
    There is a small race condition here (between the time we allocate the
    port, and the time it actually gets used), but for the purposes for which
    this function gets used it isn't a problem in practice.
    """
    probe = socket.socket()
    try:
        probe.bind(("localhost", 0))
        return get_port(probe)
    finally:
        # Always release the probe socket, even if bind fails.
        probe.close()
|
Allocate an unused port.
There is a small race condition here (between the time we allocate the
port, and the time it actually gets used), but for the purposes for which
this function gets used it isn't a problem in practice.
|
def getNetworkSummary(self, suid, verbose=None):
    """
    Returns summary of collection containing the specified network.

    :param suid: Cytoscape Collection/Subnetwork SUID
    :param verbose: print more
    :returns: 200: successful operation
    """
    surl = self.___url
    sv = surl.split('/')[-1]
    # BUG FIX: str.rstrip(sv + '/') strips a *set of characters*, not a
    # suffix, so it could eat extra trailing characters from the base URL
    # (e.g. any 'v' or digit). Remove the '/<version>' suffix explicitly.
    if surl.endswith('/' + sv):
        surl = surl[:-(len(sv) + 1)]
    response = api(url=surl + '/cyndex2/' + sv + '/networks/' + str(suid) + '', method="GET", verbose=verbose, parse_params=False)
    return response
|
Returns summary of collection containing the specified network.
:param suid: Cytoscape Collection/Subnetwork SUID
:param verbose: print more
:returns: 200: successful operation
|
def js_click(self, selector, by=By.CSS_SELECTOR):
    """ Clicks an element using pure JS. Does not use jQuery. """
    selector, by = self.__recalculate_selector(selector, by)
    if by == By.LINK_TEXT:
        # Link-text lookups have no pure-JS equivalent; log an explanation
        # and fall back to a regular WebDriver click.
        message = (
            "Pure JavaScript doesn't support clicking by Link Text. "
            "You may want to use self.jquery_click() instead, which "
            "allows this with :contains(), assuming jQuery isn't blocked. "
            "For now, self.js_click() will use a regular WebDriver click.")
        logging.debug(message)
        self.click(selector, by=by)
        return
    element = self.wait_for_element_present(
        selector, by=by, timeout=settings.SMALL_TIMEOUT)
    # Highlight/scroll only when the element is actually visible.
    if self.is_element_visible(selector, by=by):
        self.__demo_mode_highlight_if_active(selector, by)
        if not self.demo_mode:
            self.__scroll_to_element(element)
    # Convert to a CSS selector and escape it for embedding in the JS call.
    css_selector = self.convert_to_css_selector(selector, by=by)
    css_selector = re.escape(css_selector)
    css_selector = self.__escape_quotes_if_needed(css_selector)
    self.__js_click(selector, by=by)  # The real "magic" happens here
    self.__demo_mode_pause_if_active()
|
Clicks an element using pure JS. Does not use jQuery.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.