def multi_ping(dest_addrs, timeout, retry=0, ignore_lookup_errors=False):
"""
Combine send and receive measurement into single function.
This offers a retry mechanism: Overall timeout time is divided by
number of retries. Additional ICMPecho packets are sent to those
addresses from which we have not received answers, yet.
The retry mechanism is useful, because individual ICMP packets may get
lost.
If 'retry' is set to 0 then only a single packet is sent to each
address.
If 'ignore_lookup_errors' is set then any issues with resolving target
names or looking up their address information will silently be ignored.
Those targets simply appear in the 'no_results' return list.
"""
retry = int(retry)
if retry < 0:
retry = 0
timeout = float(timeout)
if timeout < 0.1:
raise MultiPingError("Timeout < 0.1 seconds not allowed")
retry_timeout = float(timeout) / (retry + 1)
if retry_timeout < 0.1:
raise MultiPingError("Time between ping retries < 0.1 seconds")
mp = MultiPing(dest_addrs, ignore_lookup_errors=ignore_lookup_errors)
results = {}
retry_count = 0
while retry_count <= retry:
# Send a batch of pings
mp.send()
single_results, no_results = mp.receive(retry_timeout)
# Add the results from the last sending of pings to the overall results
results.update(single_results)
if not no_results:
# No addresses left? We are done.
break
retry_count += 1
return results, no_results
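A quick usage sketch of the helper above. The `multiping` module name and the shape of the returned mapping (responding address to round-trip time in seconds) are assumptions inferred from the code:

from multiping import multi_ping  # assumed module name

# Ping two hosts, allowing two retries within an overall 2-second budget.
responses, no_responses = multi_ping(["8.8.8.8", "127.0.0.1"], timeout=2, retry=2)
for addr, rtt in responses.items():
    print("%s responded in %.3f s" % (addr, rtt))
print("no response from:", no_responses)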
def status(self):
"""Get the status of the responding member."""
status_request = etcdrpc.StatusRequest()
status_response = self.maintenancestub.Status(
status_request,
self.timeout,
credentials=self.call_credentials,
metadata=self.metadata
)
for m in self.members:
if m.id == status_response.leader:
leader = m
break
else:
# raise exception?
leader = None
return Status(status_response.version,
status_response.dbSize,
leader,
status_response.raftIndex,
status_response.raftTerm)
def dprint(s):
    '''Prints `s` with additional debugging information'''
    import inspect
    import re
    frameinfo = inspect.stack()[1]
    callerframe = frameinfo.frame
    d = callerframe.f_locals
    if isinstance(s, str):
        # Evaluate the expression in the caller's local scope
        val = eval(s, d)
    else:
        val = s
    # Recover the literal argument text from the calling source line
    cc = frameinfo.code_context[0]
    regex = re.compile(r"dprint\((.*)\)")
    res = regex.search(cc)
    s = res.group(1)
    text = ''
    text += bcolors.OKBLUE + "At <{}>\n".format(str(frameinfo)) + bcolors.ENDC
    text += bcolors.WARNING + "{}: ".format(s) + bcolors.ENDC
    text += str(val)
print(text)
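A small usage sketch. The `bcolors` class below is a minimal stand-in for whatever colour-constant class the surrounding module defines, and `inspect` needs real source lines, so this should be run from a file rather than a REPL:

class bcolors:  # stand-in for the module's ANSI colour constants
    OKBLUE = '\033[94m'
    WARNING = '\033[93m'
    ENDC = '\033[0m'

def demo():
    x, y = 2, 3
    dprint("x + y")  # prints the call site, the expression as written, and 5

demo()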
def default_targets(self):
"""Default targets for `dvc repro` and `dvc pipeline`."""
from dvc.stage import Stage
msg = "assuming default target '{}'.".format(Stage.STAGE_FILE)
logger.warning(msg)
return [Stage.STAGE_FILE]
def song(self):
"""the song associated with the project"""
if self._song is None:
self._song = Song(self._song_data)
return self._song
def iter_chain(cur):
"""Iterate over all of the chains in the database.
Args:
cur (:class:`sqlite3.Cursor`):
An sqlite3 cursor. This function is meant to be run within a :obj:`with` statement.
Yields:
list: The chain.
"""
select = "SELECT nodes FROM chain"
for nodes, in cur.execute(select):
yield json.loads(nodes)
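A self-contained sketch of the schema this implies: a `chain` table whose `nodes` column holds JSON-encoded lists (the table layout is inferred from the SELECT above):

import json
import sqlite3

con = sqlite3.connect(":memory:")
with con:
    con.execute("CREATE TABLE chain (nodes TEXT)")
    con.executemany("INSERT INTO chain VALUES (?)",
                    [(json.dumps([0, 1, 2]),), (json.dumps([3, 4]),)])

for chain in iter_chain(con.cursor()):
    print(chain)  # [0, 1, 2] then [3, 4]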
def parseinput(inputlist,outputname=None, atfile=None):
"""
Recursively parse user input based upon the irafglob
program and construct a list of files that need to be processed.
This program addresses the following deficiencies of the irafglob program::
parseinput can extract filenames from association tables
Returns
-------
This program will return a list of input files that will need to
be processed in addition to the name of any outfiles specified in
an association table.
Parameters
----------
inputlist - string
specification of input files using either wild-cards, @-file or
comma-separated list of filenames
outputname - string
desired name for output product to be created from the input files
atfile - object
function to use in interpreting the @-file columns that gets passed to irafglob
Returns
-------
files - list of strings
names of output files to be processed
newoutputname - string
name of output file to be created.
See Also
--------
stsci.tools.irafglob
"""
    # Initialize some variables
files = [] # list used to store names of input files
newoutputname = outputname # Outputname returned to calling program.
# The value of outputname is only changed
# if it had a value of 'None' on input.
# We can use irafglob to parse the input. If the input wasn't
# an association table, it needs to be either a wildcard, '@' file,
    # or comma-separated list.
files = irafglob(inputlist, atfile=atfile)
# Now that we have expanded the inputlist into a python list
# containing the list of input files, it is necessary to examine
# each of the files to make sure none of them are association tables.
#
# If an association table is found, the entries should be read
# Determine if the input is an association table
for file in files:
        if checkASN(file):
            # Create a list to store the files extracted from the
            # association table
assoclist = []
# The input is an association table
try:
# Open the association table
assocdict = readASNTable(file, None, prodonly=False)
            except Exception:
errorstr = "###################################\n"
errorstr += "# #\n"
errorstr += "# UNABLE TO READ ASSOCIATION FILE,#\n"
errorstr += str(file)+'\n'
errorstr += "# DURING FILE PARSING. #\n"
errorstr += "# #\n"
errorstr += "# Please determine if the file is #\n"
errorstr += "# in the current directory and #\n"
errorstr += "# that it has been properly #\n"
errorstr += "# formatted. #\n"
errorstr += "# #\n"
errorstr += "# This error message is being #\n"
errorstr += "# generated from within the #\n"
errorstr += "# parseinput.py module. #\n"
errorstr += "# #\n"
errorstr += "###################################\n"
raise ValueError(errorstr)
# Extract the output name from the association table if None
# was provided on input.
if outputname is None:
newoutputname = assocdict['output']
# Loop over the association dictionary to extract the input
# file names.
for f in assocdict['order']:
assoclist.append(fileutil.buildRootname(f))
# Remove the name of the association table from the list of files
files.remove(file)
# Append the list of filenames generated from the association table
# to the master list of input files.
files.extend(assoclist)
# Return the list of the input files and the output name if provided in an association.
return files, newoutputname
def highlight_nodes(graph: BELGraph, nodes: Optional[Iterable[BaseEntity]] = None, color: Optional[str] = None):
"""Adds a highlight tag to the given nodes.
:param graph: A BEL graph
:param nodes: The nodes to add a highlight tag on
:param color: The color to highlight (use something that works with CSS)
"""
color = color or NODE_HIGHLIGHT_DEFAULT_COLOR
for node in nodes if nodes is not None else graph:
graph.node[node][NODE_HIGHLIGHT] = color
def get_paged(self, res, **kwargs):
"""
This call is equivalent to ``res(**kwargs)``, only it retrieves all pages
and returns the results joined into a single iterable. The advantage over
retrieving everything at once is that the result can be consumed immediately.
:param res: what resource to connect to
:param kwargs: filters to be used
::
# Example: Iterate over all active releases
for release in client.get_paged(client['releases']._, active=True):
...
This function is obsolete and not recommended.
"""
    if self.page_size is not None:
        kwargs['page_size'] = self.page_size
        if self.page_size <= 0:
            # If page_size <= 0, pagination is disabled.
            return res(**kwargs)
def worker():
kwargs['page'] = 1
while True:
response = res(**kwargs)
yield response['results']
if response['next']:
kwargs['page'] += 1
else:
break
return itertools.chain.from_iterable(worker())
def child_context(self, *args, **kwargs):
"""
Context setup first in child process, before returning from start() call in parent.
Result is passed in as argument of update
:return:
"""
# Now we can extract config values
expected_args = {
'services': [],
'topics': [], # bwcompat
'subscribers': [],
'publishers': [],
'params': [],
# TODO : all of them !
}
ifargs = {
arg: self.config_handler.config.get(arg.upper(), default) for arg, default in expected_args.items()
}
# overriding with kwargs
ifargs.update(kwargs)
# storing passed args in config in case of reset
# calling setup on child context enter call
if self.interface is None:
#for BW compat
# TODO : change API to use the child_context from pyzmp coprocess
self.setup(*args, **ifargs)
with super(PyrosBase, self).child_context(*args, **kwargs) as cctxt:
yield cctxt
def generate_dumper(self, mapfile, names):
"""
Build dumpdata commands
"""
return self.build_template(mapfile, names, self._dumpdata_template)
def locations_for(self, city_name, country=None, matching='nocase'):
"""
Returns a list of Location objects corresponding to
the int IDs and relative toponyms and 2-chars country of the cities
matching the provided city name.
The rule for identifying matchings is according to the provided
`matching` parameter value.
If `country` is provided, the search is restricted to the cities of
the specified country.
:param country: two character str representing the country where to
search for the city. Defaults to `None`, which means: search in all
countries.
:param matching: str among `exact` (literal, case-sensitive matching),
`nocase` (literal, case-insensitive matching) and `like` (matches cities
whose name contains as a substring the string fed to the function, no
matter the case). Defaults to `nocase`.
:raises ValueError if the value for `matching` is unknown
:return: list of `weatherapi25.location.Location` objects
"""
if not city_name:
return []
if matching not in self.MATCHINGS:
raise ValueError("Unknown type of matching: "
"allowed values are %s" % ", ".join(self.MATCHINGS))
if country is not None and len(country) != 2:
raise ValueError("Country must be a 2-char string")
splits = self._filter_matching_lines(city_name, country, matching)
return [Location(item[0], float(item[3]), float(item[2]),
int(item[1]), item[4]) for item in splits]
def to_dict(self):
"""Return state as a dictionary.
Result can be used to serialize the instance and reconstitute
it later using :meth:`from_dict`.
:rtype: dict
"""
session = self._get_session()
snapshot = self._get_snapshot()
return {
"session_id": session._session_id,
"transaction_id": snapshot._transaction_id,
}
def postprocess_authors_init(self, entry):
"""
If only a single author was found, ensure that ``authors_init`` is
nonetheless a list.
"""
    if not isinstance(entry.authors_init, list):
entry.authors_init = [entry.authors_init]
def generate_func_call(name, args=None, kwargs=None):
"""
Generates code to call a function.
Args:
name (str): The function name.
args (list[str]): Each positional argument.
kwargs (list[tuple]): Each tuple is (arg: str, value: str). If
value is None, then the keyword argument is omitted. Otherwise,
if the value is not a string, then str() is called on it.
Returns:
str: Code to call a function.
"""
all_args = []
if args:
all_args.extend(args)
if kwargs:
all_args.extend('{}={}'.format(k, v)
for k, v in kwargs if v is not None)
return '{}({})'.format(name, ', '.join(all_args))
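For example, keyword pairs whose value is None are dropped while everything else is rendered in order:

print(generate_func_call('requests.get',
                         args=['url'],
                         kwargs=[('timeout', '30'), ('verify', None)]))
# -> requests.get(url, timeout=30)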
def select(self, choice_scores):
"""
Groups the frozen sets by algorithm and first chooses an algorithm based
on the traditional UCB1 criteria.
Next, from that algorithm's frozen sets, makes the final set choice.
"""
# choose algorithm using a bandit
alg_scores = {}
for algorithm, choices in self.by_algorithm.items():
# only make arms for algorithms that have options
if not set(choices) & set(choice_scores.keys()):
continue
# sum up lists to get a list of all the scores from any run of this
# algorithm
sublists = [choice_scores.get(c, []) for c in choices]
alg_scores[algorithm] = sum(sublists, [])
best_algorithm = self.bandit(alg_scores)
# now use only the frozen sets from the chosen algorithm
best_subset = self.by_algorithm[best_algorithm]
normal_ucb1 = UCB1(choices=best_subset)
return normal_ucb1.select(choice_scores)
def _compute_weights(self):
""" Computes the weights for the scaled unscented Kalman filter. """
n = self.n
c = 1. / (n + 1)
self.Wm = np.full(n + 1, c)
self.Wc = self.Wm
def get_signing_keys(eid, keydef, key_file):
"""
If the *key_file* file exists then read the keys from there, otherwise
create the keys and store them a file with the name *key_file*.
:param eid: The ID of the entity that the keys belongs to
:param keydef: What keys to create
:param key_file: A file name
:return: A :py:class:`oidcmsg.key_jar.KeyJar` instance
"""
    if os.path.isfile(key_file):
        kj = KeyJar()
        with open(key_file, 'r') as fp:
            kj.import_jwks(json.loads(fp.read()), eid)
    else:
        kj = build_keyjar(keydef)[1]
        # make the keys known under both names
        with open(key_file, 'w') as fp:
            fp.write(json.dumps(kj.export_jwks()))
        kj.issuer_keys[eid] = kj.issuer_keys['']
return kj
def set_input(self, p_name, value):
"""Set a Step's input variable to a certain value.
The value comes either from a workflow input or output of a previous
step.
Args:
        p_name (str): the name of the Step input
value (str): the name of the output variable that provides the
value for this input.
Raises:
ValueError: The name provided is not a valid input name for this
Step.
"""
name = self.python_names.get(p_name)
    if name is None or name not in self.get_input_names():
raise ValueError('Invalid input "{}"'.format(p_name))
self.step_inputs[name] = value
def _find_feature_type(self, feature_name, eopatch):
""" Iterates over allowed feature types of given EOPatch and tries to find a feature type for which there
exists a feature with given name
:return: A feature type or `None` if such feature type does not exist
:rtype: FeatureType or None
"""
for feature_type in self.allowed_feature_types:
if feature_type.has_dict() and feature_name in eopatch[feature_type]:
return feature_type
return None
def _processArgs(self, entry, *_args, **_kwargs):
""" Given an entry, positional and keyword arguments, figure out what
the query-string options, payload and api arguments are.
"""
# We need the args to be a list so we can mutate them
args = list(_args)
kwargs = copy.deepcopy(_kwargs)
reqArgs = entry['args']
routeParams = {}
query = {}
payload = None
kwApiArgs = {}
paginationHandler = None
paginationLimit = None
# There are three formats for calling methods:
# 1. method(v1, v1, payload)
# 2. method(payload, k1=v1, k2=v2)
# 3. method(payload=payload, query=query, params={k1: v1, k2: v2})
if len(kwargs) == 0:
if 'input' in entry and len(args) == len(reqArgs) + 1:
payload = args.pop()
if len(args) != len(reqArgs):
log.debug(args)
log.debug(reqArgs)
raise exceptions.TaskclusterFailure('Incorrect number of positional arguments')
log.debug('Using method(v1, v2, payload) calling convention')
else:
# We're considering kwargs which are the api route parameters to be
# called 'flat' because they're top level keys. We're special
# casing calls which have only api-arg kwargs and possibly a payload
# value and handling them directly.
isFlatKwargs = True
if len(kwargs) == len(reqArgs):
for arg in reqArgs:
if not kwargs.get(arg, False):
isFlatKwargs = False
break
if 'input' in entry and len(args) != 1:
isFlatKwargs = False
if 'input' not in entry and len(args) != 0:
isFlatKwargs = False
else:
pass # We're using payload=, query= and param=
else:
isFlatKwargs = False
    # Now we're going to handle the two types of kwargs. The first is
    # 'flat' ones, where the api params are passed as top-level keys.
if isFlatKwargs:
if 'input' in entry:
payload = args.pop()
kwApiArgs = kwargs
log.debug('Using method(payload, k1=v1, k2=v2) calling convention')
warnings.warn(
"The method(payload, k1=v1, k2=v2) calling convention will soon be deprecated",
PendingDeprecationWarning
)
else:
kwApiArgs = kwargs.get('params', {})
payload = kwargs.get('payload', None)
query = kwargs.get('query', {})
paginationHandler = kwargs.get('paginationHandler', None)
paginationLimit = kwargs.get('paginationLimit', None)
log.debug('Using method(payload=payload, query=query, params={k1: v1, k2: v2}) calling convention')
    if 'input' in entry and payload is None:
raise exceptions.TaskclusterFailure('Payload is required')
# These all need to be rendered down to a string, let's just check that
# they are up front and fail fast
for arg in args:
if not isinstance(arg, six.string_types) and not isinstance(arg, int):
raise exceptions.TaskclusterFailure(
'Positional arg "%s" to %s is not a string or int' % (arg, entry['name']))
for name, arg in six.iteritems(kwApiArgs):
if not isinstance(arg, six.string_types) and not isinstance(arg, int):
raise exceptions.TaskclusterFailure(
'KW arg "%s: %s" to %s is not a string or int' % (name, arg, entry['name']))
if len(args) > 0 and len(kwApiArgs) > 0:
raise exceptions.TaskclusterFailure('Specify either positional or key word arguments')
    # We know for sure that if we don't give enough arguments that the call
    # should fail. We don't yet know if we should fail because of too many
    # arguments because we might be overwriting positional ones with kw ones
if len(reqArgs) > len(args) + len(kwApiArgs):
raise exceptions.TaskclusterFailure(
'%s takes %d args, only %d were given' % (
entry['name'], len(reqArgs), len(args) + len(kwApiArgs)))
# We also need to error out when we have more positional args than required
# because we'll need to go through the lists of provided and required args
# at the same time. Not disqualifying early means we'll get IndexErrors if
# there are more positional arguments than required
if len(args) > len(reqArgs):
        raise exceptions.TaskclusterFailure('%s called with too many positional args' %
                                            entry['name'])
i = 0
for arg in args:
log.debug('Found a positional argument: %s', arg)
routeParams[reqArgs[i]] = arg
i += 1
log.debug('After processing positional arguments, we have: %s', routeParams)
routeParams.update(kwApiArgs)
log.debug('After keyword arguments, we have: %s', routeParams)
if len(reqArgs) != len(routeParams):
errMsg = '%s takes %s args, %s given' % (
entry['name'],
','.join(reqArgs),
routeParams.keys())
log.error(errMsg)
raise exceptions.TaskclusterFailure(errMsg)
for reqArg in reqArgs:
if reqArg not in routeParams:
errMsg = '%s requires a "%s" argument which was not provided' % (
entry['name'], reqArg)
log.error(errMsg)
raise exceptions.TaskclusterFailure(errMsg)
return routeParams, payload, query, paginationHandler, paginationLimit
def write(self, text, fg='black', bg='white'):
    '''write to the console; the fg/bg colour arguments are accepted
    but ignored by this plain-stdout implementation'''
if isinstance(text, str):
sys.stdout.write(text)
else:
sys.stdout.write(str(text))
sys.stdout.flush()
def parse_kal_scan(kal_out):
"""Parse kal band scan output."""
kal_data = []
scan_band = determine_scan_band(kal_out)
scan_gain = determine_scan_gain(kal_out)
scan_device = determine_device(kal_out)
sample_rate = determine_sample_rate(kal_out)
chan_detect_threshold = determine_chan_detect_threshold(kal_out)
for line in kal_out.splitlines():
if "chan:" in line:
p_line = line.split(' ')
chan = str(p_line[1])
modifier = str(p_line[3])
power = str(p_line[5])
mod_raw = str(p_line[4]).replace(')\tpower:', '')
base_raw = str((p_line[2]).replace('(', ''))
mod_freq = herz_me(mod_raw)
base_freq = herz_me(base_raw)
final_freq = to_eng(determine_final_freq(base_freq, modifier,
mod_freq))
kal_run = {"channel": chan,
"base_freq": base_freq,
"mod_freq": mod_freq,
"modifier": modifier,
"final_freq": final_freq,
"power": power,
"band": scan_band,
"gain": scan_gain,
"device": scan_device,
"sample_rate": sample_rate,
"channel_detect_threshold": chan_detect_threshold}
kal_data.append(kal_run.copy())
return kal_data
def advection(scalar, wind, deltas):
r"""Calculate the advection of a scalar field by the wind.
The order of the dimensions of the arrays must match the order in which
the wind components are given. For example, if the winds are given [u, v],
then the scalar and wind arrays must be indexed as x,y (which puts x as the
rows, not columns).
Parameters
----------
scalar : N-dimensional array
Array (with N-dimensions) with the quantity to be advected.
wind : sequence of arrays
Length M sequence of N-dimensional arrays. Represents the flow,
with a component of the wind in each dimension. For example, for
horizontal advection, this could be a list: [u, v], where u and v
are each a 2-dimensional array.
deltas : sequence of float or ndarray
A (length M) sequence containing the grid spacing(s) in each dimension. If using
arrays, in each array there should be one item less than the size of `scalar` along the
applicable axis.
Returns
-------
N-dimensional array
An N-dimensional array containing the advection at all grid points.
"""
# This allows passing in a list of wind components or an array.
wind = _stack(wind)
# If we have more than one component, we need to reverse the order along the first
# dimension so that the wind components line up with the
# order of the gradients from the ..., y, x ordered array.
if wind.ndim > scalar.ndim:
wind = wind[::-1]
# Gradient returns a list of derivatives along each dimension. We convert
# this to an array with dimension as the first index. Reverse the deltas to line up
# with the order of the dimensions.
grad = _stack(gradient(scalar, deltas=deltas[::-1]))
# Make them be at least 2D (handling the 1D case) so that we can do the
# multiply and sum below
grad, wind = atleast_2d(grad, wind)
return (-grad * wind).sum(axis=0)
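A minimal NumPy-only sketch of the same quantity in 2-D, computing -u*ds/dx - v*ds/dy on a uniform grid; this sidesteps the module's `_stack`/`gradient` helpers and any unit handling:

import numpy as np

dx = dy = 1.0
x, y = np.meshgrid(np.arange(5.0), np.arange(5.0))
scalar = x + 2 * y               # ds/dx = 1, ds/dy = 2
u = np.full_like(scalar, 3.0)    # flow in x
v = np.full_like(scalar, 1.0)    # flow in y

dsdy, dsdx = np.gradient(scalar, dy, dx)  # gradients come back rows-first
adv = -(u * dsdx + v * dsdy)
print(adv[2, 2])  # -(3*1 + 1*2) = -5.0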
def list_files(self, dataset_id, glob=".", is_dir=False):
"""
List matched filenames in a dataset on Citrination.
:param dataset_id: The ID of the dataset to search for files.
:type dataset_id: int
:param glob: A pattern which will be matched against files in the dataset.
:type glob: str
:param is_dir: A boolean indicating whether or not the pattern should match against the beginning of paths in the dataset.
:type is_dir: bool
:return: A list of filepaths in the dataset matching the provided glob.
:rtype: list of strings
"""
data = {
"list": {
"glob": glob,
"isDir": is_dir
}
}
    return self._get_success_json(
        self._post_json(routes.list_files(dataset_id), data,
                        failure_message="Failed to list files for dataset {}".format(dataset_id))
    )['files']
def _peg_pose_in_hole_frame(self):
"""
A helper function that takes in a named data field and returns the pose of that
object in the base frame.
"""
# World frame
peg_pos_in_world = self.sim.data.get_body_xpos("cylinder")
peg_rot_in_world = self.sim.data.get_body_xmat("cylinder").reshape((3, 3))
peg_pose_in_world = T.make_pose(peg_pos_in_world, peg_rot_in_world)
# World frame
hole_pos_in_world = self.sim.data.get_body_xpos("hole")
hole_rot_in_world = self.sim.data.get_body_xmat("hole").reshape((3, 3))
hole_pose_in_world = T.make_pose(hole_pos_in_world, hole_rot_in_world)
world_pose_in_hole = T.pose_inv(hole_pose_in_world)
peg_pose_in_hole = T.pose_in_A_to_pose_in_B(
peg_pose_in_world, world_pose_in_hole
)
return peg_pose_in_hole
def reversals(self, transfer_id, data={}, **kwargs):
""""
Get all Reversal Transfer from given id
Args:
transfer_id :
Id for which reversal transfer object has to be fetched
Returns:
Transfer Dict
"""
url = "{}/{}/reversals".format(self.base_url, transfer_id)
return self.get_url(url, data, **kwargs)
def list(self, *args, **kwargs):
"""
List swarm nodes.
Args:
filters (dict): Filters to process on the nodes list. Valid
filters: ``id``, ``name``, ``membership`` and ``role``.
Default: ``None``
Returns:
A list of :py:class:`Node` objects.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
Example:
>>> client.nodes.list(filters={'role': 'manager'})
"""
return [
self.prepare_model(n)
for n in self.client.api.nodes(*args, **kwargs)
]
def perform(self, command, params=None, **kwargs):
"""Execute a command.
Arguments can be supplied either as a dictionary or as keyword
arguments. Examples:
stc.perform('LoadFromXml', {'filename':'config.xml'})
stc.perform('LoadFromXml', filename='config.xml')
Arguments:
command -- Command to execute.
params -- Optional. Dictionary of parameters (name-value pairs).
kwargs -- Optional keyword arguments (name=value pairs).
Return:
Data from command.
"""
self._check_session()
if not params:
params = {}
if kwargs:
params.update(kwargs)
params['command'] = command
status, data = self._rest.post_request('perform', None, params)
return data
def unpublish(self):
"""
Unpublishes the resource.
"""
self._client._delete(
"{0}/published".format(
self.__class__.base_url(
self.sys['space'].id,
self.sys['id'],
environment_id=self._environment_id
),
),
headers=self._update_headers()
)
return self.reload()
def is_uncertainty_edition_allowed(self, analysis_brain):
"""Checks if the edition of the uncertainty field is allowed
:param analysis_brain: Brain that represents an analysis
    :return: True if the user can edit the uncertainty field, otherwise False
"""
# Only allow to edit the uncertainty if result edition is allowed
if not self.is_result_edition_allowed(analysis_brain):
return False
    # Get the analysis object
obj = api.get_object(analysis_brain)
# Manual setting of uncertainty is not allowed
if not obj.getAllowManualUncertainty():
return False
# Result is a detection limit -> uncertainty setting makes no sense!
if obj.getDetectionLimitOperand() in [LDL, UDL]:
return False
return True
def byte_bounds_offset(self):
"""Return start and end offsets of this segment's data into the
base array's data.
This ignores the byte order index. Arrays using the byte order index
will have the entire base array's raw data.
"""
if self.data.base is None:
if self.is_indexed:
basearray = self.data.np_data
else:
basearray = self.data
return 0, len(basearray)
return int(self.data_start - self.base_start), int(self.data_end - self.base_start)
def imagetransformer_sep_channels_12l_16h_imagenet_large():
"""separate rgb embeddings."""
hparams = imagetransformer_sep_channels_8l_8h()
hparams.num_hidden_layers = 12
hparams.batch_size = 1
hparams.filter_size = 2048
hparams.num_heads = 16
hparams.learning_rate_warmup_steps = 16000
hparams.sampling_method = "random"
hparams.learning_rate = 0.1
return hparams
def set_many(self, mapping, timeout=None):
"""Sets multiple keys and values from a mapping.
:param mapping: a mapping with the keys/values to set.
:param timeout: the cache timeout for the key (if not specified,
it uses the default timeout).
:returns: Whether all given keys have been set.
:rtype: boolean
"""
rv = True
for key, value in _items(mapping):
if not self.set(key, value, timeout):
rv = False
return rv
def artUrl(self):
""" Return the first first art url starting on the most specific for that item."""
art = self.firstAttr('art', 'grandparentArt')
return self._server.url(art, includeToken=True) if art else None
def refresh_fqdn_cache(force=False):
'''
Force refreshes all FQDNs used in rules.
force
Forces all fqdn refresh
CLI Example:
.. code-block:: bash
salt '*' panos.refresh_fqdn_cache
salt '*' panos.refresh_fqdn_cache force=True
'''
if not isinstance(force, bool):
raise CommandExecutionError("Force option must be boolean.")
if force:
query = {'type': 'op',
'cmd': '<request><system><fqdn><refresh><force>yes</force></refresh></fqdn></system></request>'}
else:
query = {'type': 'op', 'cmd': '<request><system><fqdn><refresh></refresh></fqdn></system></request>'}
return __proxy__['panos.call'](query)
def is_switched_on(self, refresh=False):
"""Get armed state.
Refresh data from Vera if refresh is True, otherwise use local cache.
Refresh is only needed if you're not using subscriptions.
"""
if refresh:
self.refresh()
val = self.get_value('Armed')
return val == '1'
def until(self, condition, is_true=None, message=""):
"""Repeatedly runs condition until its return value evalutes to true,
or its timeout expires or the predicate evaluates to true.
This will poll at the given interval until the given timeout
is reached, or the predicate or conditions returns true. A
condition that returns null or does not evaluate to true will
fully elapse its timeout before raising a
``TimeoutException``.
If an exception is raised in the condition function and it's
not ignored, this function will raise immediately. If the
exception is ignored, it will continue polling for the
condition until it returns successfully or a
``TimeoutException`` is raised.
The return value of the callable `condition` will be returned
once it completes successfully.
:param condition: A callable function whose return value will
be returned by this function if it evalutes to true.
:param is_true: An optional predicate that will terminate and
return when it evalutes to False. It should be a function
that will be passed `clock` and an end time. The default
predicate will terminate a wait when the clock elapses the
timeout.
:param message: An optional message to include in the
exception's message if this function times out.
:returns: Return value of `condition`.
"""
rv = None
last_exc = None
until = is_true or until_pred
start = self.clock.now
while not until(self.clock, self.end):
try:
rv = condition()
except (KeyboardInterrupt, SystemExit) as e:
raise e
except self.exceptions as e:
last_exc = sys.exc_info()
if isinstance(rv, bool) and not rv:
time.sleep(self.interval)
continue
if rv is not None:
return rv
self.clock.sleep(self.interval)
if message:
message = " with message: %s" % message
raise TimeoutException(
"Timed out after %s seconds%s" %
((self.clock.now - start), message), cause=last_exc)
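A stripped-down, self-contained version of the same polling loop, with illustrative names and defaults rather than the class's real API, may make the control flow easier to follow:

import threading
import time

def until(condition, timeout=2.0, interval=0.1, message=""):
    """Poll `condition` until it returns a truthy value or `timeout` expires."""
    end = time.monotonic() + timeout
    last_exc = None
    while time.monotonic() < end:
        try:
            rv = condition()
        except Exception as exc:  # the real class only swallows configured types
            last_exc, rv = exc, None
        if rv:
            return rv
        time.sleep(interval)
    raise TimeoutError("Timed out after %s seconds %s" % (timeout, message)) from last_exc

state = {"ready": False}
threading.Timer(0.3, lambda: state.update(ready=True)).start()
print(until(lambda: state["ready"]))  # True once the timer fires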
def syncView(self):
"""
Syncs all the items to the view.
"""
if not self.updatesEnabled():
return
for item in self.topLevelItems():
try:
item.syncView(recursive=True)
except AttributeError:
continue
def write_branch_data(self, file):
""" Writes branch data as CSV.
"""
writer = self._get_writer(file)
writer.writerow(BRANCH_ATTRS)
for branch in self.case.branches:
writer.writerow([getattr(branch, a) for a in BRANCH_ATTRS])
def _authenticate(self, params, headers):
"""
Method that simply adjusts authentication credentials for the
request.
`params` is the querystring of the request.
`headers` is the header of the request.
If auth instance is not provided to this class, this method simply
returns without doing anything.
"""
if self.authentication:
user = self.authentication.get_user()
params.update({'auth': user.firebase_auth_token})
headers.update(self.authentication.authenticator.HEADERS)
def to_pb(self):
"""Converts the union into a single GC rule as a protobuf.
:rtype: :class:`.table_v2_pb2.GcRule`
:returns: The converted current object.
"""
union = table_v2_pb2.GcRule.Union(rules=[rule.to_pb() for rule in self.rules])
return table_v2_pb2.GcRule(union=union)
def keyword_hookup(self, noteId, keywords):
    '''
    Unhook existing cross-linking entries for this note, then hook up
    the new keyword entries.
    '''
    try:
        self.cur.execute("DELETE FROM notekeyword WHERE noteid=?", [noteId])
    except Exception:
        self.error("ERROR: cannot unhook previous keywords")
    # Now, hook up the new entries, one by one.
for keyword in keywords:
keyword = keyword.decode('utf-8')
self.fyi(" inserting keyword:", keyword)
# Make sure the keyword table contains the word in question.
keywordId = self.con.execute("SELECT keywordId FROM keyword WHERE keyword = ?;", [keyword]).fetchone()
try:
if keywordId:
self.fyi(" (existing keyword with id: %s)" % keywordId)
keywordId = keywordId[0]
else:
self.fyi(" (new keyword)")
self.cur.execute("INSERT INTO keyword(keyword) VALUES (?);", [keyword])
keywordId = self.cur.lastrowid
# Finally, do the actual hookup for this word.
self.con.execute("INSERT INTO notekeyword(noteId, keywordID) VALUES(?, ?)", [noteId, keywordId])
        except Exception:
self.error("error hooking up keyword '%s'" % keyword)
self.con.commit()
def from_offset(self, value):
'''
The starting from index of the hits to return. Defaults to 0.
'''
    if not self.params:
        self.params = {'from': value}
        return self
self.params['from'] = value
return self
def adjustHeight(self, column):
"""
Adjusts the height for this item based on the columna and its text.
:param column | <int>
"""
tree = self.treeWidget()
if not tree:
return
w = tree.width()
if tree.verticalScrollBar().isVisible():
w -= tree.verticalScrollBar().width()
doc = QtGui.QTextDocument()
doc.setTextWidth(w)
doc.setHtml(self.text(0))
height = doc.documentLayout().documentSize().height()
self.setFixedHeight(height+2)
def interpret(self, msg):
""" Load input """
slides = msg.get('slides', [])
self.cache = msg.get('folder', '.')
self.gallery = msg.get('gallery', ['..'])
self.finder.interpret(dict(galleries=self.gallery))
# in case slides is a generator, turn it into a list
# since I am going to go through it twice
slides = [slide for slide in slides]
logname = msg.get('logname')
if logname:
self.write_slide_list(logname, slides)
# Now spin through slides again
for slide in slides:
image = self.draw_slide(slide)
heading = slide['heading']['text']
filename = self.get_image_name(heading)
self.cache_image(filename, image)
# fixme -- just return info in slides.txt as list of dicts
return
def dns_resource_reference(self):
"""Instance depends on the API version:
* 2018-05-01: :class:`DnsResourceReferenceOperations<azure.mgmt.dns.v2018_05_01.operations.DnsResourceReferenceOperations>`
"""
api_version = self._get_api_version('dns_resource_reference')
if api_version == '2018-05-01':
from .v2018_05_01.operations import DnsResourceReferenceOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version)))
def set(self, key, value, **kw):
"""Place a value in the cache.
:param key: the value's key.
:param value: the value.
:param \**kw: cache configuration arguments.
"""
self.impl.set(key, value, **self._get_cache_kw(kw, None))
def in_use(self):
"""Returns True if there is a :class:`State` object that uses this
``Flow``"""
state = State.objects.filter(flow=self).first()
return bool(state)
def choice_voters_changed_update_cache(
sender, instance, action, reverse, model, pk_set, **kwargs):
"""Update cache when choice.voters changes."""
if action not in ('post_add', 'post_remove', 'post_clear'):
# post_clear is not handled, because clear is called in
# django.db.models.fields.related.ReverseManyRelatedObjects.__set__
# before setting the new order
return
if model == User:
assert type(instance) == Choice
choices = [instance]
if pk_set:
users = list(User.objects.filter(pk__in=pk_set))
else:
users = []
else:
if pk_set:
choices = list(Choice.objects.filter(pk__in=pk_set))
else:
choices = []
users = [instance]
from .tasks import update_cache_for_instance
for choice in choices:
update_cache_for_instance('Choice', choice.pk, choice)
for user in users:
update_cache_for_instance('User', user.pk, user)
def add(self, layer, verbosity = 0, position = None):
"""
Adds a layer. Layer verbosity is optional (default 0).
"""
layer._verbosity = verbosity
layer._maxRandom = self._maxRandom
layer.minTarget = 0.0
layer.maxTarget = 1.0
layer.minActivation = 0.0
layer.maxActivation = 1.0
    if position is None:
self.layers.append(layer)
else:
self.layers.insert(position, layer)
self.layersByName[layer.name] = layer
def generate_enums(basename, xml):
'''generate main header per XML file'''
    directory = os.path.join(basename, 'enums')
mavparse.mkdir_p(directory)
for en in xml.enum:
f = open(os.path.join(directory, en.name+".java"), mode='w')
t.write(f, '''
/* AUTO-GENERATED FILE. DO NOT MODIFY.
*
* This class was automatically generated by the
* java mavlink generator tool. It should not be modified by hand.
*/
package com.MAVLink.enums;
/**
* ${description}
*/
public class ${name} {
${{entry: public static final int ${name} = ${value}; /* ${description} |${{param:${description}| }} */
}}
}
''', en)
f.close()
def check_output(self, cmd):
"""Calls a command through SSH and returns its output.
"""
ret, output = self._call(cmd, True)
if ret != 0: # pragma: no cover
raise RemoteCommandFailure(command=cmd, ret=ret)
logger.debug("Output: %r", output)
return output
def get_collection_in_tower(self, key):
"""
Get items from this collection that are added in the current tower.
"""
new = tf.get_collection(key)
old = set(self.original.get(key, []))
# persist the order in new
return [x for x in new if x not in old]
def sign_ssh_challenge(self, blob, identity):
"""Sign given blob using a private key on the device."""
msg = _parse_ssh_blob(blob)
log.debug('%s: user %r via %r (%r)',
msg['conn'], msg['user'], msg['auth'], msg['key_type'])
log.debug('nonce: %r', msg['nonce'])
fp = msg['public_key']['fingerprint']
log.debug('fingerprint: %s', fp)
log.debug('hidden challenge size: %d bytes', len(blob))
log.info('please confirm user "%s" login to "%s" using %s...',
msg['user'].decode('ascii'), identity.to_string(),
self.device)
with self.device:
return self.device.sign(blob=blob, identity=identity)
def get_kernel_id(self):
"""
Get the kernel id of the client.
Return a str with the kernel id or None.
"""
sessions_url = self.get_session_url()
sessions_req = requests.get(sessions_url).content.decode()
sessions = json.loads(sessions_req)
if os.name == 'nt':
path = self.path.replace('\\', '/')
else:
path = self.path
for session in sessions:
notebook_path = session.get('notebook', {}).get('path')
if notebook_path is not None and notebook_path == path:
kernel_id = session['kernel']['id']
return kernel_id
def _find_link_target(self, tarinfo):
"""Find the target member of a symlink or hardlink member in the
archive.
"""
if tarinfo.issym():
# Always search the entire archive.
linkname = os.path.dirname(tarinfo.name) + "/" + tarinfo.linkname
limit = None
else:
# Search the archive before the link, because a hard link is
# just a reference to an already archived file.
linkname = tarinfo.linkname
limit = tarinfo
member = self._getmember(linkname, tarinfo=limit, normalize=True)
if member is None:
raise KeyError("linkname %r not found" % linkname)
return member
def set(self, name: str, value: Any) -> None:
"""
Stores a knowledge item in the agent knowledge base.
Args:
name (str): name of the item
value (Any): value of the item
"""
self.agent.set(name, value)
def table(self) -> Table:
"""
Returns a SQLAlchemy :class:`Table` object. This is either the
:class:`Table` object that was used for initialization, or one that
was constructed from the ``tablename`` plus the ``metadata``.
"""
if self._table is not None:
return self._table
assert self._metadata, (
"Must specify metadata (in constructor or via set_metadata()/"
"set_metadata_if_none() before you can get a Table from a "
"tablename"
)
for table in self._metadata.tables.values(): # type: Table
if table.name == self._tablename:
return table
raise ValueError("No table named {!r} is present in the "
"metadata".format(self._tablename))
def vcsNodeState_originator_switch_info_switchIpV6Address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
vcsNodeState = ET.SubElement(config, "vcsNodeState", xmlns="urn:brocade.com:mgmt:brocade-vcs")
originator_switch_info = ET.SubElement(vcsNodeState, "originator-switch-info")
switchIpV6Address = ET.SubElement(originator_switch_info, "switchIpV6Address")
switchIpV6Address.text = kwargs.pop('switchIpV6Address')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def _encode_params(data):
"""Encode parameters in a piece of data.
If the data supplied is a dictionary, encodes each parameter in it, and
returns a list of tuples containing the encoded parameters, and a urlencoded
version of that.
Otherwise, assumes the data is already encoded appropriately, and
returns it twice.
"""
if hasattr(data, '__iter__'):
data = dict(data)
if hasattr(data, 'items'):
result = []
for k, vs in data.items():
            for v in (vs if isinstance(vs, list) else [vs]):
result.append((k.encode('utf-8') if isinstance(k, unicode) else k,
v.encode('utf-8') if isinstance(v, unicode) else v))
return result, urllib.urlencode(result, doseq=True)
else:
return data, data
def friedmanchisquare(*args):
"""
Friedman Chi-Square is a non-parametric, one-way within-subjects
ANOVA. This function calculates the Friedman Chi-square test for repeated
measures and returns the result, along with the associated probability
value. It assumes 3 or more repeated measures. Only 3 levels requires a
minimum of 10 subjects in the study. Four levels requires 5 subjects per
level(??).
Usage: lfriedmanchisquare(*args)
Returns: chi-square statistic, associated p-value
"""
k = len(args)
if k < 3:
raise ValueError('Less than 3 levels. Friedman test not appropriate.')
n = len(args[0])
    # Rank the k repeated measures within each subject, then total the
    # ranks for each treatment level across subjects.
    data = [rankdata(row) for row in zip(*args)]
    ssbn = 0
    for i in range(k):
        ssbn = ssbn + sum(row[i] for row in data) ** 2
chisq = 12.0 / (k * n * (k + 1)) * ssbn - 3 * n * (k + 1)
return chisq, chisqprob(chisq, k - 1)
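As a sanity check, the statistic can be compared against SciPy's independent implementation of the same test (SciPy additionally applies a tie correction, so results agree exactly only for untied data):

from scipy import stats

a = [7.0, 9.9, 8.5, 5.1, 10.3]
b = [5.3, 5.7, 4.7, 3.5, 7.7]
c = [4.9, 7.6, 5.5, 2.8, 8.4]
print(stats.friedmanchisquare(a, b, c))  # (statistic, pvalue)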
def delete(self):
"""Deletes the object from the datastore."""
pipeline = self.db.pipeline()
self._delete_from_indices(pipeline)
self._delete_membership(pipeline)
pipeline.delete(self.key())
pipeline.execute()
def state(name):
'''
Returns the state of the container
name
Container name or ID
**RETURN DATA**
A string representing the current state of the container (either
``running``, ``paused``, or ``stopped``)
CLI Example:
.. code-block:: bash
salt myminion docker.state mycontainer
'''
contextkey = 'docker.state.{0}'.format(name)
if contextkey in __context__:
return __context__[contextkey]
__context__[contextkey] = _get_state(inspect_container(name))
return __context__[contextkey]
def _pfp__parse(self, stream, save_offset=False):
"""Parse the IO stream for this enum
:stream: An IO stream that can be read from
:returns: The number of bytes parsed
"""
res = super(Enum, self)._pfp__parse(stream, save_offset)
if self._pfp__value in self.enum_vals:
self.enum_name = self.enum_vals[self._pfp__value]
else:
self.enum_name = "?? UNK_ENUM ??"
return res
def stop(self):
"""Stop streaming samples from device and delete samples buffer"""
if not self.device.is_streaming:
return
self.device.stop_stream()
self._writer.close()
self._bins = None
self._repeats = None
self._base_buffer_size = None
self._max_buffer_size = None
self._buffer_repeats = None
self._buffer = None
self._tune_delay = None
self._reset_stream = None
self._psd = None
self._writer = None
def reconstitute_path(drive, folders):
"""Reverts a tuple from `get_path_components` into a path.
:param drive: A drive (eg 'c:'). Only applicable for NT systems
:param folders: A list of folder names
    :return: A path comprising the drive and list of folder names. The path
        terminates with an `os.path.sep` *only* if it is a root directory
"""
reconstituted = os.path.join(drive, os.path.sep, *folders)
return reconstituted
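For instance, on a POSIX system (where the drive component is an empty string):

print(reconstitute_path('', ['usr', 'local', 'bin']))  # /usr/local/bin
print(reconstitute_path('', []))                       # / (only the root keeps the trailing sep)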
def map_transaction(txn):
"""
Maps a single transaction row to a dictionary.
Parameters
----------
txn : pd.DataFrame
A single transaction object to convert to a dictionary.
Returns
-------
dict
Mapped transaction.
"""
if isinstance(txn['sid'], dict):
sid = txn['sid']['sid']
symbol = txn['sid']['symbol']
else:
sid = txn['sid']
symbol = txn['sid']
return {'sid': sid,
'symbol': symbol,
'price': txn['price'],
'order_id': txn['order_id'],
'amount': txn['amount'],
'commission': txn['commission'],
'dt': txn['dt']}
|
Maps a single transaction row to a dictionary.
Parameters
----------
txn : pd.DataFrame
A single transaction object to convert to a dictionary.
Returns
-------
dict
Mapped transaction.
|
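A short sketch covering both shapes of 'sid' the mapper accepts; all field values are hypothetical:

txn = {'price': 10.0, 'order_id': 'abc-1', 'amount': 5,
       'commission': 0.25, 'dt': '2016-01-04'}

plain = dict(txn, sid=24)
rich = dict(txn, sid={'sid': 24, 'symbol': 'AAPL'})

print(map_transaction(plain)['symbol'])  # 24 (the plain sid doubles as the symbol)
print(map_transaction(rich)['symbol'])   # 'AAPL'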
def download_file(cls, url, local_file_name=None, force=False, chunk_size=1024):
"""
Download file from a given url
"""
local_file_name = local_file_name if local_file_name else url.split('/')[-1]
filepath = os.path.join(cls.data_path, local_file_name)
if not os.path.exists(filepath) or force:
try:
headers = requests.head(url, allow_redirects=True).headers
length = headers.get('Content-Length')
logger.info("Starting download of {} file with {} bytes ...".format(url, length))
widgets = [
'Downloading file please wait...', progressbar.Percentage(),
' ', progressbar.Bar(),
' ', progressbar.ETA(),
' ', progressbar.FileTransferSpeed(),
]
bar = progressbar.ProgressBar(widgets=widgets, max_value=int(length) + chunk_size).start()
r = requests.get(url, stream=True)
with open(filepath, 'wb') as f:
total_chunk = 0
for chunk in r.iter_content(chunk_size):
if chunk:
f.write(chunk)
total_chunk += chunk_size
bar.update(total_chunk)
bar.finish()
except:
if os.path.exists(filepath):
os.remove(filepath)
raise
return filepath
|
Download file from a given url
|
def mget(self, body, doc_type=None, index=None, params=None):
"""
Get multiple documents based on an index, type (optional) and ids.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html>`_
:arg body: Document identifiers; can be either `docs` (containing full
document information) or `ids` (when index and type are provided in
the URL).
:arg index: The name of the index
:arg _source: True or false to return the _source field or not, or a
list of fields to return
:arg _source_exclude: A list of fields to exclude from the returned
_source field
:arg _source_include: A list of fields to extract and return from the
_source field
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg realtime: Specify whether to perform the operation in realtime or
search mode
:arg refresh: Refresh the shard containing the document before
performing the operation
:arg routing: Specific routing value
:arg stored_fields: A comma-separated list of stored fields to return in
the response
"""
if body in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument 'body'.")
return self.transport.perform_request(
"GET", _make_path(index, doc_type, "_mget"), params=params, body=body
)
|
Get multiple documents based on an index, type (optional) and ids.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-multi-get.html>`_
:arg body: Document identifiers; can be either `docs` (containing full
document information) or `ids` (when index and type are provided in
the URL).
:arg index: The name of the index
:arg _source: True or false to return the _source field or not, or a
list of fields to return
:arg _source_exclude: A list of fields to exclude from the returned
_source field
:arg _source_include: A list of fields to extract and return from the
_source field
:arg preference: Specify the node or shard the operation should be
performed on (default: random)
:arg realtime: Specify whether to perform the operation in realtime or
search mode
:arg refresh: Refresh the shard containing the document before
performing the operation
:arg routing: Specific routing value
:arg stored_fields: A comma-separated list of stored fields to return in
the response
|
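A usage sketch against a hypothetical index, assuming an Elasticsearch node is reachable on localhost:

from elasticsearch import Elasticsearch

es = Elasticsearch()
resp = es.mget(index='my-index', body={'ids': ['1', '2', '3']})
for doc in resp['docs']:
    print(doc['_id'], doc.get('found'), doc.get('_source'))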
def init_app(self, app):
"""Initialize an application.
:param app: A :class:`~flask.Flask` app.
"""
if not hasattr(app, 'extensions'):
app.extensions = {}
app.extensions['nav'] = self
app.add_template_global(self.elems, 'nav')
# register some renderers
for args in self._renderers:
register_renderer(app, *args)
|
Initialize an application.
:param app: A :class:`~flask.Flask` app.
|
def idle_print_status(self):
'''print out statistics every 10 seconds from idle loop'''
now = time.time()
if (now - self.last_idle_status_printed_time) >= 10:
print(self.status())
self.last_idle_status_printed_time = now
|
print out statistics every 10 seconds from idle loop
|
def pairwise_kernel(self, X, Y):
"""Function to use with :func:`sklearn.metrics.pairwise.pairwise_kernels`
Parameters
----------
X : array, shape = (n_features,)
Y : array, shape = (n_features,)
Returns
-------
similarity : float
Similarities are normalized to be within [0, 1]
"""
check_is_fitted(self, 'X_fit_')
if X.shape[0] != Y.shape[0]:
raise ValueError('X and Y have different number of features')
val = pairwise_continuous_ordinal_kernel(X[self._numeric_columns], Y[self._numeric_columns],
self._numeric_ranges)
if len(self._nominal_columns) > 0:
val += pairwise_nominal_kernel(X[self._nominal_columns].astype(numpy.int8),
Y[self._nominal_columns].astype(numpy.int8))
val /= X.shape[0]
return val
|
Function to use with :func:`sklearn.metrics.pairwise.pairwise_kernels`
Parameters
----------
X : array, shape = (n_features,)
Y : array, shape = (n_features,)
Returns
-------
similarity : float
Similarities are normalized to be within [0, 1]
|
def wait_until_page_ready(page_object, timeout=WTF_TIMEOUT_MANAGER.NORMAL):
"""Waits until document.readyState == Complete (e.g. ready to execute javascript commands)
Args:
page_object (PageObject) : PageObject class
Kwargs:
timeout (number) : timeout period
"""
try:
do_until(lambda: page_object.webdriver.execute_script("return document.readyState").lower()
== 'complete', timeout)
except wait_utils.OperationTimeoutError:
raise PageUtilOperationTimeoutError(
"Timeout occurred while waiting for page to be ready.")
|
Waits until document.readyState == 'complete' (e.g. ready to execute javascript commands)
Args:
page_object (PageObject) : PageObject class
Kwargs:
timeout (number) : timeout period
|
def newgroups_gen(self, timestamp):
"""Generator for the NEWGROUPS command.
Generates a list of newsgroups created on the server since the specified
timestamp.
See <http://tools.ietf.org/html/rfc3977#section-7.3>
Args:
timestamp: Datetime object giving 'created since' datetime.
Yields:
A tuple containing the name, low water mark, high water mark,
and status for the newsgroup.
Note: If the datetime object supplied as the timestamp is naive (tzinfo
is None) then it is assumed to be given as GMT.
"""
if timestamp.tzinfo:
ts = timestamp.astimezone(date.TZ_GMT)
else:
ts = timestamp.replace(tzinfo=date.TZ_GMT)
args = ts.strftime("%Y%m%d %H%M%S %Z")
code, message = self.command("NEWGROUPS", args)
if code != 231:
raise NNTPReplyError(code, message)
for line in self.info_gen(code, message):
yield utils.parse_newsgroup(line)
|
Generator for the NEWGROUPS command.
Generates a list of newsgroups created on the server since the specified
timestamp.
See <http://tools.ietf.org/html/rfc3977#section-7.3>
Args:
timestamp: Datetime object giving 'created since' datetime.
Yields:
A tuple containing the name, low water mark, high water mark,
and status for the newsgroup.
Note: If the datetime object supplied as the timestamp is naive (tzinfo
is None) then it is assumed to be given as GMT.
|
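A sketch of iterating the generator; `client` stands in for an already-connected instance of the NNTP client class this method belongs to:

from datetime import datetime, timedelta

since = datetime.utcnow() - timedelta(days=7)  # naive, so treated as GMT
for name, low, high, status in client.newgroups_gen(since):
    print(name, low, high, status)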
def autocomplete(query, country=None, hurricanes=False, cities=True, timeout=5):
"""Make an autocomplete API request
This can be used to find cities and/or hurricanes by name
:param string query: city
:param string country: restrict search to a specific country. Must be a two letter country code
:param boolean hurricanes: whether to search for hurricanes or not
:param boolean cities: whether to search for cities or not
:param integer timeout: timeout of the api request
:returns: result of the autocomplete API request
:rtype: dict
"""
data = {}
data['query'] = quote(query)
data['country'] = country or ''
data['hurricanes'] = 1 if hurricanes else 0
data['cities'] = 1 if cities else 0
data['format'] = 'JSON'
r = requests.get(AUTOCOMPLETE_URL.format(**data), timeout=timeout)
results = json.loads(r.content)['RESULTS']
return results
|
Make an autocomplete API request
This can be used to find cities and/or hurricanes by name
:param string query: city
:param string country: restrict search to a specific country. Must be a two letter country code
:param boolean hurricanes: whether to search for hurricanes or not
:param boolean cities: whether to search for cities or not
:param integer timeout: timeout of the api request
:returns: result of the autocomplete API request
:rtype: dict
|
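A minimal sketch of a call; the query is hypothetical and the structure of each result dict depends on the Weather Underground API response:

results = autocomplete('San Fran', country='US')
for item in results[:3]:
    print(item)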
def update(self):
"""Update the status of the range setting."""
self._controller.update(self._id, wake_if_asleep=False)
data = self._controller.get_charging_params(self._id)
if data and (time.time() - self.__manual_update_time > 60):
self.__maxrange_state = data['charge_to_max_range']
|
Update the status of the range setting.
|
def cluster_centers_(self):
"""
Searches for or creates cluster centers for the specified clustering
algorithm. This algorithm ensures that the centers are
appropriately drawn and scaled so that distances between clusters are
maintained.
"""
# TODO: Handle agglomerative clustering and LDA
for attr in ('cluster_centers_',):
try:
return getattr(self.estimator, attr)
except AttributeError:
continue
raise AttributeError(
"could not find or make cluster_centers_ for {}".format(
self.estimator.__class__.__name__
))
|
Searches for or creates cluster centers for the specified clustering
algorithm. This algorithm ensures that the centers are
appropriately drawn and scaled so that distances between clusters are
maintained.
|
def writeSentence(self, cmd, *words):
"""
Write encoded sentence.
:param cmd: Command word.
:param words: Additional words.
"""
encoded = self.encodeSentence(cmd, *words)
self.log('<---', cmd, *words)
self.transport.write(encoded)
|
Write encoded sentence.
:param cmd: Command word.
:param words: Additional words.
|
def example_value(self):
"""
If we're an instance of a Serializable, fall back to its
`example_instance()` method.
"""
from .serializable import Serializable
inst = self._static_example_value()
if inst is tr.Undefined and issubclass(self.klass, Serializable):
return self.klass.example_instance()
return inst
|
If we're an instance of a Serializable, fall back to its
`example_instance()` method.
|
def log_call(call_name):
"""Log the API call to the logger."""
def decorator(f):
@wraps(f)
def wrapper(*args, **kw):
instance = args[0]
instance.logger.info(call_name, {"content": request.get_json()})
return f(*args, **kw)
return wrapper
return decorator
|
Log the API call to the logger.
|
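A sketch of applying the decorator; it assumes the wrapped method's instance (args[0]) exposes a `logger`, and that the call happens inside a Flask request context so request.get_json() works:

import logging

class WidgetResource:
    logger = logging.getLogger('widget-api')

    @log_call('widgets.create')
    def post(self, *args, **kw):
        return {'created': True}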
def plot_grid(step):
"""Plot cell position and thickness.
The figure is called grid_N.pdf, where N is replaced by the step index.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
"""
rad = get_rprof(step, 'r')[0]
drad = get_rprof(step, 'dr')[0]
_, unit = step.sdat.scale(1, 'm')
if unit:
unit = ' ({})'.format(unit)
fig, (ax1, ax2) = plt.subplots(2, sharex=True)
ax1.plot(rad, '-ko')
ax1.set_ylabel('$r$' + unit)
ax2.plot(drad, '-ko')
ax2.set_ylabel('$dr$' + unit)
ax2.set_xlim([-0.5, len(rad) - 0.5])
ax2.set_xlabel('Cell number')
misc.saveplot(fig, 'grid', step.istep)
|
Plot cell position and thickness.
The figure is called grid_N.pdf, where N is replaced by the step index.
Args:
step (:class:`~stagpy.stagyydata._Step`): a step of a StagyyData
instance.
|
def convert_column(self, values):
"""Normalize values."""
assert all(values >= 0), 'Cannot normalize a column with negatives'
total = sum(values)
if total > 0:
return values / total
else:
return values
|
Normalize values.
|
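A quick sketch with a NumPy column; `table` stands in for an instance of the class defining convert_column. All-zero columns pass through unchanged because of the `total > 0` guard:

import numpy as np

print(table.convert_column(np.array([2.0, 3.0, 5.0])))  # [0.2 0.3 0.5]
print(table.convert_column(np.zeros(3)))                # [0. 0. 0.]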
def positionToIntensityUncertaintyForPxGroup(image, std, y0, y1, x0, x1):
'''
like positionToIntensityUncertainty,
but calculates the average uncertainty for an area [y0:y1, x0:x1]
'''
fy, fx = y1 - y0, x1 - x0
if fy != fx:
raise Exception('averaged area need to be square ATM')
image = _coarsenImage(image, fx)
k = _kSizeFromStd(std)
y0 = int(round(y0 / fy))
x0 = int(round(x0 / fx))
arr = image[y0 - k:y0 + k, x0 - k:x0 + k]
U = positionToIntensityUncertainty(arr, std / fx, std / fx)
return U[k:-k, k:-k]
|
like positionToIntensityUncertainty,
but calculates the average uncertainty for an area [y0:y1, x0:x1]
|
def jwt_optional(fn):
"""
A decorator to optionally protect a Flask endpoint
If an access token is present in the request, this will call the endpoint
with :func:`~flask_jwt_extended.get_jwt_identity` having the identity
of the access token. If no access token is present in the request,
this endpoint will still be called, but
:func:`~flask_jwt_extended.get_jwt_identity` will return `None` instead.
If there is an invalid access token in the request (expired, tampered with,
etc), this will still call the appropriate error handler instead of allowing
the endpoint to be called as if there is no access token in the request.
"""
@wraps(fn)
def wrapper(*args, **kwargs):
verify_jwt_in_request_optional()
return fn(*args, **kwargs)
return wrapper
|
A decorator to optionally protect a Flask endpoint
If an access token is present in the request, this will call the endpoint
with :func:`~flask_jwt_extended.get_jwt_identity` having the identity
of the access token. If no access token is present in the request,
this endpoint will still be called, but
:func:`~flask_jwt_extended.get_jwt_identity` will return `None` instead.
If there is an invalid access token in the request (expired, tampered with,
etc), this will still call the appropriate error handler instead of allowing
the endpoint to be called as if there is no access token in the request.
|
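A sketch of protecting a Flask route with the decorator; the secret key is a placeholder:

from flask import Flask, jsonify
from flask_jwt_extended import JWTManager, get_jwt_identity

app = Flask(__name__)
app.config['JWT_SECRET_KEY'] = 'change-me'
JWTManager(app)

@app.route('/greet')
@jwt_optional
def greet():
    identity = get_jwt_identity()  # None when no token was sent
    return jsonify(hello=identity or 'anonymous')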
def get_percentile(self, percentile):
"""Get a value representing a the input percentile of the Data Collection.
Args:
percentile: A float value from 0 to 100 representing the
requested percentile.
Return:
The Data Collection value at the input percentile
"""
assert 0 <= percentile <= 100, \
'percentile must be between 0 and 100. Got {}'.format(percentile)
return self._percentile(self._values, percentile)
|
Get a value representing the input percentile of the Data Collection.
Args:
percentile: A float value from 0 to 100 representing the
requested percentile.
Return:
The Data Collection value at the input percentile
|
def generate(basename, xml):
'''generate complete Lua implementation'''
if basename.endswith('.lua'):
filename = basename
else:
filename = basename + '.lua'
msgs = []
enums = []
filelist = []
for x in xml:
msgs.extend(x.message)
enums.extend(x.enum)
filelist.append(os.path.basename(x.filename))
for m in msgs:
if xml[0].little_endian:
m.fmtstr = '<'
else:
m.fmtstr = '>'
for f in m.ordered_fields:
m.fmtstr += mavfmt(f)
m.order_map = [ 0 ] * len(m.fieldnames)
for i in range(0, len(m.fieldnames)):
m.order_map[i] = m.ordered_fieldnames.index(m.fieldnames[i])
print("Generating %s" % filename)
outf = open(filename, "w")
generate_preamble(outf)
generate_msg_table(outf, msgs)
generate_body_fields(outf)
for m in msgs:
generate_msg_fields(outf, m)
for m in msgs:
generate_payload_dissector(outf, m)
generate_packet_dis(outf)
# generate_enums(outf, enums)
# generate_message_ids(outf, msgs)
# generate_classes(outf, msgs)
# generate_mavlink_class(outf, msgs, xml[0])
# generate_methods(outf, msgs)
generate_epilog(outf)
outf.close()
print("Generated %s OK" % filename)
|
generate complete Lua implementation
|
def CheckBreakpointsExpiration(self):
"""Completes all breakpoints that have been active for too long."""
with self._lock:
current_time = BreakpointsManager.GetCurrentTime()
if self._next_expiration > current_time:
return
expired_breakpoints = []
self._next_expiration = datetime.max
for breakpoint in six.itervalues(self._active):
expiration_time = breakpoint.GetExpirationTime()
if expiration_time <= current_time:
expired_breakpoints.append(breakpoint)
else:
self._next_expiration = min(self._next_expiration, expiration_time)
for breakpoint in expired_breakpoints:
breakpoint.ExpireBreakpoint()
|
Completes all breakpoints that have been active for too long.
|
def set_link(self, link, y=0, page=-1):
"Set destination of internal link"
if y == -1:
y = self.y
if page == -1:
page = self.page
self.links[link] = [page, y]
|
Set destination of internal link
|
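A sketch of the usual pyfpdf link workflow around set_link; the output filename is arbitrary:

from fpdf import FPDF

pdf = FPDF()
pdf.add_page()
pdf.set_font('Arial', size=12)
link = pdf.add_link()            # reserve an internal link id
pdf.cell(0, 10, 'Jump to page 2', link=link)
pdf.add_page()
pdf.set_link(link)               # destination: top of the current page
pdf.output('links.pdf')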
def to_xdr_object(self):
"""Creates an XDR Operation object that represents this
:class:`AllowTrust`.
"""
trustor = account_xdr_object(self.trustor)
length = len(self.asset_code)
assert length <= 12
pad_length = 4 - length if length <= 4 else 12 - length
# asset_code = self.asset_code + '\x00' * pad_length
# asset_code = bytearray(asset_code, encoding='utf-8')
asset_code = bytearray(self.asset_code, 'ascii') + b'\x00' * pad_length
asset = Xdr.nullclass()
if len(asset_code) == 4:
asset.type = Xdr.const.ASSET_TYPE_CREDIT_ALPHANUM4
asset.assetCode4 = asset_code
else:
asset.type = Xdr.const.ASSET_TYPE_CREDIT_ALPHANUM12
asset.assetCode12 = asset_code
allow_trust_op = Xdr.types.AllowTrustOp(trustor, asset, self.authorize)
self.body.type = Xdr.const.ALLOW_TRUST
self.body.allowTrustOp = allow_trust_op
return super(AllowTrust, self).to_xdr_object()
|
Creates an XDR Operation object that represents this
:class:`AllowTrust`.
|
def serialize_to_flat(self, name, datas):
"""
Serialize given datas to a flat structure ``KEY:VALUE`` where ``KEY``
comes from the ``keys`` variable and ``VALUE`` comes from the ``values``
variable.
This means both ``keys`` and ``values`` are required variables and must
be correctly filled (each one is a string of items separated by spaces).
Both resulting lists must be the same length.
Arguments:
name (string): Name only used inside possible exception messages.
datas (dict): Data to serialize.
Returns:
dict: Flat dictionary of serialized reference data.
"""
keys = datas.get('keys', None)
values = datas.get('values', None)
splitter = datas.get('splitter', self._DEFAULT_SPLITTER)
if not keys:
msg = ("Flat reference '{}' lacks of required 'keys' variable or "
"is empty")
raise SerializerError(msg.format(name))
else:
keys = self.value_splitter(name, 'keys', keys, mode=splitter)
if not values:
msg = ("Flat reference '{}' lacks of required 'values' variable "
"or is empty")
raise SerializerError(msg.format(name))
else:
values = self.value_splitter(name, 'values', values, mode=splitter)
if len(values) != len(keys):
msg = ("Flat reference have different length of 'keys' ands "
"'values' variable")
raise SerializerError(msg.format(name))
return OrderedDict(zip(keys, values))
|
Serialize given datas to a flat structure ``KEY:VALUE`` where ``KEY``
comes from the ``keys`` variable and ``VALUE`` comes from the ``values``
variable.
This means both ``keys`` and ``values`` are required variables and must
be correctly filled (each one is a string of items separated by spaces).
Both resulting lists must be the same length.
Arguments:
name (string): Name only used inside possible exception messages.
datas (dict): Data to serialize.
Returns:
dict: Flat dictionary of serialized reference data.
|
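A sketch of the expected input and output; `serializer` stands in for an instance of the containing class, and whitespace splitting is assumed to be the default value_splitter behaviour:

datas = {'keys': 'small medium large', 'values': '10 20 30'}
flat = serializer.serialize_to_flat('sizes', datas)
print(flat)
# OrderedDict([('small', '10'), ('medium', '20'), ('large', '30')])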
def macshim():
"""Shim to run 32-bit on 64-bit mac as a sub-process"""
import subprocess, sys
subprocess.call([
sys.argv[0] + '32'
]+sys.argv[1:],
env={"VERSIONER_PYTHON_PREFER_32_BIT":"yes"}
)
|
Shim to run 32-bit on 64-bit mac as a sub-process
|
def title(self):
"""
Get the title of this node. If an entry for this course is found in the configuration namemap it is used; otherwise the default
value from Stud.IP is used.
"""
mapped = c.namemap_lookup(self.id)
tmp = mapped if mapped is not None else self._title
return secure_filename(tmp)
|
Get the title of this node. If an entry for this course is found in the configuration namemap it is used; otherwise the default
value from Stud.IP is used.
|
def _set_member_entry(self, v, load=False):
"""
Setter method for member_entry, mapped from YANG variable /rbridge_id/secpolicy/defined_policy/policies/member_entry (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_member_entry is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_member_entry() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("member",member_entry.member_entry, yang_name="member-entry", rest_name="member-entry", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='member', extensions={u'tailf-common': {u'info': u'List of defined members', u'cli-suppress-list-no': None, u'callpoint': u'secpolicy_defined_policy_member', u'cli-suppress-key-abbreviation': None, u'cli-suppress-mode': None}}), is_container='list', yang_name="member-entry", rest_name="member-entry", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'List of defined members', u'cli-suppress-list-no': None, u'callpoint': u'secpolicy_defined_policy_member', u'cli-suppress-key-abbreviation': None, u'cli-suppress-mode': None}}, namespace='urn:brocade.com:mgmt:brocade-fc-auth', defining_module='brocade-fc-auth', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """member_entry must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("member",member_entry.member_entry, yang_name="member-entry", rest_name="member-entry", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='member', extensions={u'tailf-common': {u'info': u'List of defined members', u'cli-suppress-list-no': None, u'callpoint': u'secpolicy_defined_policy_member', u'cli-suppress-key-abbreviation': None, u'cli-suppress-mode': None}}), is_container='list', yang_name="member-entry", rest_name="member-entry", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'List of defined members', u'cli-suppress-list-no': None, u'callpoint': u'secpolicy_defined_policy_member', u'cli-suppress-key-abbreviation': None, u'cli-suppress-mode': None}}, namespace='urn:brocade.com:mgmt:brocade-fc-auth', defining_module='brocade-fc-auth', yang_type='list', is_config=True)""",
})
self.__member_entry = t
if hasattr(self, '_set'):
self._set()
|
Setter method for member_entry, mapped from YANG variable /rbridge_id/secpolicy/defined_policy/policies/member_entry (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_member_entry is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_member_entry() directly.
|
def readlist(self):
"""Sort the reads, and create lists to be used in creating sorted .fastq files"""
printtime('Sorting reads', self.start)
# Create and start threads
for i in range(self.cpus):
# Send the threads to the appropriate destination function
threads = Thread(target=self.listread, args=())
# Set the daemon to true so the threads exit when the main thread terminates
threads.setDaemon(True)
# Start the threading
threads.start()
for sample in self.runmetadata.samples:
self.listqueue.put(sample)
self.listqueue.join()
# Create
self.fastqfilter()
|
Sort the reads, and create lists to be used in creating sorted .fastq files
|
def length(
cls, request,
vector: (Ptypes.body,
Vector('The vector to analyse.'))) -> [
(200, 'Ok', Float),
(400, 'Wrong vector format')]:
'''Return the modulus (length) of a vector.'''
log.info('Computing the length of vector {}'.format(vector))
try:
Respond(200, sqrt(vector['x'] ** 2 +
vector['y'] ** 2 +
vector.get('z', 0) ** 2))
except ValueError:
Respond(400)
|
Return the modulus (length) of a vector.
|
def auto2unicode(text):
"""
This function tries to identify the text's encoding among the available encodings.
If one is found, the text is converted into a unicode string.
Author : Arulalan.T
04.08.2014
"""
_all_unique_encodes_, _all_common_encodes_ = _get_unique_common_encodes()
# get unique word which falls under any one of available encodes from
# user passed text lines
unique_chars = _get_unique_ch(text, _all_common_encodes_)
# count common encode chars
clen = len(_all_common_encodes_)
msg = "Sorry, couldn't find encode :-(\n"
msg += 'Need more words to find a unique encode outside of %d ' % clen
msg += 'common compound characters'
if not unique_chars:
print(msg)
return ''
# end of if not unique_chars:
for encode_name, encode_keys in _all_unique_encodes_:
if not len(encode_keys): continue
for ch in encode_keys:
# check whether the encode char is present in the word
if ch in unique_chars:
# found encode
print(("Found encode : ", encode_name))
encode = _all_encodes_[encode_name]
return encode2unicode(text, encode)
# end of if ch in unique_chars:
# end of for ch in encode_keys:
else:
print(msg)
return ''
|
This function tries to identify the text's encoding among the available encodings.
If one is found, the text is converted into a unicode string.
Author : Arulalan.T
04.08.2014
|
async def download_file(
self, input_location, file=None, *, part_size_kb=None,
file_size=None, progress_callback=None, dc_id=None):
"""
Downloads the given input location to a file.
Args:
input_location (:tl:`InputFileLocation`):
The file location from which the file will be downloaded.
See `telethon.utils.get_input_location` source for a complete
list of supported types.
file (`str` | `file`, optional):
The output file path, directory, or stream-like object.
If the path exists and is a file, it will be overwritten.
If the file path is ``None`` or ``bytes``, then the result
will be saved in memory and returned as `bytes`.
part_size_kb (`int`, optional):
Chunk size when downloading files. The larger, the fewer
requests will be made (up to 512KB maximum).
file_size (`int`, optional):
The file size that is about to be downloaded, if known.
Only used if ``progress_callback`` is specified.
progress_callback (`callable`, optional):
A callback function accepting two parameters:
``(downloaded bytes, total)``. Note that the
``total`` is the provided ``file_size``.
dc_id (`int`, optional):
The data center the library should connect to in order
to download the file. You shouldn't worry about this.
"""
if not part_size_kb:
if not file_size:
part_size_kb = 64 # Reasonable default
else:
part_size_kb = utils.get_appropriated_part_size(file_size)
part_size = int(part_size_kb * 1024)
# https://core.telegram.org/api/files says:
# > part_size % 1024 = 0 (divisible by 1KB)
#
# But https://core.telegram.org/cdn (more recent) says:
# > limit must be divisible by 4096 bytes
# So we just stick to the 4096 limit.
if part_size % 4096 != 0:
raise ValueError(
'The part size must be evenly divisible by 4096.')
in_memory = file is None or file is bytes
if in_memory:
f = io.BytesIO()
elif isinstance(file, str):
# Ensure that we'll be able to download the media
helpers.ensure_parent_dir_exists(file)
f = open(file, 'wb')
else:
f = file
old_dc = dc_id
dc_id, input_location = utils.get_input_location(input_location)
if dc_id is None:
dc_id = old_dc
exported = dc_id and self.session.dc_id != dc_id
if exported:
try:
sender = await self._borrow_exported_sender(dc_id)
except errors.DcIdInvalidError:
# Can't export a sender for the ID we are currently in
config = await self(functions.help.GetConfigRequest())
for option in config.dc_options:
if option.ip_address == self.session.server_address:
self.session.set_dc(
option.id, option.ip_address, option.port)
self.session.save()
break
# TODO Figure out why the session may have the wrong DC ID
sender = self._sender
exported = False
else:
# The used sender will also change if ``FileMigrateError`` occurs
sender = self._sender
self._log[__name__].info('Downloading file in chunks of %d bytes',
part_size)
try:
offset = 0
while True:
try:
result = await sender.send(functions.upload.GetFileRequest(
input_location, offset, part_size
))
if isinstance(result, types.upload.FileCdnRedirect):
# TODO Implement
raise NotImplementedError
except errors.FileMigrateError as e:
self._log[__name__].info('File lives in another DC')
sender = await self._borrow_exported_sender(e.new_dc)
exported = True
continue
offset += part_size
if not result.bytes:
if in_memory:
f.flush()
return f.getvalue()
else:
return getattr(result, 'type', '')
self._log[__name__].debug('Saving %d more bytes',
len(result.bytes))
f.write(result.bytes)
if progress_callback:
progress_callback(f.tell(), file_size)
finally:
if exported:
await self._return_exported_sender(sender)
elif sender != self._sender:
await sender.disconnect()
if isinstance(file, str) or in_memory:
f.close()
|
Downloads the given input location to a file.
Args:
input_location (:tl:`InputFileLocation`):
The file location from which the file will be downloaded.
See `telethon.utils.get_input_location` source for a complete
list of supported types.
file (`str` | `file`, optional):
The output file path, directory, or stream-like object.
If the path exists and is a file, it will be overwritten.
If the file path is ``None`` or ``bytes``, then the result
will be saved in memory and returned as `bytes`.
part_size_kb (`int`, optional):
Chunk size when downloading files. The larger, the fewer
requests will be made (up to 512KB maximum).
file_size (`int`, optional):
The file size that is about to be downloaded, if known.
Only used if ``progress_callback`` is specified.
progress_callback (`callable`, optional):
A callback function accepting two parameters:
``(downloaded bytes, total)``. Note that the
``total`` is the provided ``file_size``.
dc_id (`int`, optional):
The data center the library should connect to in order
to download the file. You shouldn't worry about this.
|
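A sketch of an in-memory download with Telethon; the API credentials are placeholders and it assumes the newest message in your saved messages carries media:

import asyncio
from telethon import TelegramClient

client = TelegramClient('session', 12345, '0123456789abcdef')

async def main():
    async with client:
        msg = (await client.get_messages('me', limit=1))[0]
        data = await client.download_file(msg.media, file=bytes)
        print(len(data), 'bytes downloaded in memory')

asyncio.get_event_loop().run_until_complete(main())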
def read_job(self, job_id, checkout=False):
"""
Reads the head and reads the tree into the index,
and checks out the work-tree when checkout=True.
This does not fetch the job from the actual server. It needs to be in the local git already.
"""
self.job_id = job_id
commit = self.get_head_commit()
self.logger.debug('Job ref points to ' + commit)
self.command_exec(['read-tree', self.ref_head])
if checkout:
self.logger.debug('Working directory in ' + self.work_tree)
# make sure we have checked out all files we have added until now. Important for simple models,
# so we have the actual model.py and dataset scripts.
if os.path.exists(self.work_tree):
shutil.rmtree(self.work_tree)
os.makedirs(self.work_tree)
# make the working tree reflect exactly the tree of ref_head.
# since we removed the dir before, we have exactly the tree of the reference
# '--', '.' is important to not update HEAD
self.command_exec(['--work-tree', self.work_tree, 'checkout', self.ref_head, '--', '.'])
|
Reads the head and reads the tree into the index,
and checks out the work-tree when checkout=True.
This does not fetch the job from the actual server. It needs to be in the local git already.
|
def on_post(self, req, resp):
""" Validate the access token request for spec compliance
The spec also dictates the JSON-based error response
on failure, which is handled in this responder.
"""
grant_type = req.get_param('grant_type')
password = req.get_param('password')
username = req.get_param('username')
# errors or not, disable client caching along the way
# per the spec
resp.disable_caching()
if not grant_type or not password or not username:
resp.status = falcon.HTTP_400
resp.serialize({
'error': 'invalid_request',
'error_description': 'The grant_type, username, & password '
'parameters are all required when '
'requesting an OAuth access_token',
'error_uri': 'tools.ietf.org/html/rfc6749#section-4.3.2',
})
elif grant_type != 'password':
resp.status = falcon.HTTP_400
resp.serialize({
'error': 'unsupported_grant_type',
'error_description': 'The grant_type parameter MUST be set '
'to "password" not "%s"' % grant_type,
'error_uri': 'tools.ietf.org/html/rfc6749#section-4.3.2',
})
else:
try:
token = self.auth_creds(username, password)
resp.serialize({
'access_token': token,
'token_type': 'Bearer',
})
except AuthRejected as exc:
resp.status = falcon.HTTP_401
resp.set_header('WWW-Authenticate', self._realm)
resp.serialize({
'error': 'invalid_client',
'error_description': exc.detail,
})
|
Validate the access token request for spec compliance
The spec also dictates the JSON-based error response
on failure, which is handled in this responder.
|
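A client-side sketch of the password grant this responder implements; the endpoint URL and credentials are hypothetical:

import requests

resp = requests.post('https://api.example.com/token', data={
    'grant_type': 'password',
    'username': 'alice',
    'password': 's3cret',
})
print(resp.status_code, resp.json())
# 200 -> {'access_token': '...', 'token_type': 'Bearer'}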