def PriceHourly(self):
"""Returns the total hourly price for the server.
Sums unit prices with unit volumes.
>>> clc.v2.Server("NY1BTDIPHYP0101").PriceHourly()
0.02857
"""
units = self.PriceUnits()
    return (units['cpu'] * self.cpu + units['memory'] * self.memory +
            units['storage'] * self.storage + units['managed_os'])
def sequencetyper(self):
"""
Determines the sequence type of each strain based on comparisons to sequence type profiles
"""
for sample in self.metadata.samples:
if sample.general.bestassemblyfile != 'NA':
if type(sample[self.analysistype].allelenames) == list:
                # Only type samples that have an associated sequence-type profile
if sample[self.analysistype].profile != 'NA':
# Initialise dictionaries
sample[self.analysistype].profilematches = dict()
sample[self.analysistype].sequencetypematches = dict()
# Create the profiledata variable to avoid writing self.profiledata[self.analysistype]
profiledata = sample[self.analysistype].profiledata
# For each gene
for gene in sorted(sample[self.analysistype].allelenames):
try:
allelenumber = sample[self.analysistype].allelematches[gene].split('-')[1]
# Find the profile with the most alleles in common with the query genome
for sequencetype in profiledata:
# refallele is the allele number of the sequence type
refallele = profiledata[sequencetype][gene]
if allelenumber == refallele:
# Add matching alleles
try:
sample[self.analysistype].profilematches[sequencetype] += 1
sample[self.analysistype].sequencetypematches[sequencetype].append(
refallele)
except KeyError:
sample[self.analysistype].profilematches[sequencetype] = 1
sample[self.analysistype].sequencetypematches[sequencetype] = list()
sample[self.analysistype].sequencetypematches[sequencetype].append(
refallele)
except KeyError:
pass
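A minimal, self-contained sketch of the same profile-matching idea, using plain dicts in place of the metadata objects (all names and values below are hypothetical stand-ins):

def match_sequence_type(observed, profiles):
    # Count, for each sequence type, how many observed alleles match its profile
    matches = {}
    for gene, allele in observed.items():
        for st, profile in profiles.items():
            if profile.get(gene) == allele:
                matches[st] = matches.get(st, 0) + 1
    return matches

observed = {'adk': '1', 'fumC': '4', 'gyrB': '12'}
profiles = {'ST-10': {'adk': '1', 'fumC': '4', 'gyrB': '12'},
            'ST-73': {'adk': '36', 'fumC': '4', 'gyrB': '9'}}
matches = match_sequence_type(observed, profiles)
print(max(matches, key=matches.get), matches)  # ST-10 {'ST-10': 3, 'ST-73': 1}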
def fields(self):
    """
    Return all the fields and their raw values for this Orm instance.
    This property returns a dict with the field names and their current
    values. If you want to control the values for outputting to an api,
    use .jsonable() instead.
    """
    return {k: getattr(self, k, None) for k in self.schema.fields}
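A hedged usage sketch, with a minimal stand-in for the schema machinery (the Schema class below is hypothetical):

class Schema:
    fields = ('id', 'name')  # hypothetical stand-in for the real schema

class Example:
    schema = Schema()

    def __init__(self):
        self.id = 1
        self.name = 'foo'

    @property
    def fields(self):
        return {k: getattr(self, k, None) for k in self.schema.fields}

print(Example().fields)  # {'id': 1, 'name': 'foo'}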
def meta_wrapped(f):
"""
Add a field label, errors, and a description (if it exists) to
a field.
"""
@wraps(f)
def wrapped(self, field, *args, **kwargs):
html = "{label}{errors}{original}<small>{description}</small>".format(
label=field.label(class_='control-label'),
original=f(self, field, *args, **kwargs),
errors=render_field_errors(field) or '',
description=render_field_description(field)
)
return HTMLString(html)
return wrapped
def validate_timestamp_and_nonce(self, client_key, timestamp, nonce,
request, request_token=None,
access_token=None):
"""Validate the timestamp and nonce is used or not."""
log.debug('Validate timestamp and nonce %r', client_key)
nonce_exists = self._noncegetter(
client_key=client_key, timestamp=timestamp,
nonce=nonce, request_token=request_token,
access_token=access_token
)
if nonce_exists:
return False
self._noncesetter(
client_key=client_key, timestamp=timestamp,
nonce=nonce, request_token=request_token,
access_token=access_token
)
return True
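A self-contained sketch of the same check-then-record pattern, with an in-memory set standing in for the nonce getter and setter (the storage backend here is an assumption):

_seen = set()  # in-memory stand-in for the nonce store

def is_fresh(client_key, timestamp, nonce):
    key = (client_key, timestamp, nonce)
    if key in _seen:   # nonce already used: reject as a replay
        return False
    _seen.add(key)     # first sighting: record it and accept
    return True

assert is_fresh('app-key', 1700000000, 'abc') is True
assert is_fresh('app-key', 1700000000, 'abc') is False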
def proximal_l2(space, lam=1, g=None):
r"""Proximal operator factory of the l2-norm/distance.
Function for the proximal operator of the functional ``F`` where ``F``
is the l2-norm (or distance to g, if given)::
``F(x) = lam ||x - g||_2``
Parameters
----------
space : `LinearSpace`
Domain of F(x). Needs to be a Hilbert space.
That is, have an inner product (`LinearSpace.inner`).
lam : positive float, optional
Scaling factor or regularization parameter.
g : ``space`` element, optional
An element in ``space``. Default: ``space.zero``.
Returns
-------
prox_factory : callable
Factory for the proximal operator to be initialized.
Notes
-----
    Most problems are formulated for the squared norm/distance; in that case
use `proximal_l2_squared` instead.
The :math:`L_2`-norm/distance :math:`F` is given by
.. math::
F(x) = \lambda \|x - g\|_2
For a step size :math:`\sigma`, the proximal operator of :math:`\sigma F`
is given by
.. math::
        \mathrm{prox}_{\sigma F}(y) = \begin{cases}
            (1 - c) \cdot y + c \cdot g & \text{if } c < 1, \\
            g & \text{else},
        \end{cases}
    where :math:`c = \sigma \frac{\lambda}{\|y - g\|_2}`.
See Also
--------
proximal_l2_squared : proximal for squared norm/distance
proximal_convex_conj_l2 : proximal for convex conjugate
"""
lam = float(lam)
if g is not None and g not in space:
raise TypeError('{!r} is not an element of {!r}'.format(g, space))
class ProximalL2(Operator):
"""Proximal operator of the l2-norm/distance."""
def __init__(self, sigma):
"""Initialize a new instance.
Parameters
----------
sigma : positive float
Step size parameter
"""
super(ProximalL2, self).__init__(
domain=space, range=space, linear=False)
self.sigma = float(sigma)
def _call(self, x, out):
"""Apply the operator to ``x`` and stores the result in ``out``."""
dtype = getattr(self.domain, 'dtype', float)
eps = np.finfo(dtype).resolution * 10
if g is None:
x_norm = x.norm() * (1 + eps)
if x_norm > 0:
step = self.sigma * lam / x_norm
else:
step = np.infty
if step < 1.0:
out.lincomb(1.0 - step, x)
else:
out.set_zero()
else:
x_norm = (x - g).norm() * (1 + eps)
if x_norm > 0:
step = self.sigma * lam / x_norm
else:
step = np.infty
if step < 1.0:
out.lincomb(1.0 - step, x, step, g)
else:
out.assign(g)
return ProximalL2
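A plain-NumPy sketch of the closed-form prox above, independent of ODL (for illustration only):

import numpy as np

def prox_l2(y, sigma, lam=1.0, g=None):
    # prox of F(x) = lam * ||x - g||_2 evaluated at y, for step size sigma
    g = np.zeros_like(y) if g is None else g
    dist = np.linalg.norm(y - g)
    if dist == 0:
        return g.copy()
    c = sigma * lam / dist
    # Shrink y toward g; collapse onto g once the shrinkage factor reaches 1
    return (1 - c) * y + c * g if c < 1 else g.copy()

y = np.array([3.0, 4.0])       # ||y - 0|| = 5
print(prox_l2(y, sigma=1.0))   # c = 0.2, scales y by 0.8 -> [2.4, 3.2]
print(prox_l2(y, sigma=10.0))  # c = 2 >= 1 -> the zero vector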
def nla_get_u64(nla):
"""Return value of 64 bit integer attribute as an int().
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/attr.c#L649
Positional arguments:
nla -- 64 bit integer attribute (nlattr class instance).
Returns:
Payload as an int().
"""
tmp = c_uint64(0)
if nla and nla_len(nla) >= sizeof(tmp):
tmp = c_uint64.from_buffer(nla_data(nla)[:SIZEOF_U64])
return int(tmp.value)
def parents(self):
"""~TermList: The direct parents of the `Term`.
"""
if self._parents is None:
bottomups = tuple(Relationship.bottomup())
self._parents = TermList()
        self._parents.extend([
            other
            for rship, others in six.iteritems(self.relations)
            for other in others
            if rship in bottomups
        ])
return self._parents
def get_dos_from_id(self, task_id):
"""
Overrides the get_dos_from_id for the MIT gridfs format.
"""
args = {'task_id': task_id}
fields = ['calculations']
structure = self.get_structure_from_id(task_id)
dosid = None
for r in self.query(fields, args):
dosid = r['calculations'][-1]['dos_fs_id']
if dosid is not None:
self._fs = gridfs.GridFS(self.db, 'dos_fs')
with self._fs.get(dosid) as dosfile:
s = dosfile.read()
            try:
                d = json.loads(s)
            except ValueError:
                # The blob may be stored as zlib-compressed JSON
                s = zlib.decompress(s)
                d = json.loads(s.decode("utf-8"))
tdos = Dos.from_dict(d)
pdoss = {}
for i in range(len(d['pdos'])):
ados = d['pdos'][i]
all_ados = {}
for j in range(len(ados)):
orb = Orbital(j)
odos = ados[str(orb)]
all_ados[orb] = {Spin(int(k)): v
for k, v
in odos['densities'].items()}
pdoss[structure[i]] = all_ados
return CompleteDos(structure, tdos, pdoss)
return None
def grab_gpus(num_gpus=1, gpu_select=None, gpu_fraction=0.95, max_procs=-1):
"""
Checks for gpu availability and sets CUDA_VISIBLE_DEVICES as such.
Note that this function does not do anything to 'reserve' gpus, it only
limits what GPUS your program can see by altering the CUDA_VISIBLE_DEVICES
variable. Other programs can still come along and snatch your gpu. This
function is more about preventing **you** from stealing someone else's GPU.
    If more than one GPU is requested but not all of them are available, the
    CUDA_VISIBLE_DEVICES variable is set to see all the GPUs that are free.
    A warning is generated in this case.
If one or more GPUs were requested and none were available, a Warning
will be raised. Before raising it, the CUDA_VISIBLE_DEVICES will be set to a
blank string. This means the calling function can ignore this warning and
proceed if it chooses to only use the CPU, and it should still be protected
against putting processes on a busy GPU.
You can call this function with num_gpus=0 to blank out the
CUDA_VISIBLE_DEVICES environment variable.
Parameters
----------
num_gpus : int
How many gpus your job needs (optional)
gpu_select : iterable
A single int or an iterable of ints indicating gpu numbers to
search through. If left blank, will search through all gpus.
gpu_fraction : float
        The fraction of a gpu's memory that must be free for the script to see
        the gpu as free. Defaults to 0.95. Useful if someone has grabbed a tiny
        amount of memory on a gpu but isn't using it.
max_procs : int
Maximum number of processes allowed on a GPU (as well as memory
restriction).
Returns
-------
success : int
Number of gpus 'grabbed'
Raises
------
RuntimeWarning
If couldn't connect with NVIDIA drivers.
If 1 or more gpus were requested and none were available.
ValueError
If the gpu_select option was not understood (can fix by leaving this
field blank, providing an int or an iterable of ints).
"""
# Set the visible devices to blank.
os.environ['CUDA_VISIBLE_DEVICES'] = ""
if num_gpus == 0:
return 0
# Try connect with NVIDIA drivers
logger = logging.getLogger(__name__)
    try:
        py3nvml.nvmlInit()
    except Exception:
        str_ = """ Couldn't connect to nvml drivers. Check they are installed correctly.
        Proceeding on cpu only..."""
        warnings.warn(str_, RuntimeWarning)
        logger.warning(str_)
        return 0
numDevices = py3nvml.nvmlDeviceGetCount()
gpu_free = [False]*numDevices
# Flag which gpus we can check
if gpu_select is None:
gpu_check = [True] * numDevices
else:
gpu_check = [False] * numDevices
try:
gpu_check[gpu_select] = True
except TypeError:
try:
for i in gpu_select:
gpu_check[i] = True
            except (TypeError, IndexError):
                raise ValueError('''Please provide an int or an iterable of ints
                for gpu_select''')
# Print out GPU device info. Useful for debugging.
for i in range(numDevices):
# If the gpu was specified, examine it
if not gpu_check[i]:
continue
handle = py3nvml.nvmlDeviceGetHandleByIndex(i)
info = py3nvml.nvmlDeviceGetMemoryInfo(handle)
str_ = "GPU {}:\t".format(i) + \
"Used Mem: {:>6}MB\t".format(info.used/(1024*1024)) + \
"Total Mem: {:>6}MB".format(info.total/(1024*1024))
logger.debug(str_)
# Check the number of procs running on each gpu
if max_procs >= 0:
procs_ok = get_free_gpus(max_procs=max_procs)
else:
procs_ok = [True,] * numDevices
# Now check if any devices are suitable
for i in range(numDevices):
# If the gpu was not specified, skip it
if gpu_check[i] and procs_ok[i]:
handle = py3nvml.nvmlDeviceGetHandleByIndex(i)
info = py3nvml.nvmlDeviceGetMemoryInfo(handle)
# Sometimes GPU has a few MB used when it is actually free
if (info.free+10)/info.total >= gpu_fraction:
gpu_free[i] = True
else:
logger.info('GPU {} has processes on it. Skipping.'.format(i))
py3nvml.nvmlShutdown()
# Now check whether we can create the session
    if sum(gpu_free) == 0:
        str_ = "Could not find enough GPUs for your job"
        warnings.warn(str_, RuntimeWarning)
        logger.warning(str_)
        return 0
else:
if sum(gpu_free) >= num_gpus:
# only use the first num_gpus gpus. Hide the rest from greedy
# tensorflow
available_gpus = [i for i, x in enumerate(gpu_free) if x]
use_gpus = ','.join(list(str(s) for s in available_gpus[:num_gpus]))
logger.debug('{} Gpus found free'.format(sum(gpu_free)))
logger.info('Using {}'.format(use_gpus))
os.environ['CUDA_VISIBLE_DEVICES'] = use_gpus
return num_gpus
else:
# use everything we can.
s = "Only {} GPUs found but {}".format(sum(gpu_free), num_gpus) + \
"requested. Allocating these and continuing."
warnings.warn(s, RuntimeWarning)
logger.warn(s)
available_gpus = [i for i, x in enumerate(gpu_free) if x]
use_gpus = ','.join(list(str(s) for s in available_gpus))
logger.debug('{} Gpus found free'.format(sum(gpu_free)))
logger.info('Using {}'.format(use_gpus))
os.environ['CUDA_VISIBLE_DEVICES'] = use_gpus
return sum(gpu_free)
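A hedged usage sketch (values below are illustrative; the return value is the number of GPUs actually made visible):

import os

# Ask for one GPU with at least 90% of its memory free, searching GPUs 0-3.
n = grab_gpus(num_gpus=1, gpu_select=[0, 1, 2, 3], gpu_fraction=0.9)
if n == 0:
    print('No free GPU; CUDA_VISIBLE_DEVICES is blank, falling back to CPU')
else:
    print('Visible devices:', os.environ['CUDA_VISIBLE_DEVICES'])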
def outputPoint(self):
"""
Returns a scene space point that the connection \
will draw to as its output source. If the connection \
has a node defined, then it will calculate the output \
point based on the position of the node, factoring in \
preference for output location and fixed positions. If \
there is no node connected, then the point defined using \
the setOutputPoint method will be used.
:return <QPointF>
"""
node = self.outputNode()
# return the set point
if not node:
return self._outputPoint
# test for the hotspot
hotspot = self.outputHotspot()
# otherwise, calculate the point based on location and fixed positions
olocation = self.outputLocation()
ofixedx = self.outputFixedX()
ofixedy = self.outputFixedY()
loc_left = XNodeConnection.Location.Left
loc_right = XNodeConnection.Location.Right
loc_top = XNodeConnection.Location.Top
loc_bot = XNodeConnection.Location.Bottom
irect = self.inputRect()
orect = self.outputRect()
# return the right location
if olocation & loc_right and orect.right() < irect.left():
if hotspot:
return node.mapToScene(QPointF(hotspot.rect().right(),
hotspot.rect().center().y()))
else:
return node.positionAt(loc_right, ofixedx, ofixedy)
# return the left location
elif olocation & loc_left and irect.right() < orect.left():
if hotspot:
return node.mapToScene(QPointF(hotspot.rect().left(),
hotspot.rect().center().y()))
else:
return node.positionAt(loc_left, ofixedx, ofixedy)
# return the bottom location
elif olocation & loc_bot and orect.bottom() < irect.top():
if hotspot:
return node.mapToScene(QPointF(hotspot.rect().center().x(),
hotspot.rect().bottom()))
else:
return node.positionAt(loc_bot, ofixedx, ofixedy)
# return the top location
elif olocation & loc_top and irect.bottom() < orect.top():
if hotspot:
return node.mapToScene(QPointF(hotspot.rect().center().x(),
hotspot.rect().top()))
else:
return node.positionAt(loc_top, ofixedx, ofixedy)
# return the center point
else:
if hotspot:
return node.mapToScene(hotspot.rect().center())
else:
return node.positionAt(olocation, ofixedx, ofixedy)
def _compute_raw_moments(self, n_counter, k_counter):
r"""
Compute :math:`X_i`
Gamma type 1: :math:`X_i = \frac {\beta_i}{\beta_0}Y_0 + Y_i`
Gamma type 2: :math:`X_i = \sum_{k=0}^{i} \frac {\beta_i}{\beta_k}Y_k`
:param n_counter: a list of :class:`~means.core.descriptors.Moment`\s representing central moments
:type n_counter: list[:class:`~means.core.descriptors.Moment`]
:param k_counter: a list of :class:`~means.core.descriptors.Moment`\s representing raw moments
:type k_counter: list[:class:`~means.core.descriptors.Moment`]
:return: a vector of parametric expression for raw moments
"""
alpha_multipliers, beta_multipliers = self._get_parameter_symbols(n_counter, k_counter)
    out_mat = sp.Matrix([a * b for a, b in zip(alpha_multipliers, beta_multipliers)])
out_mat = out_mat.applyfunc(sp.expand)
return out_mat
def isMasterReqLatencyTooHigh(self):
"""
Return whether the request latency of the master instance is greater
than the acceptable threshold
"""
    # TODO for now, view_change procedure can take more than 15 minutes
# (5 minutes for catchup and 10 minutes for primary's answer).
# Therefore, view_change triggering by max latency is not indicative now.
r = self.masterReqLatencyTooHigh or \
next(((key, lat) for key, lat in self.masterReqLatencies.items() if
lat > self.Lambda), None)
if r:
logger.display("{}{} found master's latency {} to be higher than the threshold for request {}.".
format(MONITORING_PREFIX, self, r[1], r[0]))
else:
logger.trace("{} found master's latency to be lower than the "
"threshold for all requests.".format(self))
return r
def _ancestors_or_self(
self, qname: Union[QualName, bool] = None) -> List[InstanceNode]:
"""XPath - return the list of receiver's ancestors including itself."""
res = [] if qname and self.qual_name != qname else [self]
return res + self.up()._ancestors(qname)
def _check_portname(name):
'''
Check if portname is valid and whether or not the directory exists in the
ports tree.
'''
if not isinstance(name, string_types) or '/' not in name:
raise SaltInvocationError(
'Invalid port name \'{0}\' (category required)'.format(name)
)
path = os.path.join('/usr/ports', name)
if not os.path.isdir(path):
raise SaltInvocationError('Path \'{0}\' does not exist'.format(path))
return path
def register_calculator_view(request):
"""Register a calculator."""
if request.method == "POST":
form = CalculatorRegistrationForm(request.POST)
logger.debug(form)
        if form.is_valid():
            obj = form.save(commit=False)
            obj.user = request.user
            obj.save()
messages.success(request, "Successfully added calculator.")
return redirect("itemreg")
else:
messages.error(request, "Error adding calculator.")
else:
form = CalculatorRegistrationForm()
    return render(request, "itemreg/register_form.html", {
        "form": form,
        "action": "add",
        "type": "calculator",
        "form_route": "itemreg_calculator",
    })
def ip_hide_as_path_holder_as_path_access_list_instance(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def")
hide_as_path_holder = ET.SubElement(ip, "hide-as-path-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
as_path = ET.SubElement(hide_as_path_holder, "as-path")
access_list = ET.SubElement(as_path, "access-list")
name_key = ET.SubElement(access_list, "name")
name_key.text = kwargs.pop('name')
seq_keyword_key = ET.SubElement(access_list, "seq-keyword")
seq_keyword_key.text = kwargs.pop('seq_keyword')
instance = ET.SubElement(access_list, "instance")
instance.text = kwargs.pop('instance')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def run(self, *args):
"""List, add or delete organizations and domains from the registry.
By default, it prints the list of organizations available on
the registry.
"""
params = self.parser.parse_args(args)
organization = params.organization
domain = params.domain
is_top_domain = params.top_domain
overwrite = params.overwrite
if params.add:
code = self.add(organization, domain, is_top_domain, overwrite)
elif params.delete:
code = self.delete(organization, domain)
else:
term = organization
code = self.registry(term)
return code
def remove_old(self, max_log_time):
"""Remove all logs which are older than the specified time."""
files = glob.glob('{}/queue-*'.format(self.log_dir))
    files = [os.path.basename(f) for f in files]
for log_file in files:
# Get time stamp from filename
name = os.path.splitext(log_file)[0]
timestamp = name.split('-', maxsplit=1)[1]
# Get datetime from time stamp
time = datetime.strptime(timestamp, '%Y%m%d-%H%M')
now = datetime.now()
# Get total delta in seconds
delta = now - time
seconds = delta.total_seconds()
# Delete log file, if the delta is bigger than the specified log time
        if seconds > int(max_log_time):
            log_file_path = os.path.join(self.log_dir, log_file)
            os.remove(log_file_path)
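A small self-contained sketch of the age computation used above (no filesystem access; the filename is hypothetical):

from datetime import datetime

name = 'queue-20240101-1230'            # hypothetical log file stem
stamp = name.split('-', maxsplit=1)[1]  # '20240101-1230'
logged_at = datetime.strptime(stamp, '%Y%m%d-%H%M')
age_seconds = (datetime.now() - logged_at).total_seconds()
print(age_seconds > 7 * 24 * 3600)      # True if older than a week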
def ingest_data(self, data, cat_name, id_col, ra_col='_RAJ2000', dec_col='_DEJ2000', cat_loc='', append=False, count=-1):
"""
Ingest a data file and regroup sources
Parameters
----------
data: str, pandas.DataFrame, astropy.table.Table
The path to the exported VizieR data or the data table
cat_name: str
The name of the added catalog
id_col: str
The name of the column containing the unique ids
ra_col: str
The name of the RA column
dec_col: str
The name of the DEC column
cat_loc: str
The location of the original catalog data
append: bool
Append the catalog rather than replace
count: int
The number of table rows to add
(This is mainly for testing purposes)
"""
# Check if the catalog is already ingested
if not append and cat_name in self.catalogs:
print('Catalog {} already ingested.'.format(cat_name))
else:
if isinstance(data, str):
cat_loc = cat_loc or data
data = pd.read_csv(data, sep='\t', comment='#', engine='python')[:count]
elif isinstance(data, pd.core.frame.DataFrame):
cat_loc = cat_loc or type(data)
elif isinstance(data, (at.QTable, at.Table)):
cat_loc = cat_loc or type(data)
data = pd.DataFrame(list(data), columns=data.colnames)
else:
print("Sorry, but I cannot read that data. Try an ascii file cat_loc, astropy table, or pandas data frame.")
return
# Make sure ra and dec are decimal degrees
if isinstance(data[ra_col][0], str):
crds = coord.SkyCoord(ra=data[ra_col], dec=data[dec_col], unit=(q.hour, q.deg), frame='icrs')
            data.insert(0, 'dec', crds.dec)
            data.insert(0, 'ra', crds.ra)
        elif isinstance(data[ra_col][0], float):
            data.rename(columns={ra_col: 'ra', dec_col: 'dec'}, inplace=True)
else:
print("I can't read the RA and DEC of the input data. Please try again.")
return
        # Add bookkeeping columns and store the catalog
        try:
            last = len(getattr(self, cat_name)) if append else 0
            data.insert(0, 'catID', ['{}_{}'.format(cat_name, n + 1) for n in range(last, last + len(data))])
            data.insert(0, 'dec_corr', data['dec'])
            data.insert(0, 'ra_corr', data['ra'])
            data.insert(0, 'source_id', np.nan)
            print('Ingesting {} rows from {} catalog...'.format(len(data), cat_name))
# Save the raw data as an attribute
if append:
setattr(self, cat_name, getattr(self, cat_name).append(data, ignore_index=True))
else:
setattr(self, cat_name, data)
# Update the history
            self.history += "\n{}: Catalog {} ingested.".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), cat_name)
            self.catalogs.update({cat_name: {'cat_loc': cat_loc, 'id_col': id_col, 'ra_col': ra_col, 'dec_col': dec_col}})
except AttributeError:
print("No catalog named '{}'. Set 'append=False' to create it.".format(cat_name))
def save(self, filename="temp.pkl"):
    """
    Save the TM to the given filename.
    """
    # Use a context manager so the file handle is closed even on error
    with open(filename, 'wb') as output:
        cPickle.dump(self.tm, output, protocol=cPickle.HIGHEST_PROTOCOL)
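A round-trip sketch of the same pattern with the stdlib pickle module (the payload below is a hypothetical stand-in for self.tm):

import pickle

data = {'cells': [1, 2, 3]}  # hypothetical stand-in for the TM object
with open('temp.pkl', 'wb') as output:
    pickle.dump(data, output, protocol=pickle.HIGHEST_PROTOCOL)
with open('temp.pkl', 'rb') as source:
    assert pickle.load(source) == data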
def send_ether_over_wpa(self, pkt, **kwargs):
"""Send an Ethernet packet using the WPA channel
Extra arguments will be ignored, and are just left for compatibility
"""
payload = LLC() / SNAP() / pkt[Ether].payload
dest = pkt.dst
if dest == "ff:ff:ff:ff:ff:ff":
self.send_wpa_to_group(payload, dest)
else:
assert dest == self.client
self.send_wpa_to_client(payload)
def colorbar(self, cmap, position="right",
label="", clim=("", ""),
border_width=0.0, border_color="black",
**kwargs):
"""Show a ColorBar
Parameters
----------
cmap : str | vispy.color.ColorMap
Either the name of the ColorMap to be used from the standard
set of names (refer to `vispy.color.get_colormap`),
or a custom ColorMap object.
The ColorMap is used to apply a gradient on the colorbar.
position : {'left', 'right', 'top', 'bottom'}
The position of the colorbar with respect to the plot.
'top' and 'bottom' are placed horizontally, while
'left' and 'right' are placed vertically
label : str
The label that is to be drawn with the colorbar
that provides information about the colorbar.
clim : tuple (min, max)
the minimum and maximum values of the data that
is given to the colorbar. This is used to draw the scale
on the side of the colorbar.
border_width : float (in px)
The width of the border the colormap should have. This measurement
is given in pixels
border_color : str | vispy.color.Color
The color of the border of the colormap. This can either be a
        str as the color's name or an actual instance of a vispy.color.Color
Returns
-------
colorbar : instance of ColorBarWidget
See also
--------
ColorBarWidget
"""
self._configure_2d()
cbar = scene.ColorBarWidget(orientation=position,
label_str=label,
cmap=cmap,
clim=clim,
border_width=border_width,
border_color=border_color,
**kwargs)
CBAR_LONG_DIM = 50
if cbar.orientation == "bottom":
self.grid.remove_widget(self.cbar_bottom)
self.cbar_bottom = self.grid.add_widget(cbar, row=5, col=4)
        self.cbar_bottom.height_max = \
            self.cbar_bottom.height_min = CBAR_LONG_DIM
elif cbar.orientation == "top":
self.grid.remove_widget(self.cbar_top)
self.cbar_top = self.grid.add_widget(cbar, row=1, col=4)
        self.cbar_top.height_max = self.cbar_top.height_min = CBAR_LONG_DIM
elif cbar.orientation == "left":
self.grid.remove_widget(self.cbar_left)
self.cbar_left = self.grid.add_widget(cbar, row=2, col=1)
self.cbar_left.width_max = self.cbar_left.width_min = CBAR_LONG_DIM
else: # cbar.orientation == "right"
self.grid.remove_widget(self.cbar_right)
self.cbar_right = self.grid.add_widget(cbar, row=2, col=5)
self.cbar_right.width_max = \
self.cbar_right.width_min = CBAR_LONG_DIM
return cbar
def exit_with_exc_info(code=1, message='', print_tb=False, exception=None):
'''Exits the program, printing information about the last exception (if
any) and an optional error message. Uses *exception* instead if provided.
:param code: Exit code.
:type code: integer (valid exit code, 0-255)
:param message: Message to be printed after the exception information.
:type message: string
:param print_tb: If set to True, prints the exception traceback; otherwise, suppresses it.
:type print_tb: boolean
    :param exception: An exception to use in place of the last exception raised.
    :type exception: Exception or None
'''
exc_type, exc_value = (exception.__class__, exception) \
if exception is not None else sys.exc_info()[:2]
if exc_type is not None:
if print_tb:
traceback.print_exc()
elif isinstance(exc_value, KeyboardInterrupt):
sys.stderr.write('^C\n')
else:
for line in traceback.format_exception_only(exc_type, exc_value):
sys.stderr.write(line)
sys.stderr.write(message)
if message != '' and not message.endswith('\n'):
sys.stderr.write('\n')
sys.exit(code)
def timestamp(num_params, p_levels, k_choices, N):
    """
    Returns a uniform timestamp with parameter values for file identification
    """
    # One placeholder per argument; the original string had a stray "_gs%s"
    # placeholder with no matching argument, which raises a TypeError.
    string = "_v%s_l%s_k%s_N%s_%s.txt" % (num_params,
                                          p_levels,
                                          k_choices,
                                          N,
                                          dt.strftime(dt.now(),
                                                      "%d%m%y%H%M%S"))
    return string
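For illustration, the corrected format yields names like the following (argument values are hypothetical):

from datetime import datetime as dt

s = "_v%s_l%s_k%s_N%s_%s.txt" % (20, 4, 10, 1000,
                                 dt.strftime(dt.now(), "%d%m%y%H%M%S"))
print(s)  # e.g. _v20_l4_k10_N1000_150124093000.txt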
def command_line_arguments(command_line_parameters):
"""Defines the command line parameters that are accepted."""
# create parser
parser = argparse.ArgumentParser(description='Execute baseline algorithms with default parameters', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# add parameters
# - the algorithm to execute
parser.add_argument('-a', '--algorithms', choices = all_algorithms, default = ('gmm-voxforge',), nargs = '+', help = 'Select one (or more) algorithms that you want to execute.')
parser.add_argument('--all', action = 'store_true', help = 'Select all algorithms.')
# - the database to choose
parser.add_argument('-d', '--database', choices = available_databases, default = 'voxforge', help = 'The database on which the baseline algorithm is executed.')
# - the database to choose
parser.add_argument('-b', '--baseline-directory', default = 'baselines', help = 'The sub-directory, where the baseline results are stored.')
# - the directory to write
parser.add_argument('-f', '--directory', help = 'The directory to write the data of the experiment into. If not specified, the default directories of the verify.py script are used (see verify.py --help).')
# - use the Idiap grid -- option is only useful if you are at Idiap
parser.add_argument('-g', '--grid', action = 'store_true', help = 'Execute the algorithm in the SGE grid.')
# - run in parallel on the local machine
parser.add_argument('-l', '--parallel', type=int, help = 'Run the algorithms in parallel on the local machine, using the given number of parallel threads')
# - perform ZT-normalization
    parser.add_argument('-z', '--zt-norm', action = 'store_false', help = 'Compute the ZT norm for the files (might not be available for all databases).')
# - just print?
parser.add_argument('-q', '--dry-run', action = 'store_true', help = 'Just print the commands, but do not execute them.')
# - evaluate the algorithm (after it has finished)
parser.add_argument('-e', '--evaluate', nargs='+', choices = ('EER', 'HTER', 'ROC', 'DET', 'CMC', 'RR'), help = 'Evaluate the results of the algorithms (instead of running them) using the given evaluation techniques.')
# TODO: add MIN-DCT measure
# - other parameters that are passed to the underlying script
parser.add_argument('parameters', nargs = argparse.REMAINDER, help = 'Parameters directly passed to the verify.py script.')
bob.core.log.add_command_line_option(parser)
args = parser.parse_args(command_line_parameters)
if args.all:
args.algorithms = all_algorithms
bob.core.log.set_verbosity_level(logger, args.verbose)
return args
def send_login_signal(self, request, user, profile, client):
"""
Send a signal that a user logged in. This signal should be sent only if
the user was *not* logged into Django.
"""
signals.login.send(sender=profile.__class__, user=user,
profile=profile, client=client, request=request)
def update_utxoset(self, transaction):
"""Update the UTXO set given ``transaction``. That is, remove
the outputs that the given ``transaction`` spends, and add the
outputs that the given ``transaction`` creates.
Args:
transaction (:obj:`~bigchaindb.models.Transaction`): A new
transaction incoming into the system for which the UTXO
set needs to be updated.
"""
    spent_outputs = list(transaction.spent_outputs)
if spent_outputs:
self.delete_unspent_outputs(*spent_outputs)
self.store_unspent_outputs(
*[utxo._asdict() for utxo in transaction.unspent_outputs]
)
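A minimal sketch of the same spend-then-create flow against an in-memory UTXO set (the transaction shape below is a hypothetical stand-in):

utxo_set = {('tx0', 0): {'amount': 5}}  # (txid, output index) -> output

class Tx:  # hypothetical stand-in transaction
    spent_outputs = [('tx0', 0)]                   # outputs it consumes
    unspent_outputs = {('tx1', 0): {'amount': 5}}  # outputs it creates

def update_utxoset(tx):
    for ref in tx.spent_outputs:         # remove what the transaction spends
        utxo_set.pop(ref, None)
    utxo_set.update(tx.unspent_outputs)  # add what it creates

update_utxoset(Tx())
print(utxo_set)  # {('tx1', 0): {'amount': 5}}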
def list_from_env(key, default=""):
"""
Splits a string in the format "a,b,c,d,e,f" into
['a', 'b', 'c', 'd', 'e', 'f', ]
"""
    # os.environ.get never raises KeyError, so no try/except is needed; guard
    # against an empty string, which would otherwise split into [''].
    val = os.environ.get(key, default)
    return val.split(',') if val else []
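A quick usage sketch (the variable names are hypothetical):

import os

os.environ['ALLOWED_HOSTS'] = 'a.example,b.example'
print(list_from_env('ALLOWED_HOSTS'))  # ['a.example', 'b.example']
print(list_from_env('MISSING_KEY'))    # [] thanks to the empty-string guard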
def run(self, steps=None, resume=False, redo=None):
"""
Run a Stimela recipe.
steps : recipe steps to run
resume : resume recipe from last run
redo : Re-run an old recipe from a .last file
"""
recipe = {
"name" : self.name,
"steps" : []
}
start_at = 0
if redo:
recipe = utils.readJson(redo)
self.log.info('Rerunning recipe {0} from {1}'.format(recipe['name'], redo))
self.log.info('Recreating recipe instance..')
self.jobs = []
for step in recipe['steps']:
# add I/O folders to the json file
# add a string describing the contents of these folders
# The user has to ensure that these folders exist, and have the required content
if step['jtype'] == 'docker':
self.log.info('Adding job \'{0}\' to recipe. The container will be named \'{1}\''.format(step['cab'], step['name']))
cont = docker.Container(step['cab'], step['name'],
label=step['label'], logger=self.log,
shared_memory=step['shared_memory'])
self.log.debug('Adding volumes {0} and environmental variables {1}'.format(step['volumes'], step['environs']))
cont.volumes = step['volumes']
cont.environs = step['environs']
cont.shared_memory = step['shared_memory']
cont.input_content = step['input_content']
cont.msdir_content = step['msdir_content']
cont.logfile = step['logfile']
job = StimelaJob(step['name'], recipe=self, label=step['label'])
job.job = cont
job.jtype = 'docker'
elif step['jtype'] == 'function':
name = step['name']
func = inspect.currentframe().f_back.f_locals[step['function']]
job = StimelaJob(name, recipe=self, label=step['label'])
job.python_job(func, step['parameters'])
job.jtype = 'function'
self.jobs.append(job)
elif resume:
self.log.info("Resuming recipe from last run.")
try:
recipe = utils.readJson(self.resume_file)
except IOError:
raise StimelaRecipeExecutionError("Cannot resume pipeline, resume file '{}' not found".format(self.resume_file))
steps_ = recipe.pop('steps')
recipe['steps'] = []
_steps = []
for step in steps_:
if step['status'] == 'completed':
recipe['steps'].append(step)
continue
label = step['label']
number = step['number']
# Check if the recipe flow has changed
if label == self.jobs[number-1].label:
self.log.info('recipe step \'{0}\' is fit for re-execution. Label = {1}'.format(number, label))
_steps.append(number)
else:
raise StimelaRecipeExecutionError('Recipe flow, or task scheduling has changed. Cannot resume recipe. Label = {0}'.format(label))
# Check whether there are steps to resume
        if len(_steps) == 0:
self.log.info('All the steps were completed. No steps to resume')
sys.exit(0)
steps = _steps
if getattr(steps, '__iter__', False):
_steps = []
if isinstance(steps[0], str):
labels = [ job.label.split('::')[0] for job in self.jobs]
for step in steps:
try:
_steps.append(labels.index(step)+1)
except ValueError:
raise StimelaCabParameterError('Recipe label ID [{0}] doesn\'t exist'.format(step))
steps = _steps
else:
steps = range(1, len(self.jobs)+1)
jobs = [(step, self.jobs[step-1]) for step in steps]
for i, (step, job) in enumerate(jobs):
self.log.info('Running job {}'.format(job.name))
self.log.info('STEP {0} :: {1}'.format(i+1, job.label))
self.active = job
try:
if job.jtype == 'function':
job.run_python_job()
elif job.jtype in ['docker', 'singularity']:
with open(job.job.logfile, 'a') as astd:
astd.write('\n-----------------------------------\n')
astd.write('Stimela version : {}\n'.format(version.version))
astd.write('Cab name : {}\n'.format(job.job.image))
astd.write('-------------------------------------\n')
run_job = getattr(job, "run_{0:s}_job".format(job.jtype))
run_job()
self.log2recipe(job, recipe, step, 'completed')
except (utils.StimelaCabRuntimeError,
StimelaRecipeExecutionError,
StimelaCabParameterError) as e:
self.completed = [jb[1] for jb in jobs[:i]]
self.remaining = [jb[1] for jb in jobs[i+1:]]
self.failed = job
self.log.info('Recipe execution failed while running job {}'.format(job.name))
self.log.info('Completed jobs : {}'.format([c.name for c in self.completed]))
self.log.info('Remaining jobs : {}'.format([c.name for c in self.remaining]))
self.log2recipe(job, recipe, step, 'failed')
for step, jb in jobs[i+1:]:
self.log.info('Logging remaining task: {}'.format(jb.label))
self.log2recipe(jb, recipe, step, 'remaining')
self.log.info('Saving pipeline information in {}'.format(self.resume_file))
utils.writeJson(self.resume_file, recipe)
pe = PipelineException(e, self.completed, job, self.remaining)
raise_(pe, None, sys.exc_info()[2])
        except Exception:
            import traceback
            traceback.print_exc()
            raise RuntimeError("An unhandled exception has occurred. This is a bug, please report it")
finally:
if job.jtype == 'docker' and job.created:
job.job.stop()
job.job.remove()
if job.jtype == 'singularity' and job.created:
job.job.stop()
self.log.info('Saving pipeline information in {}'.format(self.resume_file))
utils.writeJson(self.resume_file, recipe)
self.log.info('Recipe executed successfully')
return 0
def get_force_single(self, component_info=None, data=None, component_position=None):
"""Get a single force data channel."""
components = []
append_components = components.append
for _ in range(component_info.plate_count):
component_position, plate = QRTPacket._get_exact(
RTForcePlateSingle, data, component_position
)
component_position, force = QRTPacket._get_exact(
RTForce, data, component_position
)
append_components((plate, force))
return components
def download(self, path, args=[], filepath=None, opts={},
compress=True, **kwargs):
"""Makes a request to the IPFS daemon to download a file.
Downloads a file or files from IPFS into the current working
directory, or the directory given by ``filepath``.
Raises
------
~ipfsapi.exceptions.ErrorResponse
~ipfsapi.exceptions.ConnectionError
~ipfsapi.exceptions.ProtocolError
~ipfsapi.exceptions.StatusError
~ipfsapi.exceptions.TimeoutError
Parameters
----------
path : str
The REST command path to send
filepath : str
The local path where IPFS will store downloaded files
Defaults to the current working directory.
args : list
Positional parameters to be sent along with the HTTP request
opts : dict
        Query string parameters to be sent along with the HTTP request
compress : bool
Whether the downloaded file should be GZip compressed by the
daemon before being sent to the client
kwargs : dict
Additional arguments to pass to :mod:`requests`
"""
url = self.base + path
wd = filepath or '.'
params = []
params.append(('stream-channels', 'true'))
params.append(('archive', 'true'))
if compress:
params.append(('compress', 'true'))
for opt in opts.items():
params.append(opt)
for arg in args:
params.append(('arg', arg))
method = 'get'
res = self._do_request(method, url, params=params, stream=True,
**kwargs)
self._do_raise_for_status(res)
# try to stream download as a tar file stream
mode = 'r|gz' if compress else 'r|'
with tarfile.open(fileobj=res.raw, mode=mode) as tf:
tf.extractall(path=wd)
def plugin_method(*plugin_names):
"""Plugin Method decorator.
Signs a web handler function with the plugins to be applied as attributes.
Args:
plugin_names (list): A list of plugin callable names
Returns:
A wrapped handler callable.
Examples:
>>> @plugin_method('json', 'bill')
... def method():
... return "Hello!"
...
>>> print method.json
True
>>> print method.bill
True
"""
def wrapper(callable_obj):
for plugin_name in plugin_names:
if not hasattr(callable_obj, plugin_name):
setattr(callable_obj, plugin_name, True)
return callable_obj
return wrapper
def _get_attrs(self):
"""An internal helper for the representation methods"""
attrs = []
attrs.append(("N Blocks", self.n_blocks, "{}"))
bds = self.bounds
attrs.append(("X Bounds", (bds[0], bds[1]), "{:.3f}, {:.3f}"))
attrs.append(("Y Bounds", (bds[2], bds[3]), "{:.3f}, {:.3f}"))
attrs.append(("Z Bounds", (bds[4], bds[5]), "{:.3f}, {:.3f}"))
return attrs
def set_source_ip_for_interface(source_ip_address, desired_source_ip_address, device_num=0):
"""Configures the source IP address for a Linux interface
:param source_ip_address: (str) Source IP address to change
:param desired_source_ip_address: (str) IP address to configure as the source in outgoing packets
:param device_num: (int) Integer interface device number to configure
:return: None
:raises: TypeError, ValueError, OSError
"""
log = logging.getLogger(mod_logger + '.set_source_ip_for_interface')
if not isinstance(source_ip_address, basestring):
msg = 'arg source_ip_address must be a string'
log.error(msg)
raise TypeError(msg)
if not isinstance(desired_source_ip_address, basestring):
msg = 'arg desired_source_ip_address must be a string'
log.error(msg)
raise TypeError(msg)
if not validate_ip_address(ip_address=source_ip_address):
msg = 'The arg source_ip_address was found to be an invalid IP address. Please pass a valid IP address'
log.error(msg)
raise ValueError(msg)
if not validate_ip_address(ip_address=desired_source_ip_address):
msg = 'The arg desired_source_ip_address was found to be an invalid IP address. Please pass a valid IP address'
log.error(msg)
raise ValueError(msg)
# Determine the device name based on the device_num
log.debug('Attempting to determine the device name based on the device_num arg...')
try:
int(device_num)
except ValueError:
if isinstance(device_num, basestring):
device_name = device_num
log.info('Provided device_num is not an int, assuming it is the full device name: {d}'.format(
d=device_name))
else:
raise TypeError('device_num arg must be a string or int')
else:
device_name = 'eth{n}'.format(n=str(device_num))
log.info('Provided device_num is an int, assuming device name is: {d}'.format(d=device_name))
# Build the command
# iptables -t nat -I POSTROUTING -o eth0 -s ${RA_ORIGINAL_IP} -j SNAT --to-source
command = ['iptables', '-t', 'nat', '-I', 'POSTROUTING', '-o', device_name, '-s',
source_ip_address, '-j', 'SNAT', '--to-source', desired_source_ip_address]
log.info('Running command: {c}'.format(c=command))
try:
result = run_command(command, timeout_sec=20)
except CommandError:
_, ex, trace = sys.exc_info()
msg = 'There was a problem running iptables command: {c}\n{e}'.format(c=' '.join(command), e=str(ex))
log.error(msg)
raise OSError, msg, trace
if int(result['code']) != 0:
msg = 'The iptables command produced an error with exit code: {c}, and output:\n{o}'.format(
c=result['code'], o=result['output'])
log.error(msg)
raise OSError(msg)
log.info('Successfully configured the source IP for {d} to be: {i}'.format(
d=device_name, i=desired_source_ip_address))
def temporal_participation_coeff(tnet, communities=None, decay=None, removeneg=False):
r'''
Temporal participation coefficient is a measure of diversity of connections across communities for individual nodes.
Parameters
----------
tnet : array, dict
graphlet or contact sequence input. Only positive matrices considered.
communities : array
community vector. Either 1D (node) community index or 2D (node,time).
removeneg : bool (default false)
If true, all values < 0 are made to be 0.
Returns
-------
P : array
participation coefficient
Notes
-----
    Static participation coefficient is:
.. math:: P_i = 1 - \sum_s^{N_M}({{k_{is}}\over{k_i}})^2
Where s is the index of each community (:math:`N_M`). :math:`k_i` is total degree of node. And :math:`k_{is}` is degree of connections within community.[part-1]_
This "temporal" version only loops through temporal snapshots and calculates :math:`P_i` for each t.
If directed, function sums axis=1, so tnet may need to be transposed before hand depending on what type of directed part_coef you are interested in.
References
----------
.. [part-1] Guimera et al (2005) Functional cartography of complex metabolic networks. Nature. 433: 7028, p895-900. [`Link <http://doi.org/10.1038/nature03288>`_]
'''
if communities is None:
if isinstance(tnet, dict):
if 'communities' in tnet.keys():
communities = tnet['communities']
else:
raise ValueError('Community index not found')
else:
raise ValueError('Community must be provided for graphlet input')
# Get input in right format
tnet = process_input(tnet, ['C', 'G', 'TN'], 'TN')
if tnet.nettype[0] == 'w':
# TODO add contingency when hdf5 data has negative edges
        if not tnet.hdf5:
if sum(tnet.network['weight'] < 0) > 0 and not removeneg:
print(
'TENETO WARNING: negative edges exist when calculating participation coefficient.')
else:
tnet.network['weight'][tnet.network['weight'] < 0] = 0
part = np.zeros([tnet.netshape[0], tnet.netshape[1]])
if len(communities.shape) == 1:
for t in np.arange(0, tnet.netshape[1]):
C = communities
snapshot = tnet.get_network_when(t=t)
if tnet.nettype[1] == 'd':
i_at_t = snapshot['i'].values
else:
i_at_t = np.concatenate(
[snapshot['i'].values, snapshot['j'].values])
i_at_t = np.unique(i_at_t).tolist()
i_at_t = list(map(int, i_at_t))
for i in i_at_t:
# Calculate degree of node
if tnet.nettype[1] == 'd':
df = tnet.get_network_when(i=i, t=t)
j_at_t = df['j'].values
if tnet.nettype == 'wd':
k_i = df['weight'].sum()
elif tnet.nettype == 'bd':
k_i = len(df)
elif tnet.nettype[1] == 'u':
df = tnet.get_network_when(ij=i, t=t)
j_at_t = np.concatenate([df['i'].values, df['j'].values])
if tnet.nettype == 'wu':
k_i = df['weight'].sum()
elif tnet.nettype == 'bu':
k_i = len(df)
j_at_t = list(map(int, j_at_t))
for c in np.unique(C[j_at_t]):
ci = np.where(C == c)[0].tolist()
k_is = tnet.get_network_when(i=i, j=ci, t=t)
if tnet.nettype[1] == 'u':
k_is2 = tnet.get_network_when(j=i, i=ci, t=t)
k_is = pd.concat([k_is, k_is2])
if len(k_is) > 0:
if tnet.nettype[0] == 'b':
k_is = len(k_is)
else:
k_is = k_is['weight'].sum()
part[i, t] += np.square(k_is/k_i)
part[i_at_t, t] = 1 - part[i_at_t, t]
if decay is not None and t > 0:
part[i_at_t, t] += decay*part[i_at_t, t-1]
else:
for t in np.arange(0, tnet.netshape[1]):
snapshot = tnet.get_network_when(t=t)
if tnet.nettype[1] == 'd':
i_at_t = snapshot['i'].values
else:
i_at_t = np.concatenate(
[snapshot['i'].values, snapshot['j'].values])
i_at_t = np.unique(i_at_t).tolist()
i_at_t = list(map(int, i_at_t))
for i in i_at_t:
for tc in np.arange(0, tnet.netshape[1]):
C = communities[:, tc]
# Calculate degree of node
if tnet.nettype[1] == 'd':
df = tnet.get_network_when(i=i, t=t)
j_at_t = df['j'].values
if tnet.nettype == 'wd':
k_i = df['weight'].sum()
elif tnet.nettype == 'bd':
k_i = len(df)
elif tnet.nettype[1] == 'u':
df = tnet.get_network_when(ij=i, t=t)
j_at_t = np.concatenate(
[df['i'].values, df['j'].values])
if tnet.nettype == 'wu':
k_i = df['weight'].sum()
elif tnet.nettype == 'bu':
k_i = len(df)
j_at_t = list(map(int, j_at_t))
for c in np.unique(C[j_at_t]):
ci = np.where(C == c)[0].tolist()
k_is = tnet.get_network_when(i=i, j=ci, t=t)
if tnet.nettype[1] == 'u':
k_is2 = tnet.get_network_when(j=i, i=ci, t=t)
k_is = pd.concat([k_is, k_is2])
if tnet.nettype[0] == 'b':
k_is = len(k_is)
else:
k_is = k_is['weight'].sum()
part[i, t] += np.square(k_is/k_i)
part[i, t] = part[i, t] / tnet.netshape[1]
part[i_at_t, t] = 1 - part[i_at_t, t]
if decay is not None and t > 0:
part[i_at_t, t] += decay*part[i_at_t, t-1]
    # Set any division by 0 to 0
    part[np.isnan(part)] = 0
return part
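A NumPy sketch of the static participation coefficient for a single binary, undirected snapshot, independent of teneto's data structures:

import numpy as np

A = np.array([[0, 1, 1, 1],
              [1, 0, 0, 0],
              [1, 0, 0, 1],
              [1, 0, 1, 0]])          # binary undirected adjacency matrix
communities = np.array([0, 0, 1, 1])  # two communities

k = A.sum(axis=1).astype(float)       # total degree of each node
P = np.ones(len(A))
for c in np.unique(communities):
    k_c = A[:, communities == c].sum(axis=1)  # degree into community c
    P -= (k_c / k) ** 2
print(P)  # node 0 connects to both communities, node 1 to only one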
def compile_tag_re(self, tags):
"""
Return the regex used to look for Mustache tags compiled to work with
specific opening tags, close tags, and tag types.
"""
return re.compile(self.raw_tag_re % tags, self.re_flags)
def handle_404(request, exception):
    '''Handle 404 Not Found.
    This handler should be used to handle HTTP 404 Not Found errors for all
    endpoints, or whenever a requested resource is not available.
    '''
error = format_error(title='Resource not found', detail=str(exception))
return json(return_an_error(error), status=HTTPStatus.NOT_FOUND)
def median_high(data):
"""Return the high median of data.
When the number of data points is odd, the middle value is returned.
When it is even, the larger of the two middle values is returned.
"""
data = sorted(data)
n = len(data)
if n == 0:
raise StatisticsError("no median for empty data")
return data[n // 2]
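A quick usage example:

print(median_high([1, 3, 5]))     # odd count: middle value -> 3
print(median_high([1, 3, 5, 7]))  # even count: larger middle value -> 5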
def is_text_file(file_path: str) -> bool:
"""Returns if a file contains only ASCII or UTF-8 encoded text.
:param file_path: path to the file being checked
:return: True if the file is a text file, False if it is binary.
"""
import codecs
expanded_path = os.path.abspath(os.path.expanduser(file_path.strip()))
valid_text_file = False
# Check if the file is ASCII
try:
with codecs.open(expanded_path, encoding='ascii', errors='strict') as f:
# Make sure the file has at least one line of text
# noinspection PyUnusedLocal
if sum(1 for line in f) > 0:
valid_text_file = True
except OSError: # pragma: no cover
pass
except UnicodeDecodeError:
# The file is not ASCII. Check if it is UTF-8.
try:
with codecs.open(expanded_path, encoding='utf-8', errors='strict') as f:
# Make sure the file has at least one line of text
# noinspection PyUnusedLocal
if sum(1 for line in f) > 0:
valid_text_file = True
except OSError: # pragma: no cover
pass
except UnicodeDecodeError:
# Not UTF-8
pass
return valid_text_file
|
Returns whether a file contains only ASCII or UTF-8 encoded text.
:param file_path: path to the file being checked
:return: True if the file is a text file, False if it is binary.
|
def addFeatureSet(self, featureSet):
"""
Adds the specified featureSet to this dataset.
"""
id_ = featureSet.getId()
self._featureSetIdMap[id_] = featureSet
self._featureSetIds.append(id_)
name = featureSet.getLocalId()
self._featureSetNameMap[name] = featureSet
|
Adds the specified featureSet to this dataset.
|
def add_mountains(self):
"""
    Unlike the add_blocks function, which produced line-shaped
    walls to block path-finding agents, this function creates
    more natural-looking blocking areas such as mountains.
"""
from noise import pnoise2
import random
random.seed()
octaves = (random.random() * 0.5) + 0.5
    freq = 17.0 * octaves
for y in range(self.grd.grid_height - 1):
for x in range(self.grd.grid_width - 1):
pixel = self.grd.get_tile(y,x)
if pixel == 'X': # denoise blocks of mountains
n = int(pnoise2(x/freq, y / freq, 1)*11+5)
if n < 1:
self.grd.set_tile(y, x, '#')
|
Unlike the add_blocks function, which produced line-shaped
walls to block path-finding agents, this function creates
more natural-looking blocking areas such as mountains.
|
def _make_eval_func(self, tensors, session, feed_dict, fetches,
callback=None):
"""Construct a function that evaluates a `Tensor` or list of `Tensor`s."""
if not isinstance(tensors, list):
tensors = [tensors]
num_tensors = len(tensors)
def eval_func(x):
"""Function to evaluate a `Tensor`."""
shapes = dict(zip(self._vars, self._var_shapes))
augmented_feed_dict = {
var: x[packing_slice].reshape(shapes[var])
for var, packing_slice in zip(self._vars, self._packing_slices)
}
augmented_feed_dict.update(feed_dict)
augmented_fetches = tensors + fetches
augmented_fetch_vals = session.run(
augmented_fetches, feed_dict=augmented_feed_dict)
if callable(callback):
callback(*augmented_fetch_vals[num_tensors:])
return augmented_fetch_vals[:num_tensors]
return eval_func
|
Construct a function that evaluates a `Tensor` or list of `Tensor`s.
|
def _load_options(self, container, **options):
"""
Select backend specific loading options.
"""
# Force set dict option if available in backend. For example,
# options["object_hook"] will be OrderedDict if 'container' was
# OrderedDict in JSON backend.
for opt in self.dict_options():
options.setdefault(opt, container)
return anyconfig.utils.filter_options(self._load_opts, options)
|
Select backend specific loading options.
|
def numberOfConnectedDistalSynapses(self, cells=None):
"""
Returns the number of connected distal synapses on these cells.
Parameters:
----------------------------
@param cells (iterable)
Indices of the cells. If None return count for all cells.
"""
if cells is None:
cells = xrange(self.numberOfCells())
n = _countWhereGreaterEqualInRows(self.internalDistalPermanences, cells,
self.connectedPermanenceDistal)
for permanences in self.distalPermanences:
n += _countWhereGreaterEqualInRows(permanences, cells,
self.connectedPermanenceDistal)
return n
|
Returns the number of connected distal synapses on these cells.
Parameters:
----------------------------
@param cells (iterable)
Indices of the cells. If None return count for all cells.
|
def parse_results_mol2(mol2_outpath):
"""Parse a DOCK6 mol2 output file, return a Pandas DataFrame of the results.
Args:
mol2_outpath (str): Path to mol2 output file
Returns:
DataFrame: Pandas DataFrame of the results
"""
    docked_ligands = pd.DataFrame()
    with open(mol2_outpath, 'r') as f:
        lines = [line.strip() for line in f]
    props = {}
    for line in lines:
        if line.startswith('########## Name:'):
            ligand = line.strip('#').replace(' ', '').replace('\t', '').split(':')[1]
            props = {'Ligand': ligand}
        elif line.startswith('##########'):
            splitter = line.strip('#').replace(' ', '').replace('\t', '').split(':')
            props[splitter[0]] = float(splitter[1])
        elif line.startswith('@<TRIPOS>MOLECULE'):
            # The property block precedes the MOLECULE record, so append here
            if props:
                docked_ligands = docked_ligands.append(props, ignore_index=True)
    return docked_ligands
|
Parse a DOCK6 mol2 output file, return a Pandas DataFrame of the results.
Args:
mol2_outpath (str): Path to mol2 output file
Returns:
DataFrame: Pandas DataFrame of the results
|
def set_server_admin_password(self, server_name, admin_password):
'''
Reset the administrator password for a server.
server_name:
Name of the server to change the password.
admin_password:
The new administrator password for the server.
'''
_validate_not_none('server_name', server_name)
_validate_not_none('admin_password', admin_password)
return self._perform_post(
self._get_servers_path(server_name) + '?op=ResetPassword',
_SqlManagementXmlSerializer.set_server_admin_password_to_xml(
admin_password
)
)
|
Reset the administrator password for a server.
server_name:
Name of the server to change the password.
admin_password:
The new administrator password for the server.
|
def decrypt(self, data, nonce=None):
    """Decrypt data with counter or specified nonce."""
    if nonce is None:
        nonce = self._in_counter.to_bytes(length=8, byteorder='little')
        self._in_counter += 1
    decrypted = self._enc_in.open(
        b'\x00\x00\x00\x00' + nonce, data, bytes())
    if not decrypted:
        raise Exception('data decrypt failed')  # TODO: new exception
    return bytes(decrypted)
|
Decrypt data with counter or specified nonce.
|
def set_connection_logging(self, loadbalancer, val):
"""
Sets the connection logging for the given load balancer.
"""
uri = "/loadbalancers/%s/connectionlogging" % utils.get_id(loadbalancer)
val = str(val).lower()
req_body = {"connectionLogging": {
"enabled": val,
}}
resp, body = self.api.method_put(uri, body=req_body)
return body
|
Sets the connection logging for the given load balancer.
|
def mlem(op, x, data, niter, callback=None, **kwargs):
"""Maximum Likelihood Expectation Maximation algorithm.
Attempts to solve::
max_x L(x | data)
where ``L(x | data)`` is the Poisson likelihood of ``x`` given ``data``.
The likelihood depends on the forward operator ``op`` such that
(approximately)::
op(x) = data
Parameters
----------
op : `Operator`
Forward operator in the inverse problem.
x : ``op.domain`` element
Vector to which the result is written. Its initial value is
used as starting point of the iteration, and its values are
updated in each iteration step.
The initial value of ``x`` should be non-negative.
data : ``op.range`` `element-like`
Right-hand side of the equation defining the inverse problem.
niter : int
Number of iterations.
callback : callable, optional
Function called with the current iterate after each iteration.
Other Parameters
----------------
sensitivities : float or ``op.domain`` `element-like`, optional
The algorithm contains a ``A^T 1``
term, if this parameter is given, it is replaced by it.
Default: ``op.adjoint(op.range.one())``
Notes
-----
Given a forward model :math:`A` and data :math:`g`,
the algorithm attempts to find an :math:`x` that maximizes:
.. math::
P(g | g \text{ is } Poisson(A(x)) \text{ distributed}).
The algorithm is explicitly given by:
.. math::
x_{n+1} = \frac{x_n}{A^* 1} A^* (g / A(x_n))
See Also
--------
osmlem : Ordered subsets MLEM
loglikelihood : Function for calculating the logarithm of the likelihood
"""
osmlem([op], x, [data], niter=niter, callback=callback,
**kwargs)
|
Maximum Likelihood Expectation Maximization algorithm.
Attempts to solve::
max_x L(x | data)
where ``L(x | data)`` is the Poisson likelihood of ``x`` given ``data``.
The likelihood depends on the forward operator ``op`` such that
(approximately)::
op(x) = data
Parameters
----------
op : `Operator`
Forward operator in the inverse problem.
x : ``op.domain`` element
Vector to which the result is written. Its initial value is
used as starting point of the iteration, and its values are
updated in each iteration step.
The initial value of ``x`` should be non-negative.
data : ``op.range`` `element-like`
Right-hand side of the equation defining the inverse problem.
niter : int
Number of iterations.
callback : callable, optional
Function called with the current iterate after each iteration.
Other Parameters
----------------
sensitivities : float or ``op.domain`` `element-like`, optional
The algorithm contains a ``A^T 1``
term, if this parameter is given, it is replaced by it.
Default: ``op.adjoint(op.range.one())``
Notes
-----
Given a forward model :math:`A` and data :math:`g`,
the algorithm attempts to find an :math:`x` that maximizes:
.. math::
P(g | g \text{ is } Poisson(A(x)) \text{ distributed}).
The algorithm is explicitly given by:
.. math::
x_{n+1} = \frac{x_n}{A^* 1} A^* (g / A(x_n))
See Also
--------
osmlem : Ordered subsets MLEM
loglikelihood : Function for calculating the logarithm of the likelihood
|
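A minimal dense-matrix sketch of the update rule above, with a plain numpy array standing in for the odl Operator (illustrative only, not the library API):
import numpy as np

def mlem_dense(A, g, x, niter, eps=1e-12):
    # x_{n+1} = x_n / (A^T 1) * A^T (g / A x_n)
    sens = A.T @ np.ones(A.shape[0])        # sensitivities A^T 1
    for _ in range(niter):
        ratio = g / np.maximum(A @ x, eps)  # guard against division by zero
        x = x / sens * (A.T @ ratio)
    return x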
def reset(self):
"""
Clear the values of all attributes of the transaction store.
"""
self.getsCounter = 0
# dictionary of processed requests for each client. Value for each
# client is a dictionary with request id as key and transaction id as
# value
self.processedRequests = {} # type: Dict[str, Dict[int, str]]
# dictionary of responses to be sent for each client. Value for each
# client is an asyncio Queue
self.responses = {} # type: Dict[str, asyncio.Queue]
# dictionary with key as transaction id and `Reply` as
# value
self.transactions = {}
|
Clear the values of all attributes of the transaction store.
|
def chooseStep(self, divisors=None, binary=False):
"""Choose a nice, pretty size for the steps between axis labels.
Our main constraint is that the number of divisions must be taken
from the divisors list. We pick a number of divisions and a step
size that minimizes the amount of whitespace ("slop") that would
need to be included outside of the range [self.minValue,
self.maxValue] if we were to push out the axis values to the next
larger multiples of the step size.
The minimum step that could possibly cover the variance satisfies
minStep * max(divisors) >= variance
or
minStep = variance / max(divisors)
It's not necessarily possible to cover the variance with a step
that size, but we know that any smaller step definitely *cannot*
cover it. So we can start there.
For a sufficiently large step size, it is definitely possible to
cover the variance, but at some point the slop will start growing.
Let's define the slop to be
slop = max(minValue - bottom, top - maxValue)
Then for a given step size, we know that
slop >= (1/2) * (step * min(divisors) - variance)
(the factor of 1/2 is for the best-case scenario that the slop is
distributed equally on the two sides of the range). So suppose we
already have a choice that yields bestSlop. Then there is no need
to choose steps so large that the slop is guaranteed to be larger
than bestSlop. Therefore, the maximum step size that we need to
consider is
maxStep = (2 * bestSlop + variance) / min(divisors)
"""
self.binary = binary
if divisors is None:
divisors = [4, 5, 6]
else:
for divisor in divisors:
self.checkFinite(divisor, 'divisor')
if divisor < 1:
raise GraphError('Divisors must be greater than or equal '
'to one')
if self.minValue == self.maxValue:
if self.minValue == 0.0:
self.maxValue = 1.0
elif self.minValue < 0.0:
self.minValue *= 1.1
self.maxValue *= 0.9
else:
self.minValue *= 0.9
self.maxValue *= 1.1
variance = self.maxValue - self.minValue
bestSlop = None
bestStep = None
for step in self.generateSteps(variance / float(max(divisors))):
if (
bestSlop is not None and
step * min(divisors) >= 2 * bestSlop + variance
):
break
for divisor in divisors:
slop = self.computeSlop(step, divisor)
if slop is not None and (bestSlop is None or slop < bestSlop):
bestSlop = slop
bestStep = step
self.step = bestStep
|
Choose a nice, pretty size for the steps between axis labels.
Our main constraint is that the number of divisions must be taken
from the divisors list. We pick a number of divisions and a step
size that minimizes the amount of whitespace ("slop") that would
need to be included outside of the range [self.minValue,
self.maxValue] if we were to push out the axis values to the next
larger multiples of the step size.
The minimum step that could possibly cover the variance satisfies
minStep * max(divisors) >= variance
or
minStep = variance / max(divisors)
It's not necessarily possible to cover the variance with a step
that size, but we know that any smaller step definitely *cannot*
cover it. So we can start there.
For a sufficiently large step size, it is definitely possible to
cover the variance, but at some point the slop will start growing.
Let's define the slop to be
slop = max(minValue - bottom, top - maxValue)
Then for a given step size, we know that
slop >= (1/2) * (step * min(divisors) - variance)
(the factor of 1/2 is for the best-case scenario that the slop is
distributed equally on the two sides of the range). So suppose we
already have a choice that yields bestSlop. Then there is no need
to choose steps so large that the slop is guaranteed to be larger
than bestSlop. Therefore, the maximum step size that we need to
consider is
maxStep = (2 * bestSlop + variance) / min(divisors)
|
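A worked example of the minStep/maxStep bounds derived in the docstring above (the numbers are illustrative):
divisors = [4, 5, 6]
minValue, maxValue = 0.3, 9.7
variance = maxValue - minValue                       # 9.4
minStep = variance / max(divisors)                   # 9.4 / 6 ~ 1.57
# Suppose a candidate with step = 2 and divisor = 5 covers [0, 10]:
bestSlop = max(minValue - 0, 10 - maxValue)          # 0.3
maxStep = (2 * bestSlop + variance) / min(divisors)  # 10 / 4 = 2.5
# Steps above 2.5 are guaranteed to produce more slop, so the search stops.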
def available_modes_with_ids(self):
"""Return list of objects containing available mode name and id."""
if not self._available_mode_ids:
all_modes = FIXED_MODES.copy()
self._available_mode_ids = all_modes
modes = self.get_available_modes()
try:
if modes:
# pylint: disable=consider-using-dict-comprehension
simple_modes = dict(
[(m.get("type", m.get("name")), m.get("id"))
for m in modes]
)
all_modes.update(simple_modes)
self._available_mode_ids = all_modes
except TypeError:
_LOGGER.debug("Did not receive a valid response. Passing..")
return self._available_mode_ids
|
Return list of objects containing available mode name and id.
|
def guess_media_type(filepath):
"""Returns the media-type of the file at the given ``filepath``"""
o = subprocess.check_output(['file', '--mime-type', '-Lb', filepath])
    # check_output returns bytes; decode so callers get a str media type
    return o.strip().decode('utf-8')
|
Returns the media-type of the file at the given ``filepath``
|
def volume_adjusted_moving_average(close_data, volume, period):
"""
Volume Adjusted Moving Average.
Formula:
VAMA = SUM(CLOSE * VolumeRatio) / period
"""
catch_errors.check_for_input_len_diff(close_data, volume)
catch_errors.check_for_period_error(close_data, period)
avg_vol = np.mean(volume)
vol_incr = avg_vol * 0.67
vol_ratio = [val / vol_incr for val in volume]
close_vol = np.array(close_data) * vol_ratio
vama = [sum(close_vol[idx+1-period:idx+1]) / period for idx in range(period-1, len(close_data))]
vama = fill_for_noncomputable_vals(close_data, vama)
return vama
|
Volume Adjusted Moving Average.
Formula:
VAMA = SUM(CLOSE * VolumeRatio) / period
|
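A small usage sketch, assuming the module's input checks pass:
import numpy as np

close = np.array([10.0, 11.0, 12.0, 13.0, 14.0])
volume = np.array([100.0, 150.0, 120.0, 130.0, 110.0])
vama = volume_adjusted_moving_average(close, volume, period=3)
# Each close is weighted by volume / (mean(volume) * 0.67) before the
# 3-period average; the first period-1 slots are non-computable fills.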
def exists(name, path=None):
'''
Returns whether the named container exists.
path
path to the container parent directory (default: /var/lib/lxc)
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' lxc.exists name
'''
_exists = name in ls_(path=path)
    # the container may have just been created, but we cached the
    # lxc-ls results earlier
if not _exists:
_exists = name in ls_(cache=False, path=path)
return _exists
|
Returns whether the named container exists.
path
path to the container parent directory (default: /var/lib/lxc)
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' lxc.exists name
|
def get_pandasframe(self):
"""The method loads data from dataset"""
if self.dataset:
self._load_dimensions()
return self._get_pandasframe_one_dataset()
return self._get_pandasframe_across_datasets()
|
The method loads data from the dataset
|
def get_site(self, site_id):
"""
Returns site data.
http://dev.wheniwork.com/#get-existing-site
"""
url = "/2/sites/%s" % site_id
return self.site_from_json(self._get_resource(url)["site"])
|
Returns site data.
http://dev.wheniwork.com/#get-existing-site
|
def add_trendline(self,date0,date1,on='close',text=None,**kwargs):
"""
Adds a trendline to the QuantFigure.
Given 2 dates, the trendline is connected on the data points
that correspond to those dates.
Parameters:
date0 : string
Trendline starting date
date1 : string
Trendline end date
on : string
            Indicates the data series on which the
            trendline should be based.
'close'
'high'
'low'
'open'
text : string
If passed, then an annotation will be added
to the trendline (at mid point)
kwargs:
from_strfmt : string
                Defines the date formatting in which
date0 and date1 are stated.
default: '%d%b%y'
to_strfmt : string
Defines the date formatting
to which it should be converted.
This should match the same format as the timeseries index.
default : '%Y-%m-%d'
"""
d={'kind':'trend','date0':date0,'date1':date1,'on':on,'text':text}
d.update(**kwargs)
self.trendlines.append(d)
|
Adds a trendline to the QuantFigure.
Given 2 dates, the trendline is connected on the data points
that correspond to those dates.
Parameters:
date0 : string
Trendline starting date
date1 : string
Trendline end date
on : string
Indicates the data series on which the
trendline should be based.
'close'
'high'
'low'
'open'
text : string
If passed, then an annotation will be added
to the trendline (at mid point)
kwargs:
from_strfmt : string
Defines the date formatting in which
date0 and date1 are stated.
default: '%d%b%y'
to_strfmt : string
Defines the date formatting
to which it should be converted.
This should match the same format as the timeseries index.
default : '%Y-%m-%d'
|
def endings(self):
"""The list of word endings.
Ambiguous cases are separated with pipe character by default.
Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries.
"""
if not self.is_tagged(ANALYSIS):
self.tag_analysis()
return self.get_analysis_element(ENDING)
|
The list of word endings.
Ambiguous cases are separated with pipe character by default.
Use :py:meth:`~estnltk.text.Text.get_analysis_element` to specify custom separator for ambiguous entries.
|
def dumpdb(args):
"""
cldf dumpdb <DATASET> <SQLITE_DB_PATH> [<METADATA_PATH>]
"""
if len(args.args) < 2:
raise ParserError('not enough arguments') # pragma: no cover
ds = _get_dataset(args)
db = Database(ds, fname=args.args[1])
mdpath = Path(args.args[2]) if len(args.args) > 2 else ds.tablegroup._fname
args.log.info('dumped db to {0}'.format(db.to_cldf(mdpath.parent, mdname=mdpath.name)))
|
cldf dumpdb <DATASET> <SQLITE_DB_PATH> [<METADATA_PATH>]
|
def write_conll(self, fname):
"""
Serializes the dataset in CONLL format to fname
"""
if 'label' not in self.fields:
raise InvalidFieldsException("dataset is not in CONLL format: missing label field")
def instance_to_conll(inst):
tab = [v for k, v in inst.items() if k != 'label']
return '{}\n{}'.format(inst['label'], '\n'.join(['\t'.join(['-' if e is None else str(e) for e in row]) for row in zip(*tab)]))
    with open(fname, 'w') as f:
f.write('# {}'.format('\t'.join([k for k in self.fields if k != 'label'])))
for i, d in enumerate(self):
f.write('\n{}'.format(instance_to_conll(d)))
if i != len(self) - 1:
f.write('\n')
|
Serializes the dataset in CONLL format to fname
|
def fail(self, cmd, title=None, message=None):
"""Send back captured exceptions"""
if message is None:
message = self.handle_exc()
else:
message = escape(message)
self.db.send(
'Echo|%s' % dump({
'for': escape(title or '%s failed' % cmd),
'val': message
})
)
|
Send back captured exceptions
|
def _knit(fin, fout,
opts_knit='progress=FALSE, verbose=FALSE',
opts_chunk='eval=FALSE'):
"""Use knitr to convert r markdown (or anything knitr supports)
to markdown.
fin / fout - strings, input / output filenames.
opts_knit - string, options to pass to knit
    opts_chunk - string, chunk options
    options are passed verbatim to knitr::knit running in Rscript.
"""
script = ('sink("/dev/null");'
'library(knitr);'
'opts_knit$set({opts_knit});'
'opts_chunk$set({opts_chunk});'
'knit("{input}", output="{output}")')
rcmd = ('Rscript', '-e',
script.format(input=fin, output=fout,
opts_knit=opts_knit, opts_chunk=opts_chunk)
)
p = subprocess.Popen(rcmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
|
Use knitr to convert r markdown (or anything knitr supports)
to markdown.
fin / fout - strings, input / output filenames.
opts_knit - string, options to pass to knit
opts_chunk - string, chunk options
options are passed verbatim to knitr::knit running in Rscript.
|
def map_pixel_inv(row, col, cellx, celly, xmin, ymax):
'''
Usage:
map_pixel(xcoord, ycoord, x_cell_size, y_cell_size, xmin, ymax)
where:
xmin is leftmost X coordinate in system
ymax is topmost Y coordinate in system
Example:
raster = HMISea.tif
ndv, xsize, ysize, geot, projection, datatype = get_geo_info(raster)
row, col = map_pixel(x,y,geot[1],geot[-1], geot[0],geot[3])
'''
col = np.asarray(col)
row = np.asarray(row)
point_x = xmin+col*cellx
point_y = ymax+row*celly
return point_x, point_y
|
Usage:
map_pixel(xcoord, ycoord, x_cell_size, y_cell_size, xmin, ymax)
where:
xmin is leftmost X coordinate in system
ymax is topmost Y coordinate in system
Example:
raster = HMISea.tif
ndv, xsize, ysize, geot, projection, datatype = get_geo_info(raster)
row, col = map_pixel(x,y,geot[1],geot[-1], geot[0],geot[3])
|
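A numeric check of the inverse mapping above, for a 30 m north-up raster (celly negative):
x, y = map_pixel_inv(row=10, col=20, cellx=30.0, celly=-30.0,
                     xmin=500000.0, ymax=4600000.0)
# x = 500000 + 20 * 30   = 500600.0
# y = 4600000 + 10 * -30 = 4599700.0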
def close(self):
"""Close the stream."""
self.flush()
self.stream.close()
logging.StreamHandler.close(self)
|
Close the stream.
|
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: CountryContext for this CountryInstance
:rtype: twilio.rest.voice.v1.dialing_permissions.country.CountryContext
"""
if self._context is None:
self._context = CountryContext(self._version, iso_code=self._solution['iso_code'], )
return self._context
|
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: CountryContext for this CountryInstance
:rtype: twilio.rest.voice.v1.dialing_permissions.country.CountryContext
|
def datashape_type_to_numpy(type_):
"""
Given a datashape type, return the associated numpy type. Maps
datashape's DateTime type to numpy's `datetime64[ns]` dtype, since the
numpy datetime returned by datashape isn't supported by pipeline.
Parameters
----------
type_: datashape.coretypes.Type
The datashape type.
Returns
-------
    type_ : np.dtype
The numpy dtype.
"""
if isinstance(type_, Option):
type_ = type_.ty
if isinstance(type_, DateTime):
return np.dtype('datetime64[ns]')
if isinstance(type_, String):
return np.dtype(object)
if type_ in integral:
return np.dtype('int64')
else:
return type_.to_numpy_dtype()
|
Given a datashape type, return the associated numpy type. Maps
datashape's DateTime type to numpy's `datetime64[ns]` dtype, since the
numpy datetime returned by datashape isn't supported by pipeline.
Parameters
----------
type_: datashape.coretypes.Type
The datashape type.
Returns
-------
type_ : np.dtype
The numpy dtype.
|
def process_response(self, request, response):
"""Sets the cache, if needed."""
if not hasattr(request, '_cache_update_cache') or not request._cache_update_cache:
# We don't need to update the cache, just return.
return response
if request.method != 'GET':
# This is a stronger requirement than above. It is needed
# because of interactions between this middleware and the
# HTTPMiddleware, which throws the body of a HEAD-request
# away before this middleware gets a chance to cache it.
return response
        if response.status_code != 200:
return response
# Try to get the timeout from the "max-age" section of the "Cache-
# Control" header before reverting to using the default cache_timeout
# length.
timeout = get_max_age(response)
        if timeout is None:
timeout = self.cache_timeout
elif timeout == 0:
# max-age was set to 0, don't bother caching.
return response
patch_response_headers(response, timeout)
if timeout:
cache_key = learn_cache_key(request, response, timeout, self.key_prefix)
cache.set(cache_key, response, timeout)
logging.debug("UpdateCacheMiddleware: setting %s -> %s params are: %s" % (cache_key, request.path, get_cache_key_parameters(request)))
return response
|
Sets the cache, if needed.
|
def plot_final(self, ax):
'''
Plots the final de-trended light curve.
'''
# Plot the light curve
bnmask = np.array(
list(set(np.concatenate([self.badmask, self.nanmask]))), dtype=int)
def M(x): return np.delete(x, bnmask)
if (self.cadence == 'lc') or (len(self.time) < 4000):
ax.plot(M(self.time), M(self.flux), ls='none',
marker='.', color='k', markersize=2, alpha=0.3)
else:
ax.plot(M(self.time), M(self.flux), ls='none', marker='.',
color='k', markersize=2, alpha=0.03, zorder=-1)
ax.set_rasterization_zorder(0)
# Hack: Plot invisible first and last points to ensure
# the x axis limits are the
# same in the other plots, where we also plot outliers!
ax.plot(self.time[0], np.nanmedian(M(self.flux)), marker='.', alpha=0)
ax.plot(self.time[-1], np.nanmedian(M(self.flux)), marker='.', alpha=0)
# Plot the GP (long cadence only)
if self.cadence == 'lc':
gp = GP(self.kernel, self.kernel_params, white=False)
gp.compute(self.apply_mask(self.time),
self.apply_mask(self.fraw_err))
med = np.nanmedian(self.apply_mask(self.flux))
y, _ = gp.predict(self.apply_mask(self.flux) - med, self.time)
y += med
ax.plot(M(self.time), M(y), 'r-', lw=0.5, alpha=0.5)
# Compute the CDPP of the GP-detrended flux
self.cdppg = self._mission.CDPP(self.apply_mask(
self.flux - y + med), cadence=self.cadence)
else:
# We're not going to calculate this
self.cdppg = 0.
# Appearance
ax.annotate('Final', xy=(0.98, 0.025), xycoords='axes fraction',
ha='right', va='bottom', fontsize=10, alpha=0.5,
fontweight='bold')
ax.margins(0.01, 0.1)
# Get y lims that bound 99% of the flux
flux = np.delete(self.flux, bnmask)
N = int(0.995 * len(flux))
        fsort = flux[np.argsort(flux)]
        hi, lo = fsort[[N, -N]]
pad = (hi - lo) * 0.1
ylim = (lo - pad, hi + pad)
ax.set_ylim(ylim)
ax.get_yaxis().set_major_formatter(Formatter.Flux)
|
Plots the final de-trended light curve.
|
async def handle_json_response(responses):
"""
get the json data response
:param responses: the json response
    :return: the json data without 'root' node
"""
    json_data = {}
    if responses.status != 200:
        err_msg = HttpProcessingError(code=responses.status,
                                      message=await responses.json())
        logging.error("Wallabag: aiohttp error {err_msg}".format(
            err_msg=err_msg))
    else:
        try:
            # aiohttp's json() is a coroutine and must be awaited
            json_data = await responses.json()
        except ClientResponseError as e:
            # sometimes json() does not return any data without
            # raising an error. This is due to the grabbed URL which
            # "rejects" the request
            logging.error("Wallabag: aiohttp error {code} {message}"
                          .format(code=e.code, message=e.message))
    return json_data
|
get the json data response
:param responses: the json response
:return: the json data without 'root' node
|
def hide_routemap_holder_route_map_content_set_origin_origin_igp(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hide_routemap_holder = ET.SubElement(config, "hide-routemap-holder", xmlns="urn:brocade.com:mgmt:brocade-ip-policy")
route_map = ET.SubElement(hide_routemap_holder, "route-map")
name_key = ET.SubElement(route_map, "name")
name_key.text = kwargs.pop('name')
action_rm_key = ET.SubElement(route_map, "action-rm")
action_rm_key.text = kwargs.pop('action_rm')
instance_key = ET.SubElement(route_map, "instance")
instance_key.text = kwargs.pop('instance')
content = ET.SubElement(route_map, "content")
set = ET.SubElement(content, "set")
origin = ET.SubElement(set, "origin")
origin_igp = ET.SubElement(origin, "origin-igp")
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
def set_kill_on_exit_mode(bKillOnExit = False):
"""
Defines the behavior of the debugged processes when the debugging
thread dies. This method only affects the calling thread.
Works on the following platforms:
- Microsoft Windows XP and above.
- Wine (Windows Emulator).
Fails on the following platforms:
- Microsoft Windows 2000 and below.
- ReactOS.
@type bKillOnExit: bool
@param bKillOnExit: C{True} to automatically kill processes when the
debugger thread dies. C{False} to automatically detach from
processes when the debugger thread dies.
@rtype: bool
@return: C{True} on success, C{False} on error.
@note:
This call will fail if a debug port was not created. That is, if
the debugger isn't attached to at least one process. For more info
see: U{http://msdn.microsoft.com/en-us/library/ms679307.aspx}
"""
try:
# won't work before calling CreateProcess or DebugActiveProcess
win32.DebugSetProcessKillOnExit(bKillOnExit)
except (AttributeError, WindowsError):
return False
return True
|
Defines the behavior of the debugged processes when the debugging
thread dies. This method only affects the calling thread.
Works on the following platforms:
- Microsoft Windows XP and above.
- Wine (Windows Emulator).
Fails on the following platforms:
- Microsoft Windows 2000 and below.
- ReactOS.
@type bKillOnExit: bool
@param bKillOnExit: C{True} to automatically kill processes when the
debugger thread dies. C{False} to automatically detach from
processes when the debugger thread dies.
@rtype: bool
@return: C{True} on success, C{False} on error.
@note:
This call will fail if a debug port was not created. That is, if
the debugger isn't attached to at least one process. For more info
see: U{http://msdn.microsoft.com/en-us/library/ms679307.aspx}
|
def prepare(self, config_file=None, user=None, password=None, **kwargs):
"""登录的统一接口
:param config_file 登录数据文件,若无则选择参数登录模式
:param user: 各家券商的账号或者雪球的用户名
:param password: 密码, 券商为加密后的密码,雪球为明文密码
:param account: [雪球登录需要]雪球手机号(邮箱手机二选一)
:param portfolio_code: [雪球登录需要]组合代码
:param portfolio_market: [雪球登录需要]交易市场,
可选['cn', 'us', 'hk'] 默认 'cn'
"""
if config_file is not None:
self.read_config(config_file)
else:
self._prepare_account(user, password, **kwargs)
self.autologin()
|
Unified login entry point
:param config_file: login data file; if omitted, parameter-based login is used
:param user: account number for a broker, or the Xueqiu username
:param password: password; encrypted for brokers, plaintext for Xueqiu
:param account: [Xueqiu login only] Xueqiu phone number (phone or email, one of the two)
:param portfolio_code: [Xueqiu login only] portfolio code
:param portfolio_market: [Xueqiu login only] trading market,
one of ['cn', 'us', 'hk'], default 'cn'
|
def query_parent_objects(self, context, query=None):
"""Return the objects of the same type from the parent object
:param query: Catalog query to narrow down the objects
:type query: dict
:returns: Content objects of the same portal type in the parent
"""
# return the object values if we have no catalog query
if query is None:
return self.get_parent_objects(context)
# avoid undefined reference of catalog in except...
catalog = None
# try to fetch the results via the catalog
try:
catalogs = api.get_catalogs_for(context)
catalog = catalogs[0]
return map(api.get_object, catalog(query))
except (IndexError, UnicodeDecodeError, ParseError, APIError) as e:
# fall back to the object values of the parent
logger.warn("UniqueFieldValidator: Catalog query {} failed "
"for catalog {} ({}) -> returning object values of {}"
.format(query, repr(catalog), str(e),
repr(api.get_parent(context))))
return self.get_parent_objects(context)
|
Return the objects of the same type from the parent object
:param query: Catalog query to narrow down the objects
:type query: dict
:returns: Content objects of the same portal type in the parent
|
def clear_sonos_playlist(self, sonos_playlist, update_id=0):
"""Clear all tracks from a Sonos playlist.
This is a convenience method for :py:meth:`reorder_sonos_playlist`.
Example::
device.clear_sonos_playlist(sonos_playlist)
Args:
sonos_playlist
(:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
Sonos playlist object or the item_id (str) of the Sonos
playlist.
update_id (int): Optional update counter for the object. If left
at the default of 0, it will be looked up.
Returns:
dict: See :py:meth:`reorder_sonos_playlist`
Raises:
ValueError: If sonos_playlist specified by string and is not found.
SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
"""
if not isinstance(sonos_playlist, DidlPlaylistContainer):
sonos_playlist = self.get_sonos_playlist_by_attr('item_id',
sonos_playlist)
count = self.music_library.browse(ml_item=sonos_playlist).total_matches
tracks = ','.join([str(x) for x in range(count)])
if tracks:
return self.reorder_sonos_playlist(sonos_playlist, tracks=tracks,
new_pos='', update_id=update_id)
else:
return {'change': 0, 'update_id': update_id, 'length': count}
|
Clear all tracks from a Sonos playlist.
This is a convenience method for :py:meth:`reorder_sonos_playlist`.
Example::
device.clear_sonos_playlist(sonos_playlist)
Args:
sonos_playlist
(:py:class:`~.soco.data_structures.DidlPlaylistContainer`):
Sonos playlist object or the item_id (str) of the Sonos
playlist.
update_id (int): Optional update counter for the object. If left
at the default of 0, it will be looked up.
Returns:
dict: See :py:meth:`reorder_sonos_playlist`
Raises:
ValueError: If sonos_playlist specified by string and is not found.
SoCoUPnPException: See :py:meth:`reorder_sonos_playlist`
|
def insertOntology(self, ontology):
"""
Inserts the specified ontology into this repository.
"""
try:
models.Ontology.create(
id=ontology.getName(),
name=ontology.getName(),
dataurl=ontology.getDataUrl(),
ontologyprefix=ontology.getOntologyPrefix())
except Exception:
raise exceptions.DuplicateNameException(
ontology.getName())
|
Inserts the specified ontology into this repository.
|
def rotate_in_plane(chi, phase):
"""For transforming spins between the coprecessing and coorbital frames"""
v = chi.T
sp = np.sin(phase)
cp = np.cos(phase)
res = 1.*v
res[0] = v[0]*cp + v[1]*sp
res[1] = v[1]*cp - v[0]*sp
return res.T
|
For transforming spins between the coprecessing and coorbital frames
|
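A quick sanity check of the rotation above:
import numpy as np

chi = np.array([[1.0, 0.0, 0.0]])   # one spin along x
out = rotate_in_plane(chi, np.pi / 2)
# res[0] = cos(phase), res[1] = -sin(phase), so out ~ [[0, -1, 0]]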
def padRectEqually(rect, padding, bounds, clipExcess = True):
"""
Applies equal padding to all sides of a rectangle,
ensuring the padded rectangle falls within the specified bounds.
The input rectangle, bounds, and return value are all a tuple of (x,y,w,h).
"""
return padRect(rect, padding, padding, padding, padding, bounds, clipExcess)
|
Applies equal padding to all sides of a rectangle,
ensuring the padded rectangle falls within the specified bounds.
The input rectangle, bounds, and return value are all a tuple of (x,y,w,h).
|
def _get_rh_methods(rh):
"""Yield all HTTP methods in ``rh`` that are decorated
with schema.validate"""
for k, v in vars(rh).items():
if all([
k in HTTP_METHODS,
is_method(v),
hasattr(v, "input_schema")
]):
yield (k, v)
|
Yield all HTTP methods in ``rh`` that are decorated
with schema.validate
|
def do__relative_load(self, args: argparse.Namespace) -> None:
"""Run commands in script file that is encoded as either ASCII or UTF-8 text"""
file_path = args.file_path
# NOTE: Relative path is an absolute path, it is just relative to the current script directory
relative_path = os.path.join(self._current_script_dir or '', file_path)
self.do_load(relative_path)
|
Run commands in a script file that is encoded as either ASCII or UTF-8 text
|
def data_check(data,target):
""" Checks data type
Parameters
----------
data : pd.DataFrame or np.array
Field to specify the time series data that will be used.
target : int or str
Target column
Returns
----------
transformed_data : np.array
Raw data array for use in the model
data_name : str
Name of the data
is_pandas : Boolean
True if pandas data, else numpy
data_index : np.array
The time indices for the data
"""
# Check pandas or numpy
    if isinstance(data, pd.DataFrame):  # pd.core.frame.DataFrame is the same class
        data_index = data.index
        if target is None:
            transformed_data = data.iloc[:, 0].values  # .ix was removed from pandas
data_name = str(data.columns.values[0])
else:
transformed_data = data[target].values
data_name = str(target)
is_pandas = True
elif isinstance(data, np.ndarray):
data_name = "Series"
is_pandas = False
if any(isinstance(i, np.ndarray) for i in data):
if target is None:
transformed_data = data[0]
data_index = list(range(len(data[0])))
else:
transformed_data = data[target]
data_index = list(range(len(data[target])))
else:
transformed_data = data
data_index = list(range(len(data)))
else:
raise Exception("The data input is not pandas or numpy compatible!")
return transformed_data, data_name, is_pandas, data_index
|
Checks data type
Parameters
----------
data : pd.DataFrame or np.array
Field to specify the time series data that will be used.
target : int or str
Target column
Returns
----------
transformed_data : np.array
Raw data array for use in the model
data_name : str
Name of the data
is_pandas : Boolean
True if pandas data, else numpy
data_index : np.array
The time indices for the data
|
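For example, with a pandas input:
import pandas as pd

df = pd.DataFrame({'sales': [3.0, 4.5, 5.1]},
                  index=pd.date_range('2020-01-01', periods=3))
values, name, is_pandas, index = data_check(df, target='sales')
# values -> array([3. , 4.5, 5.1]), name -> 'sales', is_pandas -> True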
def emit(self, record):
"""Store the message, not only the record."""
self.records.append(Record(levelno=record.levelno, levelname=record.levelname,
message=self.format(record)))
return super(SetupLogChecker, self).emit(record)
|
Store the message, not only the record.
|
def extract_common(self, keys):
"""
Return a new segmentlistdict containing only those
segmentlists associated with the keys in keys, with each
set to their mutual intersection. The offsets are
preserved.
"""
keys = set(keys)
new = self.__class__()
intersection = self.intersection(keys)
for key in keys:
dict.__setitem__(new, key, _shallowcopy(intersection))
dict.__setitem__(new.offsets, key, self.offsets[key])
return new
|
Return a new segmentlistdict containing only those
segmentlists associated with the keys in keys, with each
set to their mutual intersection. The offsets are
preserved.
|
def get_num_nodes(properties=None, hadoop_conf_dir=None, offline=False):
"""
Get the number of task trackers in the Hadoop cluster.
All arguments are passed to :func:`get_task_trackers`.
"""
return len(get_task_trackers(properties, hadoop_conf_dir, offline))
|
Get the number of task trackers in the Hadoop cluster.
All arguments are passed to :func:`get_task_trackers`.
|
def make_parser(defaults=None):
"""
:param defaults: Default option values
"""
if defaults is None:
defaults = DEFAULTS
ctypes = API.list_types()
ctypes_s = ", ".join(ctypes)
type_help = "Select type of %s config files from " + \
ctypes_s + " [Automatically detected by file ext]"
mts = API.MERGE_STRATEGIES
mts_s = ", ".join(mts)
mt_help = "Select strategy to merge multiple configs from " + \
mts_s + " [%(merge)s]" % defaults
parser = argparse.ArgumentParser(usage=USAGE)
parser.set_defaults(**defaults)
parser.add_argument("inputs", type=str, nargs='*', help="Input files")
parser.add_argument("--version", action="version",
version="%%(prog)s %s" % anyconfig.globals.VERSION)
lpog = parser.add_argument_group("List specific options")
lpog.add_argument("-L", "--list", action="store_true",
help="List supported config types")
spog = parser.add_argument_group("Schema specific options")
spog.add_argument("--validate", action="store_true",
help="Only validate input files and do not output. "
"You must specify schema file with -S/--schema "
"option.")
spog.add_argument("--gen-schema", action="store_true",
help="Generate JSON schema for givne config file[s] "
"and output it instead of (merged) configuration.")
gspog = parser.add_argument_group("Query/Get/set options")
gspog.add_argument("-Q", "--query", help=_QUERY_HELP)
gspog.add_argument("--get", help=_GET_HELP)
gspog.add_argument("--set", help=_SET_HELP)
parser.add_argument("-o", "--output", help="Output file path")
parser.add_argument("-I", "--itype", choices=ctypes, metavar="ITYPE",
help=(type_help % "Input"))
parser.add_argument("-O", "--otype", choices=ctypes, metavar="OTYPE",
help=(type_help % "Output"))
parser.add_argument("-M", "--merge", choices=mts, metavar="MERGE",
help=mt_help)
parser.add_argument("-A", "--args", help="Argument configs to override")
parser.add_argument("--atype", choices=ctypes, metavar="ATYPE",
help=_ATYPE_HELP_FMT % ctypes_s)
cpog = parser.add_argument_group("Common options")
cpog.add_argument("-x", "--ignore-missing", action="store_true",
help="Ignore missing input files")
cpog.add_argument("-T", "--template", action="store_true",
help="Enable template config support")
cpog.add_argument("-E", "--env", action="store_true",
help="Load configuration defaults from "
"environment values")
cpog.add_argument("-S", "--schema", help="Specify Schema file[s] path")
cpog.add_argument("-e", "--extra-opts",
help="Extra options given to the API call, "
"--extra-options indent:2 (specify the "
"indent for pretty-printing of JSON outputs) "
"for example")
cpog.add_argument("-v", "--verbose", action="count", dest="loglevel",
help="Verbose mode; -v or -vv (more verbose)")
return parser
|
:param defaults: Default option values
|
def load_notebook_node(notebook_path):
"""Returns a notebook object with papermill metadata loaded from the specified path.
Args:
notebook_path (str): Path to the notebook file.
Returns:
nbformat.NotebookNode
"""
nb = nbformat.reads(papermill_io.read(notebook_path), as_version=4)
if not hasattr(nb.metadata, 'papermill'):
nb.metadata['papermill'] = {
'parameters': dict(),
'environment_variables': dict(),
'version': __version__,
}
for cell in nb.cells:
if not hasattr(cell.metadata, 'tags'):
cell.metadata['tags'] = [] # Create tags attr if one doesn't exist.
if not hasattr(cell.metadata, 'papermill'):
cell.metadata['papermill'] = dict()
return nb
|
Returns a notebook object with papermill metadata loaded from the specified path.
Args:
notebook_path (str): Path to the notebook file.
Returns:
nbformat.NotebookNode
|
def _splitit(self, line, isheader):
"""Split each element of line to fit the column width
Each element is turned into a list, result of the wrapping of the
string to the desired width
"""
line_wrapped = []
for cell, width in zip(line, self._width):
array = []
for c in cell.split('\n'):
array.extend(textwrap.wrap(unicode(c, 'utf'), width))
line_wrapped.append(array)
max_cell_lines = reduce(max, map(len, line_wrapped))
for cell, valign in zip(line_wrapped, self._valign):
if isheader:
valign = "t"
if valign == "m":
missing = max_cell_lines - len(cell)
cell[:0] = [""] * (missing / 2)
cell.extend([""] * (missing / 2 + missing % 2))
elif valign == "b":
cell[:0] = [""] * (max_cell_lines - len(cell))
else:
cell.extend([""] * (max_cell_lines - len(cell)))
return line_wrapped
|
Split each element of line to fit the column width
Each element is turned into a list, result of the wrapping of the
string to the desired width
|
def elbow_method(data, k_min, k_max, distance='euclidean'):
"""
    Calculates and plots the percentage of variance explained against the number of clusters
Implementation reference: https://github.com/sarguido/k-means-clustering.rst
:param data: The dataset
:param k_min: lowerbound of the cluster range
:param k_max: upperbound of the cluster range
:param distance: the distance metric, 'euclidean' by default
:return:
"""
# Determine your k range
k_range = range(k_min, k_max)
# Fit the kmeans model for each n_clusters = k
k_means_var = [Clustering.kmeans(k).fit(data) for k in k_range]
# Pull out the cluster centers for each model
centroids = [X.model.cluster_centers_ for X in k_means_var]
# Calculate the Euclidean distance from
# each point to each cluster center
k_euclid = [cdist(data, cent, distance) for cent in centroids]
dist = [np.min(ke, axis=1) for ke in k_euclid]
# Total within-cluster sum of squares
wcss = [sum(d ** 2) for d in dist]
# The total sum of squares
tss = sum(pdist(data) ** 2) / data.shape[0]
# The between-cluster sum of squares
bss = tss - wcss
# elbow curve
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(k_range, bss / tss * 100, 'b*-')
ax.set_ylim((0, 100))
plt.grid(True)
plt.xlabel('n_clusters')
plt.ylabel('Percentage of variance explained')
plt.title('Variance Explained vs. k')
plt.show()
|
Calculates and plots the percentage of variance explained against the number of clusters
Implementation reference: https://github.com/sarguido/k-means-clustering.rst
:param data: The dataset
:param k_min: lowerbound of the cluster range
:param k_max: upperbound of the cluster range
:param distance: the distance metric, 'euclidean' by default
:return:
|
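An illustrative call, assuming the surrounding Clustering helper and matplotlib are importable and the data has three well-separated clusters:
import numpy as np

rng = np.random.default_rng(0)
data = np.vstack([rng.normal(loc=c, scale=0.5, size=(50, 2))
                  for c in (0.0, 5.0, 10.0)])
elbow_method(data, k_min=1, k_max=8)  # the "elbow" should appear near k = 3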
def index_resolver(index, strict=False):
"""Returns a function that accepts a value and returns index[value]."""
if strict:
return lambda id_: index[id_]
else:
return index.get
|
Returns a function that accepts a value and returns index[value].
|
def get_departures(self, station):
"""
Fetch the current departure times from this station
http://webservices.ns.nl/ns-api-avt?station=${Naam of afkorting Station}
@param station: station to lookup
"""
url = 'http://webservices.ns.nl/ns-api-avt?station=' + station
raw_departures = self._request('GET', url)
return self.parse_departures(raw_departures)
|
Fetch the current departure times from this station
http://webservices.ns.nl/ns-api-avt?station=${Naam of afkorting Station}
@param station: station to lookup
|
def create(obj: PersistedObject, obj_type: Type[T], extensions_supported: Iterable[str]):
"""
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
:param obj:
:param obj_type:
:param extensions_supported:
:return:
"""
# base message
msg = "{obj} cannot be parsed as a {typ} because no parser supporting that extension ({ext}) is able to " \
"create this type of object." \
"".format(obj=obj, typ=get_pretty_type_str(obj_type), ext=obj.get_pretty_file_ext())
# add details
if extensions_supported is not None and len(extensions_supported) > 0:
msg += " If you wish to parse this fileobject to that precise type, you may wish to either " \
"(1) replace the file with any of the following extensions currently supported : {exts} " \
"(see get_capabilities_for_type({typ}, strict_type_matching=False) for details)." \
" Or (2) register a new parser." \
"".format(exts=extensions_supported, typ=get_pretty_type_str(obj_type))
else:
raise ValueError('extensions_supported should be provided to create a NoParserFoundForObjectExt. If no '
'extension is supported, use NoParserFoundForObjectType.create instead')
e = NoParserFoundForObjectExt(msg)
# save the extensions supported
e.extensions_supported = extensions_supported
return e
|
Helper method provided because we actually can't put that in the constructor, it creates a bug in Nose tests
https://github.com/nose-devs/nose/issues/725
:param obj:
:param obj_type:
:param extensions_supported:
:return:
|
def get_subtask_fields(config_class):
"""Get all configurable subtask fields from a Config class.
Parameters
----------
config_class : ``lsst.pipe.base.Config``-type
The configuration class (not an instance) corresponding to a Task.
Returns
-------
subtask_fields : `dict`
Mapping where keys are the config attribute names and values are
subclasses of ``lsst.pex.config.ConfigurableField`` or
        ``RegistryField``. The mapping is alphabetically ordered by
attribute name.
"""
from lsst.pex.config import ConfigurableField, RegistryField
def is_subtask_field(obj):
return isinstance(obj, (ConfigurableField, RegistryField))
return _get_alphabetical_members(config_class, is_subtask_field)
|
Get all configurable subtask fields from a Config class.
Parameters
----------
config_class : ``lsst.pipe.base.Config``-type
The configuration class (not an instance) corresponding to a Task.
Returns
-------
subtask_fields : `dict`
Mapping where keys are the config attribute names and values are
subclasses of ``lsst.pex.config.ConfigurableField`` or
``RegistryField``. The mapping is alphabetically ordered by
attribute name.
|
def get_configuration_dict(self, secret_attrs=False):
"""Type-specific configuration for backward compatibility"""
cd = {'repo_nexml2json': self.repo_nexml2json,
'number_of_shards': len(self._shards),
'initialization': self._filepath_args,
'shards': [],
}
for i in self._shards:
cd['shards'].append(i.get_configuration_dict(secret_attrs=secret_attrs))
return cd
|
Type-specific configuration for backward compatibility
|
def rows(self, offs):
'''
Iterate over raw indx, bytes tuples from a given offset.
'''
lkey = s_common.int64en(offs)
for lkey, byts in self.slab.scanByRange(lkey, db=self.db):
indx = s_common.int64un(lkey)
yield indx, byts
|
Iterate over raw indx, bytes tuples from a given offset.
|
def rank_dated_files(pattern, dir, descending=True):
"""Search a directory for files that match a pattern. Return an ordered list of these files by filename.
Args:
pattern: The glob pattern to search for.
dir: Path to directory where the files will be searched for.
descending: Default True, will sort alphabetically by descending order.
Returns:
list: Rank-ordered list by filename.
"""
files = glob.glob(op.join(dir, pattern))
return sorted(files, reverse=descending)
|
Search a directory for files that match a pattern. Return an ordered list of these files by filename.
Args:
pattern: The glob pattern to search for.
dir: Path to directory where the files will be searched for.
descending: Default True, will sort alphabetically by descending order.
Returns:
list: Rank-ordered list by filename.
|
def amplify_ground_shaking(T, vs30, gmvs):
"""
:param T: period
:param vs30: velocity
:param gmvs: ground motion values for the current site in units of g
"""
gmvs[gmvs > MAX_GMV] = MAX_GMV # accelerations > 5g are absurd
interpolator = interpolate.interp1d(
[0, 0.1, 0.2, 0.3, 0.4, 5],
[(760 / vs30)**0.35,
(760 / vs30)**0.35,
(760 / vs30)**0.25,
(760 / vs30)**0.10,
(760 / vs30)**-0.05,
(760 / vs30)**-0.05],
) if T <= 0.3 else interpolate.interp1d(
[0, 0.1, 0.2, 0.3, 0.4, 5],
[(760 / vs30)**0.65,
(760 / vs30)**0.65,
(760 / vs30)**0.60,
(760 / vs30)**0.53,
(760 / vs30)**0.45,
(760 / vs30)**0.45],
)
return interpolator(gmvs) * gmvs
|
:param T: period
:param vs30: velocity
:param gmvs: ground motion values for the current site in units of g
|
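A worked number for the short-period branch above:
import numpy as np

gmvs = np.array([0.05])  # 0.05 g, below the first interpolation knot at 0.1
out = amplify_ground_shaking(T=0.2, vs30=400.0, gmvs=gmvs)
# amplification factor = (760 / 400) ** 0.35 ~ 1.25, so out ~ [0.0626]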
def find_equips(
self,
name,
iexact,
environment,
equip_type,
group,
ip,
pagination):
"""
        Find equipments by all search parameters
        :param name: Filter by equipment name column
:param iexact: Filter by name will be exact?
:param environment: Filter by environment ID related
:param equip_type: Filter by equipment_type ID related
:param group: Filter by equipment group ID related
:param ip: Filter by each octs in ips related
:param pagination: Class with all data needed to paginate
:return: Following dictionary:
::
{'equipamento': {'id': < id_vlan >,
'nome': < nome_vlan >,
'num_vlan': < num_vlan >,
'id_ambiente': < id_ambiente >,
'descricao': < descricao >,
'acl_file_name': < acl_file_name >,
'acl_valida': < acl_valida >,
'ativada': < ativada >,
'ambiente_name': < divisao_dc-ambiente_logico-grupo_l3 >
'redeipv4': [ { all networkipv4 related } ],
'redeipv6': [ { all networkipv6 related } ] },
'total': {< total_registros >} }
:raise InvalidParameterError: Some parameter was invalid.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not isinstance(pagination, Pagination):
raise InvalidParameterError(
u"Invalid parameter: pagination must be a class of type 'Pagination'.")
equip_map = dict()
equip_map["start_record"] = pagination.start_record
equip_map["end_record"] = pagination.end_record
equip_map["asorting_cols"] = pagination.asorting_cols
equip_map["searchable_columns"] = pagination.searchable_columns
equip_map["custom_search"] = pagination.custom_search
equip_map["nome"] = name
equip_map["exato"] = iexact
equip_map["ambiente"] = environment
equip_map["tipo_equipamento"] = equip_type
equip_map["grupo"] = group
equip_map["ip"] = ip
url = "equipamento/find/"
code, xml = self.submit({"equipamento": equip_map}, "POST", url)
key = "equipamento"
return get_list_map(
self.response(
code, xml, [
key, "ips", "grupos"]), key)
|
Find equipments by all search parameters
:param name: Filter by equipment name column
:param iexact: Filter by name will be exact?
:param environment: Filter by environment ID related
:param equip_type: Filter by equipment_type ID related
:param group: Filter by equipment group ID related
:param ip: Filter by each octs in ips related
:param pagination: Class with all data needed to paginate
:return: Following dictionary:
::
{'equipamento': {'id': < id_vlan >,
'nome': < nome_vlan >,
'num_vlan': < num_vlan >,
'id_ambiente': < id_ambiente >,
'descricao': < descricao >,
'acl_file_name': < acl_file_name >,
'acl_valida': < acl_valida >,
'ativada': < ativada >,
'ambiente_name': < divisao_dc-ambiente_logico-grupo_l3 >
'redeipv4': [ { all networkipv4 related } ],
'redeipv6': [ { all networkipv6 related } ] },
'total': {< total_registros >} }
:raise InvalidParameterError: Some parameter was invalid.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
|
def main():
'''
Parse command line options and launch the interpreter
'''
parser = optparse.OptionParser(usage="%prog [options] <model_path> [another_model_path..]",
version=xtuml.version.complete_string,
formatter=optparse.TitledHelpFormatter())
parser.add_option("-v", "--verbosity", dest='verbosity', action="count",
default=1, help="increase debug logging level")
parser.add_option("-f", "--function", dest='function', action="store",
help="invoke function named NAME", metavar='NAME')
parser.add_option("-c", "--component", dest='component', action="store",
help="look for the function in a component named NAME",
metavar='NAME', default=None)
(opts, args) = parser.parse_args()
if len(args) == 0 or not opts.function:
parser.print_help()
sys.exit(1)
levels = {
0: logging.ERROR,
1: logging.WARNING,
2: logging.INFO,
3: logging.DEBUG,
}
logging.basicConfig(level=levels.get(opts.verbosity, logging.DEBUG))
from bridgepoint import ooaofooa
mm = ooaofooa.load_metamodel(args)
c_c = mm.select_any('C_C', where(Name=opts.component))
domain = ooaofooa.mk_component(mm, c_c, derived_attributes=False)
func = domain.find_symbol(opts.function)
return func()
|
Parse command line options and launch the interpreter
|
def focal(self):
"""
Get the focal length in pixels for the camera.
Returns
------------
focal : (2,) float
Focal length in pixels
"""
if self._focal is None:
# calculate focal length from FOV
focal = [(px / 2.0) / np.tan(np.radians(fov / 2.0))
for px, fov in zip(self._resolution, self.fov)]
# store as correct dtype
self._focal = np.asanyarray(focal, dtype=np.float64)
return self._focal
|
Get the focal length in pixels for the camera.
Returns
------------
focal : (2,) float
Focal length in pixels
|
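The pinhole relation used above, worked for one axis:
import numpy as np

resolution_px, fov_deg = 1024, 60.0
focal_px = (resolution_px / 2.0) / np.tan(np.radians(fov_deg / 2.0))
# 512 / tan(30 deg) ~ 886.8 pixels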