| code (string, lengths 75 – 104k) | docstring (string, lengths 1 – 46.9k) |
|---|---|
def visit_BitVecSub(self, expression, *operands):
""" a - 0 ==> 0
(a + b) - b ==> a
(b + a) - b ==> a
"""
left = expression.operands[0]
right = expression.operands[1]
if isinstance(left, BitVecAdd):
if self._same_constant(left.operands[0], right):
return left.operands[1]
elif self._same_constant(left.operands[1], right):
return left.operands[0]
|
a - 0 ==> a
(a + b) - b ==> a
(b + a) - b ==> a
|
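To make the rewrite rules above concrete, here is a minimal, self-contained sketch of the same simplifications using plain tuples to model expressions (purely illustrative; not the Manticore visitor API):

def simplify_sub(expr):
    # (a, 0) -> a ; (('add', a, b), b) -> a ; (('add', b, a), b) -> a
    left, right = expr
    if right == 0:
        return left
    if isinstance(left, tuple) and left[0] == 'add':
        _, x, y = left
        if y == right:
            return x
        if x == right:
            return y
    return ('sub', left, right)

assert simplify_sub(('a', 0)) == 'a'
assert simplify_sub((('add', 'a', 'b'), 'b')) == 'a'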
def serialize(self, content):
""" Serialize to JSONP.
:return string: serialized JSONP
"""
content = super(JSONPEmitter, self).serialize(content)
callback = self.request.GET.get('callback', 'callback')
return u'%s(%s)' % (callback, content)
|
Serialize to JSONP.
:return string: serialized JSONP
|
def set_privkey_compressed(privkey, compressed=True):
"""
Ensure the given hex-encoded private key is in the requested compressed or uncompressed form
"""
if len(privkey) != 64 and len(privkey) != 66:
raise ValueError("expected 32-byte private key as a hex string")
# compressed?
if compressed and len(privkey) == 64:
privkey += '01'
if not compressed and len(privkey) == 66:
if privkey[-2:] != '01':
raise ValueError("private key does not end in '01'")
privkey = privkey[:-2]
return privkey
|
Ensure the given hex-encoded private key is in the requested compressed or uncompressed form
|
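A quick usage sketch of the helper above, using a hypothetical all-'aa' key purely for illustration:

key = "aa" * 32                                                       # 64 hex chars, uncompressed form
assert len(set_privkey_compressed(key, compressed=True)) == 66        # '01' suffix appended
assert set_privkey_compressed(key + "01", compressed=False) == key    # '01' suffix stripped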
def dump(self):
"""Prints out the contents of the import map."""
for modpath in sorted(self.map):
title = 'Imports in %s' % modpath
print('\n' + title + '\n' + '-'*len(title))
for name, value in sorted(self.map.get(modpath, {}).items()):
print(' %s -> %s' % (name, ', '.join(sorted(value))))
|
Prints out the contents of the import map.
|
def element_at(index):
"""Create a transducer which obtains the item at the specified index."""
if index < 0:
raise IndexError("element_at used with illegal index {}".format(index))
def element_at_transducer(reducer):
return ElementAt(reducer, index)
return element_at_transducer
|
Create a transducer which obtains the item at the specified index.
|
def classinstances(cls):
"""Return all instances of the current class
JB_Gui itself will not return instances of its subclasses.
A subclass will only return the instances that have the same
type as the subclass, so it won't return instances of further subclasses.
:returns: all instances of the current class
:rtype: list
:raises: None
"""
l = [i for i in cls.allinstances() if type(i) == cls]
return l
|
Return all instances of the current class
JB_Gui itself will not return instances of its subclasses.
A subclass will only return the instances that have the same
type as the subclass, so it won't return instances of further subclasses.
:returns: all instances of the current class
:rtype: list
:raises: None
|
def setWidth(self, typeID, width):
"""setWidth(string, double) -> None
Sets the width in m of vehicles of this type.
"""
self._connection._sendDoubleCmd(
tc.CMD_SET_VEHICLETYPE_VARIABLE, tc.VAR_WIDTH, typeID, width)
|
setWidth(string, double) -> None
Sets the width in m of vehicles of this type.
|
def _ExtractContentSettingsExceptions(self, exceptions_dict, parser_mediator):
"""Extracts site specific events.
Args:
exceptions_dict (dict): Permission exceptions data from Preferences file.
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
"""
for permission in exceptions_dict:
if permission not in self._EXCEPTIONS_KEYS:
continue
exception_dict = exceptions_dict.get(permission, {})
for urls, url_dict in exception_dict.items():
last_used = url_dict.get('last_used', None)
if not last_used:
continue
# If secondary_url is '*', the permission applies to primary_url.
# If secondary_url is a valid URL, the permission applies to
# elements loaded from secondary_url being embedded in primary_url.
primary_url, secondary_url = urls.split(',')
event_data = ChromeContentSettingsExceptionsEventData()
event_data.permission = permission
event_data.primary_url = primary_url
event_data.secondary_url = secondary_url
timestamp = int(last_used * 1000000)
date_time = dfdatetime_posix_time.PosixTimeInMicroseconds(
timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Extracts site specific events.
Args:
exceptions_dict (dict): Permission exceptions data from Preferences file.
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
|
def decode(self, encoded_packet):
"""Decode a transmitted package."""
b64 = False
if not isinstance(encoded_packet, binary_types):
encoded_packet = encoded_packet.encode('utf-8')
elif not isinstance(encoded_packet, bytes):
encoded_packet = bytes(encoded_packet)
self.packet_type = six.byte2int(encoded_packet[0:1])
if self.packet_type == 98: # 'b' --> binary base64 encoded packet
self.binary = True
encoded_packet = encoded_packet[1:]
self.packet_type = six.byte2int(encoded_packet[0:1])
self.packet_type -= 48
b64 = True
elif self.packet_type >= 48:
self.packet_type -= 48
self.binary = False
else:
self.binary = True
self.data = None
if len(encoded_packet) > 1:
if self.binary:
if b64:
self.data = base64.b64decode(encoded_packet[1:])
else:
self.data = encoded_packet[1:]
else:
try:
self.data = self.json.loads(
encoded_packet[1:].decode('utf-8'))
if isinstance(self.data, int):
# do not allow integer payloads, see
# github.com/miguelgrinberg/python-engineio/issues/75
# for background on this decision
raise ValueError
except ValueError:
self.data = encoded_packet[1:].decode('utf-8')
|
Decode a transmitted packet.
|
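As a worked illustration of the wire format handled by decode() above, a plain-text engine.io packet starts with a single digit giving the packet type, followed by the payload (values here are illustrative):

encoded = b'4{"hello": "world"}'
packet_type = ord(encoded[0:1]) - 48   # '4' -> 4 (MESSAGE)
payload = encoded[1:]                  # the JSON body
print(packet_type, payload)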
def _process_request(self, request, client_address):
"""Actually processes the request."""
try:
self.finish_request(request, client_address)
except Exception:
self.handle_error(request, client_address)
finally:
self.shutdown_request(request)
|
Actually processes the request.
|
def get_task_runner(local_task_job):
"""
Get the task runner that can be used to run the given job.
:param local_task_job: The LocalTaskJob associated with the TaskInstance
that needs to be executed.
:type local_task_job: airflow.jobs.LocalTaskJob
:return: The task runner to use to run the task.
:rtype: airflow.task.task_runner.base_task_runner.BaseTaskRunner
"""
if _TASK_RUNNER == "StandardTaskRunner":
return StandardTaskRunner(local_task_job)
elif _TASK_RUNNER == "CgroupTaskRunner":
from airflow.contrib.task_runner.cgroup_task_runner import CgroupTaskRunner
return CgroupTaskRunner(local_task_job)
else:
raise AirflowException("Unknown task runner type {}".format(_TASK_RUNNER))
|
Get the task runner that can be used to run the given job.
:param local_task_job: The LocalTaskJob associated with the TaskInstance
that needs to be executed.
:type local_task_job: airflow.jobs.LocalTaskJob
:return: The task runner to use to run the task.
:rtype: airflow.task.task_runner.base_task_runner.BaseTaskRunner
|
def read(cls, proto):
"""
Calls :meth:`~nupic.bindings.regions.PyRegion.PyRegion.readFromProto`
on subclass after converting proto to specific type using
:meth:`~nupic.bindings.regions.PyRegion.PyRegion.getSchema`.
:param proto: PyRegionProto capnproto object
"""
regionImpl = proto.regionImpl.as_struct(cls.getSchema())
return cls.readFromProto(regionImpl)
|
Calls :meth:`~nupic.bindings.regions.PyRegion.PyRegion.readFromProto`
on subclass after converting proto to specific type using
:meth:`~nupic.bindings.regions.PyRegion.PyRegion.getSchema`.
:param proto: PyRegionProto capnproto object
|
def timeline(self, timeline="home", max_id=None, min_id=None, since_id=None, limit=None):
"""
Fetch statuses, most recent ones first. `timeline` can be 'home', 'local', 'public',
'tag/hashtag' or 'list/id'. See the following functions documentation for what those do.
Local hashtag timelines are supported via the `timeline_hashtag()`_ function.
The default timeline is the "home" timeline.
Media only queries are supported via the `timeline_public()`_ and `timeline_hashtag()`_ functions.
Returns a list of `toot dicts`_.
"""
if max_id != None:
max_id = self.__unpack_id(max_id)
if min_id != None:
min_id = self.__unpack_id(min_id)
if since_id != None:
since_id = self.__unpack_id(since_id)
params_initial = locals()
if timeline == "local":
timeline = "public"
params_initial['local'] = True
params = self.__generate_params(params_initial, ['timeline'])
url = '/api/v1/timelines/{0}'.format(timeline)
return self.__api_request('GET', url, params)
|
Fetch statuses, most recent ones first. `timeline` can be 'home', 'local', 'public',
'tag/hashtag' or 'list/id'. See the following functions documentation for what those do.
Local hashtag timelines are supported via the `timeline_hashtag()`_ function.
The default timeline is the "home" timeline.
Media only queries are supported via the `timeline_public()`_ and `timeline_hashtag()`_ functions.
Returns a list of `toot dicts`_.
|
def align(s1,s2,test=False,seqfmt='dna',
psm=None,pmm=None,pgo=None,pge=None,
matrix=None,
outscore=False):
"""
Creates a pairwise local alignment between sequences.
Get the visualization and alignment scores.
:param s1: sequence 1
:param s2: sequence 2
REF: http://biopython.org/DIST/docs/api/Bio.pairwise2-module.html
The match parameters are:
CODE DESCRIPTION
x No parameters. Identical characters have score of 1, otherwise 0.
m A match score is the score of identical chars, otherwise mismatch
score.
d A dictionary returns the score of any pair of characters.
c A callback function returns scores.
The gap penalty parameters are:
CODE DESCRIPTION
x No gap penalties.
s Same open and extend gap penalties for both sequences.
d The sequences have different open and extend gap penalties.
c A callback function returns the gap penalties.
--
DNA:
localms: psm=2, pmm=0.5, pgo=-3, pge=-1
Protein:
http://resources.qiagenbioinformatics.com/manuals/clcgenomicsworkbench/650/Use_scoring_matrices.html
"""
import operator
import numpy as np
from Bio import pairwise2
if seqfmt=='dna':
if any([p is None for p in [psm,pmm,pgo,pge]]):
alignments = pairwise2.align.localxx(s1.upper(),s2.upper())
else:
alignments = pairwise2.align.localms(s1.upper(),s2.upper(),psm,pmm,pgo,pge)
elif seqfmt=='protein':
from Bio.pairwise2 import format_alignment
from Bio.SubsMat import MatrixInfo
if matrix is None:
matrix = MatrixInfo.blosum62
alignments =pairwise2.align.globaldx(s1, s2, matrix)
# print(format_alignment(*a))
if test:
print(alignments)
alignsymb=np.nan
score=np.nan
# take the best-scoring alignment first (pairwise2 returns optimal alignments)
sorted_alignments = sorted(alignments, key=operator.itemgetter(2), reverse=True)
for a in sorted_alignments:
alignstr=pairwise2.format_alignment(*a)
alignsymb=alignstr.split('\n')[1]
score=a[2]
if test:
print(alignstr)
break
if not outscore:
return alignsymb.replace(' ','-'),score
else:
return score
|
Creates a pairwise local alignment between sequences.
Get the visualization and alignment scores.
:param s1: sequence 1
:param s2: sequence 2
REF: http://biopython.org/DIST/docs/api/Bio.pairwise2-module.html
The match parameters are:
CODE DESCRIPTION
x No parameters. Identical characters have score of 1, otherwise 0.
m A match score is the score of identical chars, otherwise mismatch
score.
d A dictionary returns the score of any pair of characters.
c A callback function returns scores.
The gap penalty parameters are:
CODE DESCRIPTION
x No gap penalties.
s Same open and extend gap penalties for both sequences.
d The sequences have different open and extend gap penalties.
c A callback function returns the gap penalties.
--
DNA:
localms: psm=2, pmm=0.5, pgo=-3, pge=-1
Protein:
http://resources.qiagenbioinformatics.com/manuals/clcgenomicsworkbench/650/Use_scoring_matrices.html
|
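A minimal usage sketch of the same Biopython calls used in align() above (assumes Biopython is installed; the sequences are illustrative):

from Bio import pairwise2
alns = pairwise2.align.localms("ACGTT", "ACGAT", 2, 0.5, -3, -1)  # match, mismatch, gap open, gap extend
print(pairwise2.format_alignment(*alns[0]))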
def bind_top_down(lower, upper, __fval=None, **fval):
"""Bind 2 layers for building.
When the upper layer is added as a payload of the lower layer, all the arguments # noqa: E501
will be applied to them.
ex:
>>> bind_top_down(Ether, SNAP, type=0x1234)
>>> Ether()/SNAP()
<Ether type=0x1234 |<SNAP |>>
"""
if __fval is not None:
fval.update(__fval)
upper._overload_fields = upper._overload_fields.copy()
upper._overload_fields[lower] = fval
|
Bind 2 layers for building.
When the upper layer is added as a payload of the lower layer, all the arguments # noqa: E501
will be applied to them.
ex:
>>> bind_top_down(Ether, SNAP, type=0x1234)
>>> Ether()/SNAP()
<Ether type=0x1234 |<SNAP |>>
|
def unsplit_query(query):
"""
Create a query string using the tuple query with a format as the one
returned by split_query()
"""
def unsplit_assignment(assignment):
x, y = assignment
if (x is not None) and (y is not None):
return x + '=' + y
elif x is not None:
return x
elif y is not None:
return '=' + y
else:
return ''
return '&'.join(map(unsplit_assignment, query))
|
Create a query string using the tuple query with a format as the one
returned by split_query()
|
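A worked example of unsplit_query() with a small, illustrative query tuple list:

query = [("a", "1"), ("b", None), (None, "2")]
assert unsplit_query(query) == "a=1&b&=2"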
def autoExpand(self, level=None):
"""
Returns whether or not to expand for the given level.
:param level | <int> || None
:return <bool>
"""
return self._autoExpand.get(level, self._autoExpand.get(None, False))
|
Returns whether or not to expand for the given level.
:param level | <int> || None
:return <bool>
|
def get(self, url):
"""
Do a GET request
"""
r = requests.get(self._format_url(url), headers=self.headers, timeout=TIMEOUT)
self._check_response(r, 200)
return r.json()
|
Do a GET request
|
def destroy(name, call=None):
'''
Destroy a VM in the VMware environment
CLI Example:
.. code-block:: bash
salt-cloud -d vmname
salt-cloud --destroy vmname
salt-cloud -a destroy vmname
'''
if call == 'function':
raise SaltCloudSystemExit(
'The destroy action must be called with -d, --destroy, '
'-a or --action.'
)
__utils__['cloud.fire_event'](
'event',
'destroying instance',
'salt/cloud/{0}/destroying'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
vm_properties = [
"name",
"summary.runtime.powerState"
]
vm_list = salt.utils.vmware.get_mors_with_properties(_get_si(), vim.VirtualMachine, vm_properties)
for vm in vm_list:
if vm["name"] == name:
if vm["summary.runtime.powerState"] != "poweredOff":
# Power off the vm first
try:
log.info('Powering Off VM %s', name)
task = vm["object"].PowerOff()
salt.utils.vmware.wait_for_task(task, name, 'power off')
except Exception as exc:
log.error(
'Error while powering off VM %s: %s',
name, exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return 'failed to destroy'
try:
log.info('Destroying VM %s', name)
task = vm["object"].Destroy_Task()
salt.utils.vmware.wait_for_task(task, name, 'destroy')
except Exception as exc:
log.error(
'Error while destroying VM %s: %s',
name, exc,
# Show the traceback if the debug logging level is enabled
exc_info_on_loglevel=logging.DEBUG
)
return 'failed to destroy'
__utils__['cloud.fire_event'](
'event',
'destroyed instance',
'salt/cloud/{0}/destroyed'.format(name),
args={'name': name},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
if __opts__.get('update_cachedir', False) is True:
__utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__)
return True
|
Destroy a VM in the VMware environment
CLI Example:
.. code-block:: bash
salt-cloud -d vmname
salt-cloud --destroy vmname
salt-cloud -a destroy vmname
|
def _get_dependency_order(g, node_list):
"""Return list of nodes as close as possible to the ordering in node_list,
but with child nodes earlier in the list than parents."""
access_ = accessibility(g)
deps = dict((k, set(v) - set([k])) for k, v in access_.items())
nodes = node_list + list(set(g.nodes()) - set(node_list))
ordered_nodes = []
while nodes:
n_ = nodes[0]
n_deps = deps.get(n_)
if (n_ in ordered_nodes) or (n_deps is None):
nodes = nodes[1:]
continue
moved = False
for i, n in enumerate(nodes[1:]):
if n in n_deps:
nodes = [nodes[i + 1]] + nodes[:i + 1] + nodes[i + 2:]
moved = True
break
if not moved:
ordered_nodes.append(n_)
nodes = nodes[1:]
return ordered_nodes
|
Return list of nodes as close as possible to the ordering in node_list,
but with child nodes earlier in the list than parents.
|
def computeRange(corners):
""" Determine the range spanned by an array of pixel positions. """
x = corners[:, 0]
y = corners[:, 1]
_xrange = (np.minimum.reduce(x), np.maximum.reduce(x))
_yrange = (np.minimum.reduce(y), np.maximum.reduce(y))
return _xrange, _yrange
|
Determine the range spanned by an array of pixel positions.
|
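A quick worked example of computeRange() (assumes numpy is imported as np in the module defining it; the corner values are illustrative):

import numpy as np
corners = np.array([[10, 5], [40, 25], [3, 17]])
print(computeRange(corners))   # ((3, 40), (5, 25))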
def parallel_starfeatures_lcdir(lcdir,
outdir,
lc_catalog_pickle,
neighbor_radius_arcsec,
fileglob=None,
maxobjects=None,
deredden=True,
custom_bandpasses=None,
lcformat='hat-sql',
lcformatdir=None,
nworkers=NCPUS,
recursive=True):
'''This runs parallel star feature extraction for a directory of LCs.
Parameters
----------
lcdir : str
The directory to search for light curves.
outdir : str
The output directory where the results will be placed.
lc_catalog_pickle : str
The path to a catalog pickle containing a dict with at least:
- an object ID array accessible with `dict['objects']['objectid']`
- an LC filename array accessible with `dict['objects']['lcfname']`
- a `scipy.spatial.KDTree` or `cKDTree` object to use for finding
neighbors for each object accessible with `dict['kdtree']`
A catalog pickle of the form needed can be produced using
:py:func:`astrobase.lcproc.catalogs.make_lclist` or
:py:func:`astrobase.lcproc.catalogs.filter_lclist`.
neighbor_radius_arcsec : float
This indicates the radius in arcsec to search for neighbors for this
object using the light curve catalog's `kdtree`, `objlist`, `lcflist`,
and in GAIA.
fileglob : str
The UNIX file glob to use to search for the light curves in `lcdir`. If
None, the default value for the light curve format specified will be
used.
maxobjects : int
The number of objects to process from `lclist`.
deredden : bool
This controls if the colors and any color classifications will be
dereddened using 2MASS DUST.
custom_bandpasses : dict or None
This is a dict used to define any custom bandpasses in the
`in_objectinfo` dict you want to make this function aware of and
generate colors for. Use the format below for this dict::
{
'<bandpass_key_1>':{'dustkey':'<twomass_dust_key_1>',
'label':'<band_label_1>'
'colors':[['<bandkey1>-<bandkey2>',
'<BAND1> - <BAND2>'],
['<bandkey3>-<bandkey4>',
'<BAND3> - <BAND4>']]},
.
...
.
'<bandpass_key_N>':{'dustkey':'<twomass_dust_key_N>',
'label':'<band_label_N>'
'colors':[['<bandkey1>-<bandkey2>',
'<BAND1> - <BAND2>'],
['<bandkey3>-<bandkey4>',
'<BAND3> - <BAND4>']]},
}
Where:
`bandpass_key` is a key to use to refer to this bandpass in the
`objectinfo` dict, e.g. 'sdssg' for SDSS g band
`twomass_dust_key` is the key to use in the 2MASS DUST result table for
reddening per band-pass. For example, given the following DUST result
table (using http://irsa.ipac.caltech.edu/applications/DUST/)::
|Filter_name|LamEff |A_over_E_B_V_SandF|A_SandF|A_over_E_B_V_SFD|A_SFD|
|char |float |float |float |float |float|
| |microns| |mags | |mags |
CTIO U 0.3734 4.107 0.209 4.968 0.253
CTIO B 0.4309 3.641 0.186 4.325 0.221
CTIO V 0.5517 2.682 0.137 3.240 0.165
.
.
...
The `twomass_dust_key` for 'vmag' would be 'CTIO V'. If you want to
skip DUST lookup and want to pass in a specific reddening magnitude
for your bandpass, use a float for the value of
`twomass_dust_key`. If you want to skip DUST lookup entirely for
this bandpass, use None for the value of `twomass_dust_key`.
`band_label` is the label to use for this bandpass, e.g. 'W1' for
WISE-1 band, 'u' for SDSS u, etc.
The 'colors' list contains color definitions for all colors you want
to generate using this bandpass. this list contains elements of the
form::
['<bandkey1>-<bandkey2>','<BAND1> - <BAND2>']
where the first item is the bandpass keys making up this color,
and the second item is the label for this color to be used by the
frontends. An example::
['sdssu-sdssg','u - g']
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
nworkers : int
The number of parallel workers to launch.
Returns
-------
dict
A dict with key:val pairs of the input light curve filename and the
output star features pickle for each LC processed.
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
if not fileglob:
fileglob = dfileglob
# now find the files
LOGINFO('searching for %s light curves in %s ...' % (lcformat, lcdir))
if recursive is False:
matching = glob.glob(os.path.join(lcdir, fileglob))
else:
# use recursive glob for Python 3.5+
if sys.version_info[:2] > (3,4):
matching = glob.glob(os.path.join(lcdir,
'**',
fileglob),recursive=True)
# otherwise, use os.walk and glob
else:
# use os.walk to go through the directories
walker = os.walk(lcdir)
matching = []
for root, dirs, _files in walker:
for sdir in dirs:
searchpath = os.path.join(root,
sdir,
fileglob)
foundfiles = glob.glob(searchpath)
if foundfiles:
matching.extend(foundfiles)
# now that we have all the files, process them
if matching and len(matching) > 0:
LOGINFO('found %s light curves, getting starfeatures...' %
len(matching))
return parallel_starfeatures(matching,
outdir,
lc_catalog_pickle,
neighbor_radius_arcsec,
deredden=deredden,
custom_bandpasses=custom_bandpasses,
maxobjects=maxobjects,
lcformat=lcformat,
lcformatdir=lcformatdir,
nworkers=nworkers)
else:
LOGERROR('no light curve files in %s format found in %s' % (lcformat,
lcdir))
return None
|
This runs parallel star feature extraction for a directory of LCs.
Parameters
----------
lcdir : str
The directory to search for light curves.
outdir : str
The output directory where the results will be placed.
lc_catalog_pickle : str
The path to a catalog pickle containing a dict with at least:
- an object ID array accessible with `dict['objects']['objectid']`
- an LC filename array accessible with `dict['objects']['lcfname']`
- a `scipy.spatial.KDTree` or `cKDTree` object to use for finding
neighbors for each object accessible with `dict['kdtree']`
A catalog pickle of the form needed can be produced using
:py:func:`astrobase.lcproc.catalogs.make_lclist` or
:py:func:`astrobase.lcproc.catalogs.filter_lclist`.
neighbor_radius_arcsec : float
This indicates the radius in arcsec to search for neighbors for this
object using the light curve catalog's `kdtree`, `objlist`, `lcflist`,
and in GAIA.
fileglob : str
The UNIX file glob to use to search for the light curves in `lcdir`. If
None, the default value for the light curve format specified will be
used.
maxobjects : int
The number of objects to process from `lclist`.
deredden : bool
This controls if the colors and any color classifications will be
dereddened using 2MASS DUST.
custom_bandpasses : dict or None
This is a dict used to define any custom bandpasses in the
`in_objectinfo` dict you want to make this function aware of and
generate colors for. Use the format below for this dict::
{
'<bandpass_key_1>':{'dustkey':'<twomass_dust_key_1>',
'label':'<band_label_1>'
'colors':[['<bandkey1>-<bandkey2>',
'<BAND1> - <BAND2>'],
['<bandkey3>-<bandkey4>',
'<BAND3> - <BAND4>']]},
.
...
.
'<bandpass_key_N>':{'dustkey':'<twomass_dust_key_N>',
'label':'<band_label_N>'
'colors':[['<bandkey1>-<bandkey2>',
'<BAND1> - <BAND2>'],
['<bandkey3>-<bandkey4>',
'<BAND3> - <BAND4>']]},
}
Where:
`bandpass_key` is a key to use to refer to this bandpass in the
`objectinfo` dict, e.g. 'sdssg' for SDSS g band
`twomass_dust_key` is the key to use in the 2MASS DUST result table for
reddening per band-pass. For example, given the following DUST result
table (using http://irsa.ipac.caltech.edu/applications/DUST/)::
|Filter_name|LamEff |A_over_E_B_V_SandF|A_SandF|A_over_E_B_V_SFD|A_SFD|
|char |float |float |float |float |float|
| |microns| |mags | |mags |
CTIO U 0.3734 4.107 0.209 4.968 0.253
CTIO B 0.4309 3.641 0.186 4.325 0.221
CTIO V 0.5517 2.682 0.137 3.240 0.165
.
.
...
The `twomass_dust_key` for 'vmag' would be 'CTIO V'. If you want to
skip DUST lookup and want to pass in a specific reddening magnitude
for your bandpass, use a float for the value of
`twomass_dust_key`. If you want to skip DUST lookup entirely for
this bandpass, use None for the value of `twomass_dust_key`.
`band_label` is the label to use for this bandpass, e.g. 'W1' for
WISE-1 band, 'u' for SDSS u, etc.
The 'colors' list contains color definitions for all colors you want
to generate using this bandpass. this list contains elements of the
form::
['<bandkey1>-<bandkey2>','<BAND1> - <BAND2>']
where the first item is the bandpass keys making up this color,
and the second item is the label for this color to be used by the
frontends. An example::
['sdssu-sdssg','u - g']
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
nworkers : int
The number of parallel workers to launch.
Returns
-------
dict
A dict with key:val pairs of the input light curve filename and the
output star features pickle for each LC processed.
|
def _convert_iterable(self, iterable):
"""Converts elements returned by an iterable into instances of
self._wrapper
"""
# Return original if _wrapper isn't callable
if not callable(self._wrapper):
return iterable
return [self._wrapper(x) for x in iterable]
|
Converts elements returned by an iterable into instances of
self._wrapper
|
def list_pools(self):
"""Fetches a list of all floating IP pools.
:returns: List of FloatingIpPool objects
"""
search_opts = {'router:external': True}
return [FloatingIpPool(pool) for pool
in self.client.list_networks(**search_opts).get('networks')]
|
Fetches a list of all floating IP pools.
:returns: List of FloatingIpPool objects
|
def max_age(self, value):
"""
Set the MaxAge of the response.
:type value: int
:param value: the MaxAge option
"""
option = Option()
option.number = defines.OptionRegistry.MAX_AGE.number
option.value = int(value)
self.del_option_by_number(defines.OptionRegistry.MAX_AGE.number)
self.add_option(option)
|
Set the MaxAge of the response.
:type value: int
:param value: the MaxAge option
|
def get_possible_initializer_keys(
cls, use_peepholes=False, use_batch_norm_h=True, use_batch_norm_x=False,
use_batch_norm_c=False):
"""Returns the keys the dictionary of variable initializers may contain.
The set of all possible initializer keys are:
w_gates: weight for gates
b_gates: bias of gates
w_f_diag: weight for prev_cell -> forget gate peephole
w_i_diag: weight for prev_cell -> input gate peephole
w_o_diag: weight for prev_cell -> output gate peephole
gamma_h: batch norm scaling for previous_hidden -> gates
gamma_x: batch norm scaling for input -> gates
gamma_c: batch norm scaling for cell -> output
beta_c: batch norm bias for cell -> output
Args:
cls: The class.
use_peepholes: Boolean that indicates whether peephole connections are
used.
use_batch_norm_h: Boolean that indicates whether to apply batch
normalization at the previous_hidden -> gates contribution. If you are
experimenting with batch norm then this may be the most effective to
turn on.
use_batch_norm_x: Boolean that indicates whether to apply batch
normalization at the input -> gates contribution.
use_batch_norm_c: Boolean that indicates whether to apply batch
normalization at the cell -> output contribution.
Returns:
Set with strings corresponding to the strings that may be passed to the
constructor.
"""
possible_keys = cls.POSSIBLE_INITIALIZER_KEYS.copy()
if not use_peepholes:
possible_keys.difference_update(
{cls.W_F_DIAG, cls.W_I_DIAG, cls.W_O_DIAG})
if not use_batch_norm_h:
possible_keys.remove(cls.GAMMA_H)
if not use_batch_norm_x:
possible_keys.remove(cls.GAMMA_X)
if not use_batch_norm_c:
possible_keys.difference_update({cls.GAMMA_C, cls.BETA_C})
return possible_keys
|
Returns the keys the dictionary of variable initializers may contain.
The set of all possible initializer keys are:
w_gates: weight for gates
b_gates: bias of gates
w_f_diag: weight for prev_cell -> forget gate peephole
w_i_diag: weight for prev_cell -> input gate peephole
w_o_diag: weight for prev_cell -> output gate peephole
gamma_h: batch norm scaling for previous_hidden -> gates
gamma_x: batch norm scaling for input -> gates
gamma_c: batch norm scaling for cell -> output
beta_c: batch norm bias for cell -> output
Args:
cls: The class.
use_peepholes: Boolean that indicates whether peephole connections are
used.
use_batch_norm_h: Boolean that indicates whether to apply batch
normalization at the previous_hidden -> gates contribution. If you are
experimenting with batch norm then this may be the most effective to
turn on.
use_batch_norm_x: Boolean that indicates whether to apply batch
normalization at the input -> gates contribution.
use_batch_norm_c: Boolean that indicates whether to apply batch
normalization at the cell -> output contribution.
Returns:
Set with strings corresponding to the strings that may be passed to the
constructor.
|
def all_coarse_grains_for_blackbox(blackbox):
"""Generator over all |CoarseGrains| for the given blackbox.
If a box has multiple outputs, those outputs are partitioned into the same
coarse-grain macro-element.
"""
for partition in all_partitions(blackbox.output_indices):
for grouping in all_groupings(partition):
coarse_grain = CoarseGrain(partition, grouping)
try:
validate.blackbox_and_coarse_grain(blackbox, coarse_grain)
except ValueError:
continue
yield coarse_grain
|
Generator over all |CoarseGrains| for the given blackbox.
If a box has multiple outputs, those outputs are partitioned into the same
coarse-grain macro-element.
|
def minion_sign_in_payload(self):
'''
Generates the payload used to authenticate with the master
server. This payload consists of the passed in id_ and the ssh
public key to encrypt the AES key sent back from the master.
:return: Payload dictionary
:rtype: dict
'''
payload = {}
payload['cmd'] = '_auth'
payload['id'] = self.opts['id']
if 'autosign_grains' in self.opts:
autosign_grains = {}
for grain in self.opts['autosign_grains']:
autosign_grains[grain] = self.opts['grains'].get(grain, None)
payload['autosign_grains'] = autosign_grains
try:
pubkey_path = os.path.join(self.opts['pki_dir'], self.mpub)
pub = get_rsa_pub_key(pubkey_path)
if HAS_M2:
payload['token'] = pub.public_encrypt(self.token, RSA.pkcs1_oaep_padding)
else:
cipher = PKCS1_OAEP.new(pub)
payload['token'] = cipher.encrypt(self.token)
except Exception:
pass
with salt.utils.files.fopen(self.pub_path) as f:
payload['pub'] = f.read()
return payload
|
Generates the payload used to authenticate with the master
server. This payload consists of the passed in id_ and the ssh
public key to encrypt the AES key sent back from the master.
:return: Payload dictionary
:rtype: dict
|
def plot_sections(self, fout_dir=".", **kws_usr):
"""Plot groups of GOs which have been placed in sections."""
kws_plt, _ = self._get_kws_plt(None, **kws_usr)
PltGroupedGos(self).plot_sections(fout_dir, **kws_plt)
|
Plot groups of GOs which have been placed in sections.
|
def hexedit(x):
"""Run external hex editor on a packet or bytes. Set editor in conf.prog.hexedit"""
x = bytes(x)
fname = get_temp_file()
with open(fname,"wb") as f:
f.write(x)
subprocess.call([conf.prog.hexedit, fname])
with open(fname, "rb") as f:
x = f.read()
return x
|
Run external hex editor on a packet or bytes. Set editor in conf.prog.hexedit
|
def metadata(self):
"""Get metadata information in XML format."""
params = {
self.PCTYPE: self.CTYPE_XML
}
response = self.call(self.CGI_BUG, params)
return response
|
Get metadata information in XML format.
|
def build_option_parser(parser):
"""Hook to add global options."""
parser.add_argument(
"--os-data-processing-api-version",
metavar="<data-processing-api-version>",
default=utils.env(
'OS_DATA_PROCESSING_API_VERSION',
default=DEFAULT_DATA_PROCESSING_API_VERSION),
help=("Data processing API version, default=" +
DEFAULT_DATA_PROCESSING_API_VERSION +
' (Env: OS_DATA_PROCESSING_API_VERSION)'))
parser.add_argument(
"--os-data-processing-url",
default=utils.env(
"OS_DATA_PROCESSING_URL"),
help=("Data processing API URL, "
"(Env: OS_DATA_PROCESSING_API_URL)"))
return parser
|
Hook to add global options.
|
def reducing(reducer, init=UNSET):
"""Create a reducing transducer with the given reducer.
Args:
reducer: A two-argument function which will be used to combine the
partial cumulative result in the first argument with the next
item from the input stream in the second argument.
Returns: A reducing transducer: A single argument function which,
when passed a reducing function, returns a new reducing function
which entirely reduces the input stream using 'reducer' before
passing the result to the reducing function passed to the
transducer.
"""
reducer2 = reducer
def reducing_transducer(reducer):
return Reducing(reducer, reducer2, init)
return reducing_transducer
|
Create a reducing transducer with the given reducer.
Args:
reducer: A two-argument function which will be used to combine the
partial cumulative result in the first argument with the next
item from the input stream in the second argument.
Returns: A reducing transducer: A single argument function which,
when passed a reducing function, returns a new reducing function
which entirely reduces the input stream using 'reducer' before
passing the result to the reducing function passed to the
transducer.
|
def adsSyncReadReqEx2(
port, address, index_group, index_offset, data_type, return_ctypes=False
):
# type: (int, AmsAddr, int, int, Type, bool) -> Any
"""Read data synchronous from an ADS-device.
:param int port: local AMS port as returned by adsPortOpenEx()
:param pyads.structs.AmsAddr address: local or remote AmsAddr
:param int index_group: PLC storage area, according to the INDEXGROUP
constants
:param int index_offset: PLC storage address
:param Type data_type: type of the data given to the PLC, according to
PLCTYPE constants
:param bool return_ctypes: return ctypes instead of python types if True
(default: False)
:rtype: data_type
:return: value: **value**
"""
sync_read_request = _adsDLL.AdsSyncReadReqEx2
ams_address_pointer = ctypes.pointer(address.amsAddrStruct())
index_group_c = ctypes.c_ulong(index_group)
index_offset_c = ctypes.c_ulong(index_offset)
if data_type == PLCTYPE_STRING:
data = (STRING_BUFFER * PLCTYPE_STRING)()
else:
data = data_type()
data_pointer = ctypes.pointer(data)
data_length = ctypes.c_ulong(ctypes.sizeof(data))
bytes_read = ctypes.c_ulong()
bytes_read_pointer = ctypes.pointer(bytes_read)
error_code = sync_read_request(
port,
ams_address_pointer,
index_group_c,
index_offset_c,
data_length,
data_pointer,
bytes_read_pointer,
)
if error_code:
raise ADSError(error_code)
# If we're reading a value of predetermined size (anything but a string),
# validate that the correct number of bytes were read
if data_type != PLCTYPE_STRING and bytes_read.value != data_length.value:
raise RuntimeError(
"Insufficient data (expected {0} bytes, {1} were read).".format(
data_length.value, bytes_read.value
)
)
if return_ctypes:
return data
if data_type == PLCTYPE_STRING:
return data.value.decode("utf-8")
if type(data_type).__name__ == "PyCArrayType":
return [i for i in data]
if hasattr(data, "value"):
return data.value
return data
|
Read data synchronously from an ADS device.
:param int port: local AMS port as returned by adsPortOpenEx()
:param pyads.structs.AmsAddr address: local or remote AmsAddr
:param int index_group: PLC storage area, according to the INDEXGROUP
constants
:param int index_offset: PLC storage address
:param Type data_type: type of the data given to the PLC, according to
PLCTYPE constants
:param bool return_ctypes: return ctypes instead of python types if True
(default: False)
:rtype: data_type
:return: value: **value**
|
def infer_call(self, context=None):
"""infer a Call node by trying to guess what the function returns"""
callcontext = contextmod.copy_context(context)
callcontext.callcontext = contextmod.CallContext(
args=self.args, keywords=self.keywords
)
callcontext.boundnode = None
if context is not None:
callcontext.extra_context = _populate_context_lookup(self, context.clone())
for callee in self.func.infer(context):
if callee is util.Uninferable:
yield callee
continue
try:
if hasattr(callee, "infer_call_result"):
yield from callee.infer_call_result(caller=self, context=callcontext)
except exceptions.InferenceError:
continue
return dict(node=self, context=context)
|
infer a Call node by trying to guess what the function returns
|
def _get_cache_key(self):
"""
The cache key is a string of concatenated sorted names and values.
"""
keys = list(self.params.keys())
keys.sort()
cache_key = str()
for key in keys:
if key != "api_sig" and key != "api_key" and key != "sk":
cache_key += key + self.params[key]
return hashlib.sha1(cache_key.encode("utf-8")).hexdigest()
|
The cache key is a string of concatenated sorted names and values.
|
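The key construction above can be reproduced standalone as follows (parameter names and values are illustrative, not from any real API call):

import hashlib
params = {"method": "track.getInfo", "track": "Yellow", "api_key": "x"}
raw = "".join(k + params[k] for k in sorted(params) if k not in ("api_sig", "api_key", "sk"))
print(hashlib.sha1(raw.encode("utf-8")).hexdigest())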
def checkQueryRange(self, start, end):
"""
Checks to ensure that the query range is valid within this reference.
If not, raise ReferenceRangeErrorException.
"""
condition = (
(start < 0 or end > self.getLength()) or
start > end or start == end)
if condition:
raise exceptions.ReferenceRangeErrorException(
self.getId(), start, end)
|
Checks to ensure that the query range is valid within this reference.
If not, raise ReferenceRangeErrorException.
|
def taskfile_created_data(file_, role):
"""Return the data for created date
:param file_: the file that holds the data
:type file_: :class:`jukeboxcore.djadapter.models.File`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the created date
:rtype: depending on role
:raises: None
"""
if role == QtCore.Qt.DisplayRole or role == QtCore.Qt.EditRole:
dt = file_.date_created
return dt_to_qdatetime(dt)
|
Return the data for created date
:param file_: the file that holds the data
:type file_: :class:`jukeboxcore.djadapter.models.File`
:param role: item data role
:type role: QtCore.Qt.ItemDataRole
:returns: data for the created date
:rtype: depending on role
:raises: None
|
def path(self):
"""Return the file path abstracted from VCS."""
if (self.source_file.startswith('a/') and
self.target_file.startswith('b/')):
filepath = self.source_file[2:]
elif (self.source_file.startswith('a/') and
self.target_file == '/dev/null'):
filepath = self.source_file[2:]
elif (self.target_file.startswith('b/') and
self.source_file == '/dev/null'):
filepath = self.target_file[2:]
else:
filepath = self.source_file
return filepath
|
Return the file path abstracted from VCS.
|
def add_prefix(self, ncname: str) -> None:
""" Look up ncname and add it to the prefix map if necessary
@param ncname: name to add
"""
if ncname not in self.prefixmap:
uri = cu.expand_uri(ncname + ':', self.curi_maps)
if uri and '://' in uri:
self.prefixmap[ncname] = uri
else:
print(f"Unrecognized prefix: {ncname}", file=sys.stderr)
self.prefixmap[ncname] = f"http://example.org/unknown/{ncname}/"
|
Look up ncname and add it to the prefix map if necessary
@param ncname: name to add
|
def _ensure_create_ha_compliant(self, router, router_type):
"""To be called in create_router() BEFORE router is created in DB."""
details = router.pop(ha.DETAILS, {})
if details == ATTR_NOT_SPECIFIED:
details = {}
res = {ha.ENABLED: router.pop(ha.ENABLED, ATTR_NOT_SPECIFIED),
ha.DETAILS: details}
if not is_attr_set(res[ha.ENABLED]):
res[ha.ENABLED] = router_type['ha_enabled_by_default']
if res[ha.ENABLED] and not cfg.CONF.ha.ha_support_enabled:
raise ha.HADisabled()
if not res[ha.ENABLED]:
return res
if not is_attr_set(details.get(ha.TYPE, ATTR_NOT_SPECIFIED)):
details[ha.TYPE] = cfg.CONF.ha.default_ha_mechanism
if details[ha.TYPE] in cfg.CONF.ha.disabled_ha_mechanisms:
raise ha.HADisabledHAType(ha_type=details[ha.TYPE])
if not is_attr_set(details.get(ha.REDUNDANCY_LEVEL,
ATTR_NOT_SPECIFIED)):
details[ha.REDUNDANCY_LEVEL] = (
cfg.CONF.ha.default_ha_redundancy_level)
if not is_attr_set(details.get(ha.PROBE_CONNECTIVITY,
ATTR_NOT_SPECIFIED)):
details[ha.PROBE_CONNECTIVITY] = (
cfg.CONF.ha.connectivity_probing_enabled_by_default)
if not is_attr_set(details.get(ha.PROBE_TARGET, ATTR_NOT_SPECIFIED)):
details[ha.PROBE_TARGET] = cfg.CONF.ha.default_probe_target
if not is_attr_set(details.get(ha.PROBE_INTERVAL, ATTR_NOT_SPECIFIED)):
details[ha.PROBE_INTERVAL] = cfg.CONF.ha.default_ping_interval
return res
|
To be called in create_router() BEFORE router is created in DB.
|
def person_details(self, person_id, standardize=False):
"""Get a detailed person object
:param person_id:
String corresponding to the person's id.
>>> instructor = d.person('jhs878sfd03b38b0d463b16320b5e438')
"""
resp = self._request(path.join(ENDPOINTS['DETAILS'], person_id))
if standardize:
resp['result_data'] = [self.standardize(res) for res in resp['result_data']]
return resp
|
Get a detailed person object
:param person_id:
String corresponding to the person's id.
>>> instructor = d.person('jhs878sfd03b38b0d463b16320b5e438')
|
def get_start_time(self):
"""
Determines when this process started running.
@rtype: win32.SYSTEMTIME
@return: Process start time.
"""
if win32.PROCESS_ALL_ACCESS == win32.PROCESS_ALL_ACCESS_VISTA:
dwAccess = win32.PROCESS_QUERY_LIMITED_INFORMATION
else:
dwAccess = win32.PROCESS_QUERY_INFORMATION
hProcess = self.get_handle(dwAccess)
CreationTime = win32.GetProcessTimes(hProcess)[0]
return win32.FileTimeToSystemTime(CreationTime)
|
Determines when this process started running.
@rtype: win32.SYSTEMTIME
@return: Process start time.
|
def get(cls, name, raise_exc=True):
"""
Get the element by name. Does an exact match by element type.
:param str name: name of element
:param bool raise_exc: optionally disable exception.
:raises ElementNotFound: if element does not exist
:rtype: Element
"""
element = cls.objects.filter(name, exact_match=True).first() if \
name is not None else None
if not element and raise_exc:
raise ElementNotFound('Cannot find specified element: %s, type: '
'%s' % (name, cls.__name__))
return element
|
Get the element by name. Does an exact match by element type.
:param str name: name of element
:param bool raise_exc: optionally disable exception.
:raises ElementNotFound: if element does not exist
:rtype: Element
|
def nth(lst, n):
"""Return the nth item in the list."""
expect_type(n, (String, Number), unit=None)
if isinstance(n, String):
if n.value.lower() == 'first':
i = 0
elif n.value.lower() == 'last':
i = -1
else:
raise ValueError("Invalid index %r" % (n,))
else:
# DEVIATION: nth treats lists as circular lists
i = n.to_python_index(len(lst), circular=True)
return lst[i]
|
Return the nth item in the list.
|
def get_file(self, fp, headers=None, cb=None, num_cb=10,
torrent=False, version_id=None, override_num_retries=None,
response_headers=None, callback=None):
"""
Retrieves a file from an S3 Key
:type fp: file
:param fp: File pointer to put the data into
:type headers: string
:param headers: headers to send when retrieving the file
:type cb: function
:param cb: a callback function that will be called to report
progress on the download. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to S3 and the second representing the
size of the to be transmitted object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
:type torrent: bool
:param torrent: Flag for whether to get a torrent for the file
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP headers/values
that will override any headers associated with
the stored object in the response.
See http://goo.gl/EWOPb for details.
"""
if cb:
if num_cb > 2:
cb_count = self.size / self.BufferSize / (num_cb-2)
elif num_cb < 0:
cb_count = -1
else:
cb_count = 0
i = total_bytes = 0
cb(total_bytes, self.size)
save_debug = self.bucket.connection.debug
if self.bucket.connection.debug == 1:
self.bucket.connection.debug = 0
query_args = []
if torrent:
query_args.append('torrent')
# If a version_id is passed in, use that. If not, check to see
# if the Key object has an explicit version_id and, if so, use that.
# Otherwise, don't pass a version_id query param.
if version_id is None:
version_id = self.version_id
if version_id:
query_args.append('versionId=%s' % version_id)
if response_headers:
for key in response_headers:
query_args.append('%s=%s' % (key, response_headers[key]))
query_args = '&'.join(query_args)
def file_got(response):
body = self.resp.read()
fp.write(body)
if cb:
cb(total_bytes, self.size)
self.close()
self.bucket.connection.debug = save_debug
if callable(callback):
callback(response)
self.open('r', headers, query_args=query_args,
override_num_retries=override_num_retries, callback=file_got)
|
Retrieves a file from an S3 Key
:type fp: file
:param fp: File pointer to put the data into
:type headers: string
:param headers: headers to send when retrieving the file
:type cb: function
:param cb: a callback function that will be called to report
progress on the download. The callback should accept
two integer parameters, the first representing the
number of bytes that have been successfully
transmitted to S3 and the second representing the
size of the to be transmitted object.
:type num_cb: int
:param num_cb: (optional) If a callback is specified with
the cb parameter this parameter determines the
granularity of the callback by defining
the maximum number of times the callback will
be called during the file transfer.
:type torrent: bool
:param torrent: Flag for whether to get a torrent for the file
:type override_num_retries: int
:param override_num_retries: If not None will override configured
num_retries parameter for underlying GET.
:type response_headers: dict
:param response_headers: A dictionary containing HTTP headers/values
that will override any headers associated with
the stored object in the response.
See http://goo.gl/EWOPb for details.
|
def act(self, event, *args, **kwargs):
"""
Act on the specific life cycle event. The action here is to invoke the hook function on all registered plugins.
*args and **kwargs will be passed directly to the plugin's hook functions
:param samtranslator.plugins.LifeCycleEvents event: Event to act upon
:return: Nothing
:raises ValueError: If event is not a valid life cycle event
:raises NameError: If a plugin does not have the hook method defined
:raises Exception: Any exception that a plugin raises
"""
if not isinstance(event, LifeCycleEvents):
raise ValueError("'event' must be an instance of LifeCycleEvents class")
method_name = "on_" + event.name
for plugin in self._plugins:
if not hasattr(plugin, method_name):
raise NameError("'{}' method is not found in the plugin with name '{}'"
.format(method_name, plugin.name))
try:
getattr(plugin, method_name)(*args, **kwargs)
except InvalidResourceException as ex:
# Don't need to log these because they don't result in crashes
raise ex
except Exception as ex:
logging.exception("Plugin '%s' raised an exception: %s", plugin.name, ex)
raise ex
|
Act on the specific life cycle event. The action here is to invoke the hook function on all registered plugins.
*args and **kwargs will be passed directly to the plugin's hook functions
:param samtranslator.plugins.LifeCycleEvents event: Event to act upon
:return: Nothing
:raises ValueError: If event is not a valid life cycle event
:raises NameError: If a plugin does not have the hook method defined
:raises Exception: Any exception that a plugin raises
|
def estimate_maximum_read_length(fastq_file, quality_format="fastq-sanger",
nreads=1000):
"""
estimate the maximum read length of a fastq file
"""
in_handle = SeqIO.parse(open_fastq(fastq_file), quality_format)
lengths = []
for _ in range(nreads):
try:
lengths.append(len(next(in_handle).seq))
except StopIteration:
break
in_handle.close()
return max(lengths)
|
estimate the maximum read length of a fastq file
|
def set_attribute(self, element, attribute, value):
"""
:Description: Modify the given attribute of the target element.
:param element: Element for browser instance to target.
:type element: WebElement
:param attribute: Attribute of target element to modify.
:type attribute: string
:param value: Value of target element's attribute to modify.
:type value: None, bool, int, float, string
"""
self.browser.execute_script('arguments[0].setAttribute("%s", %s);' % (
attribute, self.__type2js(value=value)), element)
|
:Description: Modify the given attribute of the target element.
:param element: Element for browser instance to target.
:type element: WebElement
:param attribute: Attribute of target element to modify.
:type attribute: string
:param value: Value of target element's attribute to modify.
:type value: None, bool, int, float, string
|
def remove_translation(self, context_id, translation_id):
"""Removes a translation entry from a tunnel context.
:param int context_id: The id-value representing the context instance.
:param int translation_id: The id-value representing the translation.
:return bool: True if translation entry removal was successful.
"""
return self.context.deleteAddressTranslation(translation_id,
id=context_id)
|
Removes a translation entry from a tunnel context.
:param int context_id: The id-value representing the context instance.
:param int translation_id: The id-value representing the translation.
:return bool: True if translation entry removal was successful.
|
def put(self, data, block=True):
""" If there is space it sends data to server
If no space in the queue
It returns the request in every 10 millisecond
until there will be space in the queue.
"""
self.start(test_connection=False)
while True:
response = self._req_rep(QueuingServerMessageListener.SPACE)
if response == QueuingServerMessageListener.SPACE_AVAILABLE:
self._req_rep((QueuingServerMessageListener.DATA, data))
break
else:
time.sleep(0.01)
|
Send data to the server if there is space in the queue.
If the queue is full, the request is retried every 10 milliseconds
until space becomes available.
|
def createFile(
self,
fileName,
desiredAccess,
shareMode,
creationDisposition,
flagsAndAttributes,
dokanFileInfo,
):
"""Creates a file.
:param fileName: name of file to create
:type fileName: ctypes.c_wchar_p
:param desiredAccess: desired access flags
:type desiredAccess: ctypes.c_ulong
:param shareMode: share mode flags
:type shareMode: ctypes.c_ulong
:param creationDisposition: creation disposition flags
:type creationDisposition: ctypes.c_ulong
:param flagsAndAttributes: creation flags and attributes
:type flagsAndAttributes: ctypes.c_ulong
:param dokanFileInfo: used by Dokan
:type dokanFileInfo: PDOKAN_FILE_INFO
:return: error code
:rtype: ctypes.c_int
"""
return self.operations('createFile', fileName)
|
Creates a file.
:param fileName: name of file to create
:type fileName: ctypes.c_wchar_p
:param desiredAccess: desired access flags
:type desiredAccess: ctypes.c_ulong
:param shareMode: share mode flags
:type shareMode: ctypes.c_ulong
:param creationDisposition: creation disposition flags
:type creationDisposition: ctypes.c_ulong
:param flagsAndAttributes: creation flags and attributes
:type flagsAndAttributes: ctypes.c_ulong
:param dokanFileInfo: used by Dokan
:type dokanFileInfo: PDOKAN_FILE_INFO
:return: error code
:rtype: ctypes.c_int
|
def column_names(self):
"""
Returns the column names.
Returns
-------
out : list[string]
Column names of the SFrame.
"""
if self._is_vertex_frame():
return self.__graph__.__proxy__.get_vertex_fields()
elif self._is_edge_frame():
return self.__graph__.__proxy__.get_edge_fields()
|
Returns the column names.
Returns
-------
out : list[string]
Column names of the SFrame.
|
def set_value(self, instance, value, parent=None):
"""
Set prop value
:param instance:
:param value:
:param parent:
:return:
"""
self.resolve_base(instance)
value = self.deserialize(value, parent)
instance.values[self.alias] = value
self._trigger_changed(instance, value)
|
Set prop value
:param instance:
:param value:
:param parent:
:return:
|
def query_file(self, path, fetchall=False, **params):
"""Like Database.query, but takes a filename to load a query from."""
with self.get_connection() as conn:
return conn.query_file(path, fetchall, **params)
|
Like Database.query, but takes a filename to load a query from.
|
def get_saved_rules(conf_file=None):
'''
Return a data structure of the rules in the conf file
CLI Example:
.. code-block:: bash
salt '*' nftables.get_saved_rules
'''
if _conf() and not conf_file:
conf_file = _conf()
with salt.utils.files.fopen(conf_file) as fp_:
lines = salt.utils.data.decode(fp_.readlines())
rules = []
for line in lines:
tmpline = line.strip()
if not tmpline:
continue
if tmpline.startswith('#'):
continue
rules.append(line)
return rules
|
Return a data structure of the rules in the conf file
CLI Example:
.. code-block:: bash
salt '*' nftables.get_saved_rules
|
def _broadcast_shapes(s1, s2):
"""Given array shapes `s1` and `s2`, compute the shape of the array that would
result from broadcasting them together."""
n1 = len(s1)
n2 = len(s2)
n = max(n1, n2)
res = [1] * n
for i in range(n):
if i >= n1:
c1 = 1
else:
c1 = s1[n1-1-i]
if i >= n2:
c2 = 1
else:
c2 = s2[n2-1-i]
if c1 == 1:
rc = c2
elif c2 == 1 or c1 == c2:
rc = c1
else:
raise ValueError('array shapes %r and %r are not compatible' % (s1, s2))
res[n-1-i] = rc
return tuple(res)
|
Given array shapes `s1` and `s2`, compute the shape of the array that would
result from broadcasting them together.
|
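Two worked examples of _broadcast_shapes(), following NumPy's broadcasting rules:

assert _broadcast_shapes((3, 1), (1, 4)) == (3, 4)
assert _broadcast_shapes((5,), (2, 1, 5)) == (2, 1, 5)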
def to_type(self, dtype: type, *cols, **kwargs):
"""
Convert column values to a given type in the
main dataframe
:param dtype: a type to convert to: ex: ``str``
:type dtype: type
:param \*cols: names of the columns
:type \*cols: str, at least one
:param \*\*kwargs: keyword arguments for ``df.astype``
:type \*\*kwargs: optional
:example: ``ds.to_type(str, "mycol")``
"""
try:
allcols = self.df.columns.values
for col in cols:
if col not in allcols:
self.err("Column " + col + " not found")
return
self.df[col] = self.df[col].astype(dtype, **kwargs)
except Exception as e:
self.err(e, "Can not convert to type")
|
Convert column values to a given type in the
main dataframe
:param dtype: a type to convert to: ex: ``str``
:type dtype: type
:param \*cols: names of the columns
:type \*cols: str, at least one
:param \*\*kwargs: keyword arguments for ``df.astype``
:type \*\*kwargs: optional
:example: ``ds.to_type(str, "mycol")``
|
def get_formfield(model, field):
"""
Return the formfield associated with the field of the model
"""
class_field = model._meta.get_field(field)
if hasattr(class_field, "field"):
formfield = class_field.field.formfield()
else:
formfield = class_field.formfield()
# Otherwise the formfield contain the reverse relation
if isinstance(formfield, ChoiceField):
formfield.choices = class_field.get_choices()
return formfield
|
Return the formfield associated with the field of the model
|
def get_toolbar_buttons(self):
"""Return toolbar buttons list."""
buttons = []
# Code to add the stop button
if self.stop_button is None:
self.stop_button = create_toolbutton(
self,
text=_("Stop"),
icon=self.stop_icon,
tip=_("Stop the current command"))
self.disable_stop_button()
# set click event handler
self.stop_button.clicked.connect(self.stop_button_click_handler)
if is_dark_interface():
self.stop_button.setStyleSheet("QToolButton{padding: 3px;}")
if self.stop_button is not None:
buttons.append(self.stop_button)
# Reset namespace button
if self.reset_button is None:
self.reset_button = create_toolbutton(
self,
text=_("Remove"),
icon=ima.icon('editdelete'),
tip=_("Remove all variables"),
triggered=self.reset_namespace)
if is_dark_interface():
self.reset_button.setStyleSheet("QToolButton{padding: 3px;}")
if self.reset_button is not None:
buttons.append(self.reset_button)
if self.options_button is None:
options = self.get_options_menu()
if options:
self.options_button = create_toolbutton(self,
text=_('Options'), icon=ima.icon('tooloptions'))
self.options_button.setPopupMode(QToolButton.InstantPopup)
menu = QMenu(self)
add_actions(menu, options)
self.options_button.setMenu(menu)
if self.options_button is not None:
buttons.append(self.options_button)
return buttons
|
Return toolbar buttons list.
|
def shrink(self, fraction=0.85):
"""Shrink the triangle polydata in the representation of the input mesh.
Example:
.. code-block:: python
from vtkplotter import *
pot = load(datadir + 'shapes/teapot.vtk').shrink(0.75)
s = Sphere(r=0.2).pos(0,0,-0.5)
show(pot, s)
|shrink| |shrink.py|_
"""
poly = self.polydata(True)
shrink = vtk.vtkShrinkPolyData()
shrink.SetInputData(poly)
shrink.SetShrinkFactor(fraction)
shrink.Update()
return self.updateMesh(shrink.GetOutput())
|
Shrink the triangle polydata in the representation of the input mesh.
Example:
.. code-block:: python
from vtkplotter import *
pot = load(datadir + 'shapes/teapot.vtk').shrink(0.75)
s = Sphere(r=0.2).pos(0,0,-0.5)
show(pot, s)
|shrink| |shrink.py|_
|
def add_replica(self, partition_name, count=1):
"""Increase the replication-factor for a partition.
The replication-group to add to is determined as follows:
1. Find all replication-groups that have brokers not already
replicating the partition.
2. Of these, find replication-groups that have fewer than the
average number of replicas for this partition.
3. Choose the replication-group with the fewest overall partitions.
:param partition_name: (topic_id, partition_id) of the partition to add
replicas of.
:param count: The number of replicas to add.
:raises InvalidReplicationFactorError when the resulting replication
factor is greater than the number of brokers in the cluster.
"""
try:
partition = self.cluster_topology.partitions[partition_name]
except KeyError:
raise InvalidPartitionError(
"Partition name {name} not found".format(name=partition_name),
)
if partition.replication_factor + count > len(self.cluster_topology.brokers):
raise InvalidReplicationFactorError(
"Cannot increase replication factor to {0}. There are only "
"{1} brokers."
.format(
partition.replication_factor + count,
len(self.cluster_topology.brokers),
)
)
non_full_rgs = [
rg
for rg in self.cluster_topology.rgs.values()
if rg.count_replica(partition) < len(rg.brokers)
]
for _ in range(count):
total_replicas = sum(
rg.count_replica(partition)
for rg in non_full_rgs
)
opt_replicas, _ = compute_optimum(
len(non_full_rgs),
total_replicas,
)
under_replicated_rgs = [
rg
for rg in non_full_rgs
if rg.count_replica(partition) < opt_replicas
]
candidate_rgs = under_replicated_rgs or non_full_rgs
rg = min(candidate_rgs, key=lambda rg: len(rg.partitions))
rg.add_replica(partition)
if rg.count_replica(partition) >= len(rg.brokers):
non_full_rgs.remove(rg)
|
Increase the replication-factor for a partition.
The replication-group to add to is determined as follows:
1. Find all replication-groups that have brokers not already
replicating the partition.
2. Of these, find replication-groups that have fewer than the
average number of replicas for this partition.
3. Choose the replication-group with the fewest overall partitions.
:param partition_name: (topic_id, partition_id) of the partition to add
replicas of.
:param count: The number of replicas to add.
:raises InvalidReplicationFactorError when the resulting replication
factor is greater than the number of brokers in the cluster.
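The selection rule in steps 1-3 can be illustrated in isolation with plain dicts; this is a simplified, hypothetical sketch of the heuristic, not the actual kafka-utils code (compute_optimum is approximated by a floored average):

def pick_replication_group(replicas_per_rg, partitions_per_rg, brokers_per_rg):
    # 1. groups that still have brokers without this partition
    non_full = [rg for rg, count in replicas_per_rg.items()
                if count < brokers_per_rg[rg]]
    # 2. groups with fewer than the (floored) average number of replicas
    optimum = sum(replicas_per_rg[rg] for rg in non_full) // len(non_full)
    under = [rg for rg in non_full if replicas_per_rg[rg] < optimum] or non_full
    # 3. among the candidates, pick the group with the fewest partitions overall
    return min(under, key=lambda rg: partitions_per_rg[rg])

print(pick_replication_group(
    replicas_per_rg={"rg1": 2, "rg2": 0, "rg3": 1},
    partitions_per_rg={"rg1": 10, "rg2": 7, "rg3": 4},
    brokers_per_rg={"rg1": 3, "rg2": 3, "rg3": 3},
))  # -> "rg2"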
|
def eval_gpr(expr, knockouts):
"""evaluate compiled ast of gene_reaction_rule with knockouts
Parameters
----------
expr : Expression
The ast of the gene reaction rule
knockouts : DictList, set
Set of genes that are knocked out
Returns
-------
bool
True if the gene reaction rule is true with the given knockouts
otherwise false
"""
if isinstance(expr, Expression):
return eval_gpr(expr.body, knockouts)
elif isinstance(expr, Name):
return expr.id not in knockouts
elif isinstance(expr, BoolOp):
op = expr.op
if isinstance(op, Or):
return any(eval_gpr(i, knockouts) for i in expr.values)
elif isinstance(op, And):
return all(eval_gpr(i, knockouts) for i in expr.values)
else:
raise TypeError("unsupported operation " + op.__class__.__name__)
elif expr is None:
return True
else:
raise TypeError("unsupported operation " + repr(expr))
|
evaluate compiled ast of gene_reaction_rule with knockouts
Parameters
----------
expr : Expression
The ast of the gene reaction rule
knockouts : DictList, set
Set of genes that are knocked out
Returns
-------
bool
True if the gene reaction rule is true with the given knockouts
otherwise false
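A short usage sketch, assuming the Expression/Name/BoolOp checks above refer to the standard ast node types and the gene names are plain identifiers:

import ast

rule = ast.parse("gene1 and (gene2 or gene3)", mode="eval")
print(eval_gpr(rule, set()))                # True: nothing knocked out
print(eval_gpr(rule, {"gene2"}))            # True: gene3 still satisfies the OR
print(eval_gpr(rule, {"gene2", "gene3"}))   # False: the OR branch is dead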
|
def _is_valid_datatype(datatype_instance):
"""
Returns true if datatype_instance is a valid datatype object and false otherwise.
"""
# Remap so we can still use the python types for the simple cases
global _simple_type_remap
if datatype_instance in _simple_type_remap:
return True
# Now set the protobuf from this interface.
if isinstance(datatype_instance, (Int64, Double, String, Array)):
return True
elif isinstance(datatype_instance, Dictionary):
kt = datatype_instance.key_type
if isinstance(kt, (Int64, String)):
return True
return False
|
Returns true if datatype_instance is a valid datatype object and false otherwise.
|
def twitch_receive_messages(self):
"""
Call this function to process everything received by the socket
This needs to be called frequently enough (roughly every 10 s); Twitch logs off
users that do not reply to ping commands.
:return: list of chat messages received. Each message is a dict
with the keys ['channel', 'username', 'message']
"""
self._push_from_buffer()
result = []
while True:
# process the complete buffer, until no data is left
try:
msg = self.s.recv(4096).decode() # NON-BLOCKING RECEIVE!
except socket.error as e:
err = e.args[0]
if err == errno.EAGAIN or err == errno.EWOULDBLOCK:
# There is no more data available to read
return result
else:
# a "real" error occurred
# import traceback
# import sys
# print(traceback.format_exc())
# print("Trying to recover...")
self.connect()
return result
else:
if self.verbose:
print(msg)
rec = [self._parse_message(line)
for line in filter(None, msg.split('\r\n'))]
rec = [r for r in rec if r] # remove Nones
result.extend(rec)
|
Call this function to process everything received by the socket
This needs to be called frequently enough (roughly every 10 s); Twitch logs off
users that do not reply to ping commands.
:return: list of chat messages received. Each message is a dict
with the keys ['channel', 'username', 'message']
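The non-blocking receive loop above relies on the socket having been put into non-blocking mode; a minimal standalone sketch of that pattern (separate from the Twitch connection handling in this class):

import errno
import socket

def drain_socket(sock):
    """Read whatever is currently available on a non-blocking socket."""
    chunks = []
    while True:
        try:
            data = sock.recv(4096)
        except socket.error as exc:
            if exc.args[0] in (errno.EAGAIN, errno.EWOULDBLOCK):
                break      # no more data available right now
            raise          # a real error; let the caller decide
        if not data:
            break          # peer closed the connection
        chunks.append(data)
    return b"".join(chunks)

# sock = socket.create_connection(("irc.chat.twitch.tv", 6667)); sock.setblocking(False)
# print(drain_socket(sock))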
|
def create_from_tuple(cls, volume):
"""
Create instance from tuple.
:param volume: tuple in one of the following forms: target | source,target | source,target,mode
:return: instance of Volume
"""
if isinstance(volume, six.string_types):
return Volume(target=volume)
elif len(volume) == 2:
return Volume(source=volume[0],
target=volume[1])
elif len(volume) == 3:
return Volume(source=volume[0],
target=volume[1],
mode=volume[2])
else:
logger.debug("Cannot create volume instance from {}."
"It has to be tuple of form target x source,target x source,target,mode.".format(volume))
raise ConuException("Cannot create volume instance.")
|
Create instance from tuple.
:param volume: tuple in one of the following forms: target | source,target | source,target,mode
:return: instance of Volume
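Usage sketch for the three accepted forms (the paths and the "ro" mode string are illustrative):

v1 = Volume.create_from_tuple("/data")                        # target only
v2 = Volume.create_from_tuple(("/host/data", "/data"))        # source, target
v3 = Volume.create_from_tuple(("/host/data", "/data", "ro"))  # source, target, mode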
|
def roll(self, count=0, func=sum):
'''Roll some dice!
:param count: [0] Return list of sums
:param func: [sum] Apply func to list of individual die rolls func([])
:return: A single sum or list of ``count`` sums
'''
if count:
return [func([die.roll() for die in self._dice]) for x in range(0, count)]
else:
return func([die.roll() for die in self._dice])
|
Roll some dice!
:param count: [0] Return list of sums
:param func: [sum] Apply func to list of individual die rolls func([])
:return: A single sum or list of ``count`` sums
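A hedged usage sketch: the method only needs self._dice to hold objects with a roll() method, so a throwaway namespace and a minimal stand-in die are enough to exercise it:

import random
from types import SimpleNamespace

class Die:
    """Minimal stand-in die; the real objects only need a roll() method."""
    def __init__(self, sides=6):
        self.sides = sides
    def roll(self):
        return random.randint(1, self.sides)

pool = SimpleNamespace(_dice=[Die() for _ in range(3)])
print(roll(pool))                 # a single 3d6 sum
print(roll(pool, count=5))        # five 3d6 sums
print(roll(pool, func=max))       # the highest die from one roll of the pool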
|
def get_tuple(nuplet, index, default=None):
"""
:param tuple nuplet: A tuple
:param int index: An index
:param default: An optional default value
:return: ``nuplet[index]`` if defined, else ``default`` (possibly ``None``)
"""
if nuplet is None:
return default
try:
return nuplet[index]
except IndexError:
return default
|
:param tuple nuplet: A tuple
:param int index: An index
:param default: An optional default value
:return: ``nuplet[index]`` if defined, else ``default`` (possibly ``None``)
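Quick usage sketch:

print(get_tuple((10, 20, 30), 1))        # 20
print(get_tuple((10, 20, 30), 5))        # None (index out of range)
print(get_tuple((10, 20, 30), 5, -1))    # -1 (explicit default)
print(get_tuple(None, 0, "fallback"))    # "fallback" (no tuple at all)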
|
def _get_property_values_with_defaults(self, classname, property_values):
"""Return the property values for the class, with default values applied where needed."""
# To uphold OrientDB semantics, make a new dict with all property values set
# to their default values, which are None if no default was set.
# Then, overwrite its data with the supplied property values.
final_values = self.get_default_property_values(classname)
final_values.update(property_values)
return final_values
|
Return the property values for the class, with default values applied where needed.
|
def get_objects_from_from_queues(self):
""" Get objects from "from" queues and add them.
:return: True if we got something in the queue, False otherwise.
:rtype: bool
"""
_t0 = time.time()
had_some_objects = False
for module in self.modules_manager.get_external_instances():
queue = module.from_q
if not queue:
continue
while True:
queue_size = queue.qsize()
if queue_size:
statsmgr.gauge('queues.from.%s.count' % module.get_name(), queue_size)
try:
obj = queue.get_nowait()
except Full:
logger.warning("Module %s from queue is full", module.get_name())
except Empty:
break
except (IOError, EOFError) as exp:
logger.warning("Module %s from queue is no more available: %s",
module.get_name(), str(exp))
except Exception as exp: # pylint: disable=broad-except
logger.error("An external module queue got a problem '%s'", str(exp))
else:
had_some_objects = True
self.add(obj)
statsmgr.timer('queues.time', time.time() - _t0)
return had_some_objects
|
Get objects from "from" queues and add them.
:return: True if we got something in the queue, False otherwise.
:rtype: bool
|
def act(self):
"""
Carries out the action associated with Stop button
"""
g = get_root(self).globals
g.clog.debug('Stop pressed')
# Stop exposure meter
# do this first, so timer doesn't also try to enable idle mode
g.info.timer.stop()
def stop_in_background():
try:
self.stopping = True
if execCommand(g, 'abort'):
self.stopped_ok = True
else:
g.clog.warn('Failed to stop run')
self.stopped_ok = False
self.stopping = False
except Exception as err:
g.clog.warn('Failed to stop run. Error = ' + str(err))
self.stopping = False
self.stopped_ok = False
# stopping can take a while during which the GUI freezes so run in
# background.
t = threading.Thread(target=stop_in_background)
t.daemon = True
t.start()
self.after(500, self.check)
|
Carries out the action associated with Stop button
|
def p_propertyDeclaration_3(p):
"""propertyDeclaration_3 : dataType propertyName array ';'"""
p[0] = CIMProperty(p[2], None, type=p[1], is_array=True,
array_size=p[3])
|
propertyDeclaration_3 : dataType propertyName array ';'
|
def send_feedback(self, document_id: str, feedback: List[Field]) -> dict:
"""Send feedback to the model.
This method takes care of sending feedback related to the document specified by document_id.
Feedback consists of ground truth values for the document, specified as a list of Field instances.
>>> from las import ApiClient
>>> api_client = ApiClient(endpoint='<api endpoint>')
>>> feedback = [Field(label='total_amount', value='120.00'), Field(label='purchase_date', value='2019-03-10')]
>>> api_client.send_feedback('<document id>', feedback)
:param document_id: The document id of the document that will receive the feedback
:type document_id: str
:param feedback: A list of :py:class:`~las.Field` representing the ground truth values for the document
:type feedback: List[Field]
:return: Feedback response
:rtype: dict
:raises InvalidCredentialsException: If the credentials are invalid
:raises TooManyRequestsException: If limit of requests per second is reached
:raises LimitExceededException: If limit of total requests per month is reached
:raises requests.exception.RequestException: If error was raised by requests
"""
return self.post_document_id(document_id, feedback)
|
Send feedback to the model.
This method takes care of sending feedback related to the document specified by document_id.
Feedback consists of ground truth values for the document, specified as a list of Field instances.
>>> from las import ApiClient
>>> api_client = ApiClient(endpoint='<api endpoint>')
>>> feedback = [Field(label='total_amount', value='120.00'), Field(label='purchase_date', value='2019-03-10')]
>>> api_client.send_feedback('<document id>', feedback)
:param document_id: The document id of the document that will receive the feedback
:type document_id: str
:param feedback: A list of :py:class:`~las.Field` representing the ground truth values for the document
:type feedback: List[Field]
:return: Feedback response
:rtype: dict
:raises InvalidCredentialsException: If the credentials are invalid
:raises TooManyRequestsException: If limit of requests per second is reached
:raises LimitExceededException: If limit of total requests per month is reached
:raises requests.exception.RequestException: If error was raised by requests
|
def estimate(data, fit_offset="mean", fit_profile="tilt",
border_px=0, from_mask=None, ret_mask=False):
"""Estimate the background value of an image
Parameters
----------
data: np.ndarray
Data from which to compute the background value
fit_profile: str
The type of background profile to fit:
- "offset": offset only
- "poly2o": 2D 2nd order polynomial with mixed terms
- "tilt": 2D linear tilt with offset (default)
fit_offset: str
The method for computing the profile offset
- "fit": offset as fitting parameter
- "gauss": center of a gaussian fit
- "mean": simple average
- "mode": mode (see `qpimage.bg_estimate.mode`)
border_px: float
Assume that a frame of `border_px` pixels around
the image is background.
from_mask: boolean np.ndarray or None
Use a boolean array to define the background area.
The boolean mask must have the same shape as the
input data. `True` elements are used for background
estimation.
ret_mask: bool
Return the boolean mask used to compute the background.
Notes
-----
If both `border_px` and `from_mask` are given, the
intersection of the two is used, i.e. the positions
where both, the frame mask and `from_mask`, are
`True`.
"""
if fit_profile not in VALID_FIT_PROFILES:
msg = "`fit_profile` must be one of {}, got '{}'".format(
VALID_FIT_PROFILES,
fit_profile)
raise ValueError(msg)
if fit_offset not in VALID_FIT_OFFSETS:
msg = "`fit_offset` must be one of {}, got '{}'".format(
VALID_FIT_OFFSETS,
fit_offset)
raise ValueError(msg)
# initial mask image
if from_mask is not None:
assert isinstance(from_mask, np.ndarray)
mask = from_mask.copy()
else:
mask = np.ones_like(data, dtype=bool)
# multiply with border mask image (intersection)
if border_px > 0:
border_px = int(np.round(border_px))
mask_px = np.zeros_like(mask)
mask_px[:border_px, :] = True
mask_px[-border_px:, :] = True
mask_px[:, :border_px] = True
mask_px[:, -border_px:] = True
# intersection
np.logical_and(mask, mask_px, out=mask)
# compute background image
if fit_profile == "tilt":
bgimg = profile_tilt(data, mask)
elif fit_profile == "poly2o":
bgimg = profile_poly2o(data, mask)
else:
bgimg = np.zeros_like(data, dtype=float)
# add offsets
if fit_offset == "fit":
if fit_profile == "offset":
msg = "`fit_offset=='fit'` only valid when `fit_profile!='offset`"
raise ValueError(msg)
# nothing else to do here, using offset from fit
elif fit_offset == "gauss":
bgimg += offset_gaussian((data - bgimg)[mask])
elif fit_offset == "mean":
bgimg += np.mean((data - bgimg)[mask])
elif fit_offset == "mode":
bgimg += offset_mode((data - bgimg)[mask])
if ret_mask:
ret = (bgimg, mask)
else:
ret = bgimg
return ret
|
Estimate the background value of an image
Parameters
----------
data: np.ndarray
Data from which to compute the background value
fit_profile: str
The type of background profile to fit:
- "offset": offset only
- "poly2o": 2D 2nd order polynomial with mixed terms
- "tilt": 2D linear tilt with offset (default)
fit_offset: str
The method for computing the profile offset
- "fit": offset as fitting parameter
- "gauss": center of a gaussian fit
- "mean": simple average
- "mode": mode (see `qpimage.bg_estimate.mode`)
border_px: float
Assume that a frame of `border_px` pixels around
the image is background.
from_mask: boolean np.ndarray or None
Use a boolean array to define the background area.
The boolean mask must have the same shape as the
input data. `True` elements are used for background
estimation.
ret_mask: bool
Return the boolean mask used to compute the background.
Notes
-----
If both `border_px` and `from_mask` are given, the
intersection of the two is used, i.e. the positions
where both, the frame mask and `from_mask`, are
`True`.
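A synthetic usage sketch (assuming the module-level helpers referenced above, e.g. profile_tilt and VALID_FIT_PROFILES, are available): a tilted background plus a bright object in the centre, with a 10-pixel border frame treated as background:

import numpy as np

y, x = np.mgrid[0:100, 0:100]
data = 1.0 + 0.01 * x + 0.02 * y          # linear tilt with offset
data[40:60, 40:60] += 5.0                 # "sample" region, not background

bg, mask = estimate(data, fit_profile="tilt", fit_offset="mean",
                    border_px=10, ret_mask=True)
residual = data - bg
print(residual[mask].std())               # close to zero outside the object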
|
def password_attributes_max_retry(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
password_attributes = ET.SubElement(config, "password-attributes", xmlns="urn:brocade.com:mgmt:brocade-aaa")
max_retry = ET.SubElement(password_attributes, "max-retry")
max_retry.text = kwargs.pop('max_retry')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
def push(self, x):
"""
Push an I{object} onto the stack.
@param x: An object to push.
@type x: L{Frame}
@return: The pushed frame.
@rtype: L{Frame}
"""
if isinstance(x, Frame):
frame = x
else:
frame = Frame(x)
self.stack.append(frame)
#log.debug('push: (%s)\n%s', Repr(frame), Repr(self.stack))
return frame
|
Push an I{object} onto the stack.
@param x: An object to push.
@type x: L{Frame}
@return: The pushed frame.
@rtype: L{Frame}
|
def delete(self, session, commit=True, soft=True):
"""
Delete a row from the DB.
:param session: flask_sqlalchemy session object
:param commit: whether to issue the commit
:param soft: whether this is a soft delete (i.e., update time_removed)
"""
if soft:
self.time_removed = sqlalchemy.func.unix_timestamp()
else:
session.delete(self)
if commit:
session.commit()
|
Delete a row from the DB.
:param session: flask_sqlalchemy session object
:param commit: whether to issue the commit
:param soft: whether this is a soft delete (i.e., update time_removed)
|
def last_version():
"""
Fetch the last version from pypi and return it. On a successful fetch from pypi, the response
is cached for 24 h; on error, it is cached for 10 min.
:return: the last django-cas-server version
:rtype: unicode
"""
try:
last_update, version, success = last_version._cache
except AttributeError:
last_update = 0
version = None
success = False
cache_delta = 24 * 3600 if success else 600
if (time.time() - last_update) < cache_delta:
return version
else:
try:
req = requests.get(settings.CAS_NEW_VERSION_JSON_URL)
data = json.loads(req.text)
version = data["info"]["version"]
last_version._cache = (time.time(), version, True)
return version
except (
KeyError,
ValueError,
requests.exceptions.RequestException
) as error: # pragma: no cover (should not happen unless pypi is not available)
logger.error(
"Unable to fetch %s: %s" % (settings.CAS_NEW_VERSION_JSON_URL, error)
)
last_version._cache = (time.time(), version, False)
|
Fetch the last version from pypi and return it. On a successful fetch from pypi, the response
is cached for 24 h; on error, it is cached for 10 min.
:return: the last django-cas-server version
:rtype: unicode
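The caching idiom used here, stashing the result and its timestamp on the function object itself, in isolation (a minimal sketch, not the django-cas-server code):

import time

def expensive_lookup():
    try:
        last_update, value = expensive_lookup._cache
    except AttributeError:
        last_update, value = 0, None
    if time.time() - last_update < 60:      # reuse the result for 60 s
        return value
    value = sum(range(10**6))               # stand-in for the real work
    expensive_lookup._cache = (time.time(), value)
    return value

print(expensive_lookup())   # computed
print(expensive_lookup())   # served from the function-attribute cache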
|
def load_tab_data(self):
"""Preload all data that for the tabs that will be displayed."""
for tab in self._tabs.values():
if tab.load and not tab.data_loaded:
try:
tab._data = tab.get_context_data(self.request)
except Exception:
tab._data = False
exceptions.handle(self.request)
|
Preload all data for the tabs that will be displayed.
|
def disabled(name):
'''
Disable the RDP service
'''
ret = {'name': name,
'result': True,
'changes': {},
'comment': ''}
stat = __salt__['rdp.status']()
if stat:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'RDP will be disabled'
return ret
ret['result'] = __salt__['rdp.disable']()
ret['changes'] = {'RDP was disabled': True}
return ret
ret['comment'] = 'RDP is disabled'
return ret
|
Disable the RDP service
|
def assign_reads_to_database(query, database_fasta, out_path, params=None):
"""Assign a set of query sequences to a reference database
database_fasta: absolute file path to the reference database
query: absolute file path to query sequences
out_path: absolute file path of the file to be output
params: dict of BWA specific parameters.
* Specify which algorithm to use (bwa-short or bwasw) using the
dict key "algorithm"
* if algorithm is bwasw, specify params for the bwa bwasw
subcommand
* if algorithm is bwa-short, specify params for the bwa samse
subcommand
* if algorithm is bwa-short, must also specify params to use with
bwa aln, which is used to get the sai file necessary to run samse.
bwa aln params should be passed in using dict key "aln_params" and
the associated value should be a dict of params for the bwa aln
subcommand
* if a temporary directory is not specified in params using dict
key "temp_dir", it will be assumed to be /tmp
This method returns an open file object (SAM format).
"""
if params is None:
params = {}
# set the output path
params['-f'] = out_path
# if the algorithm is not specified in the params dict, or the algorithm
# is not recognized, raise an exception
if 'algorithm' not in params:
raise InvalidArgumentApplicationError("Must specify which algorithm to"
" use ('bwa-short' or 'bwasw')")
elif params['algorithm'] not in ('bwa-short', 'bwasw'):
raise InvalidArgumentApplicationError("Unknown algorithm '%s' Please "
"enter either 'bwa-short' or "
"'bwasw'." % params['algorithm'])
# if the temp directory is not specified, assume /tmp
if 'temp_dir' not in params:
params['temp_dir'] = '/tmp'
# if the algorithm is bwa-short, we must first use bwa aln to get an sai
# file before calling bwa samse on that sai file, so we need to know how
# to run bwa aln. Therefore, we must ensure there's an entry containing
# those parameters
if params['algorithm'] == 'bwa-short':
if 'aln_params' not in params:
raise InvalidArgumentApplicationError("With bwa-short, need to "
"specify a key 'aln_params' "
"and its value, a dictionary"
" to pass to bwa aln, since"
" bwa aln is an intermediate"
" step when doing "
"bwa-short.")
# we have this params dict, with "algorithm" and "temp_dir", etc which are
# not for any of the subcommands, so make a new params dict that is the
# same as the original minus these addendums
subcommand_params = {}
for k, v in params.iteritems():
if k not in ('algorithm', 'temp_dir', 'aln_params'):
subcommand_params[k] = v
# build index from database_fasta
# get a temporary file name that is not in use
_, index_prefix = mkstemp(dir=params['temp_dir'], suffix='')
create_bwa_index_from_fasta_file(database_fasta, {'-p': index_prefix})
# if the algorithm is bwasw, things are pretty simple. Just instantiate
# the proper controller and set the files
if params['algorithm'] == 'bwasw':
bwa = BWA_bwasw(params=subcommand_params)
files = {'prefix': index_prefix, 'query_fasta': query}
# if the algorithm is bwa-short, it's not so simple
elif params['algorithm'] == 'bwa-short':
# we have to call bwa_aln to get the sai file needed for samse
# use the aln_params we ensured we had above
bwa_aln = BWA_aln(params=params['aln_params'])
aln_files = {'prefix': index_prefix, 'fastq_in': query}
# get the path to the sai file
sai_file_path = bwa_aln(aln_files)['output'].name
# we will use that sai file to run samse
bwa = BWA_samse(params=subcommand_params)
files = {'prefix': index_prefix, 'sai_in': sai_file_path,
'fastq_in': query}
# run which ever app controller we decided was correct on the files
# we set up
result = bwa(files)
# they both return a SAM file, so return that
return result['output']
|
Assign a set of query sequences to a reference database
database_fasta: absolute file path to the reference database
query: absolute file path to query sequences
out_path: absolute file path of the file to be output
params: dict of BWA specific parameters.
* Specify which algorithm to use (bwa-short or bwasw) using the
dict key "algorithm"
* if algorithm is bwasw, specify params for the bwa bwasw
subcommand
* if algorithm is bwa-short, specify params for the bwa samse
subcommand
* if algorithm is bwa-short, must also specify params to use with
bwa aln, which is used to get the sai file necessary to run samse.
bwa aln params should be passed in using dict key "aln_params" and
the associated value should be a dict of params for the bwa aln
subcommand
* if a temporary directory is not specified in params using dict
key "temp_dir", it will be assumed to be /tmp
This method returns an open file object (SAM format).
|
def get_changes(self, checks=None, imports=None, resources=None,
task_handle=taskhandle.NullTaskHandle()):
"""Get the changes needed by this restructuring
`resources` can be a list of `rope.base.resources.File`\s to
apply the restructuring on. If `None`, the restructuring will
be applied to all python files.
`checks` argument has been deprecated. Use the `args` argument
of the constructor. The usage of::
strchecks = {'obj1.type': 'mod.A', 'obj2': 'mod.B',
'obj3.object': 'mod.C'}
checks = restructuring.make_checks(strchecks)
can be replaced with::
args = {'obj1': 'type=mod.A', 'obj2': 'name=mod.B',
'obj3': 'object=mod.C'}
where obj1, obj2 and obj3 are wildcard names that appear
in restructuring pattern.
"""
if checks is not None:
warnings.warn(
'The use of checks parameter is deprecated; '
'use the args parameter of the constructor instead.',
DeprecationWarning, stacklevel=2)
for name, value in checks.items():
self.args[name] = similarfinder._pydefined_to_str(value)
if imports is not None:
warnings.warn(
'The use of imports parameter is deprecated; '
'use imports parameter of the constructor, instead.',
DeprecationWarning, stacklevel=2)
self.imports = imports
changes = change.ChangeSet('Restructuring <%s> to <%s>' %
(self.pattern, self.goal))
if resources is not None:
files = [resource for resource in resources
if libutils.is_python_file(self.project, resource)]
else:
files = self.project.get_python_files()
job_set = task_handle.create_jobset('Collecting Changes', len(files))
for resource in files:
job_set.started_job(resource.path)
pymodule = self.project.get_pymodule(resource)
finder = similarfinder.SimilarFinder(pymodule,
wildcards=self.wildcards)
matches = list(finder.get_matches(self.pattern, self.args))
computer = self._compute_changes(matches, pymodule)
result = computer.get_changed()
if result is not None:
imported_source = self._add_imports(resource, result,
self.imports)
changes.add_change(change.ChangeContents(resource,
imported_source))
job_set.finished_job()
return changes
|
Get the changes needed by this restructuring
`resources` can be a list of `rope.base.resources.File`\s to
apply the restructuring on. If `None`, the restructuring will
be applied to all python files.
`checks` argument has been deprecated. Use the `args` argument
of the constructor. The usage of::
strchecks = {'obj1.type': 'mod.A', 'obj2': 'mod.B',
'obj3.object': 'mod.C'}
checks = restructuring.make_checks(strchecks)
can be replaced with::
args = {'obj1': 'type=mod.A', 'obj2': 'name=mod.B',
'obj3': 'object=mod.C'}
where obj1, obj2 and obj3 are wildcard names that appear
in restructuring pattern.
|
def split_by_idxs(self, train_idx, valid_idx):
"Split the data between `train_idx` and `valid_idx`."
return self.split_by_list(self[train_idx], self[valid_idx])
|
Split the data between `train_idx` and `valid_idx`.
|
def check_reaction_consistency(database, solver, exchange=set(),
checked=set(), zeromass=set(), weights={}):
"""Check inconsistent reactions by minimizing mass residuals
Return a reaction iterable, and compound iterable. The reaction iterable
yields reaction ids and mass residuals. The compound iterable yields
compound ids and mass assignments.
Each compound is assigned a mass of at least one, and the masses are
balanced using the stoichiometric matrix. In addition, each reaction has a
residual mass that is included in the mass balance equations. The L1-norm
of the residuals is minimized. Reactions in the checked set are assumed to
have been manually checked and therefore have the residual fixed at zero.
"""
# Create Flux balance problem
prob = solver.create_problem()
compound_set = _non_localized_compounds(database)
mass_compounds = compound_set.difference(zeromass)
# Define mass variables
m = prob.namespace(mass_compounds, lower=1)
# Define residual mass variables and objective constraints
z = prob.namespace(database.reactions, lower=0)
r = prob.namespace(database.reactions)
objective = z.expr((reaction_id, weights.get(reaction_id, 1))
for reaction_id in database.reactions)
prob.set_objective(objective)
rs = r.set(database.reactions)
zs = z.set(database.reactions)
prob.add_linear_constraints(zs >= rs, rs >= -zs)
massbalance_lhs = {reaction_id: 0 for reaction_id in database.reactions}
for (compound, reaction_id), value in iteritems(database.matrix):
if compound not in zeromass:
mass_var = m(compound.in_compartment(None))
massbalance_lhs[reaction_id] += mass_var * value
for reaction_id, lhs in iteritems(massbalance_lhs):
if reaction_id not in exchange:
if reaction_id not in checked:
prob.add_linear_constraints(lhs + r(reaction_id) == 0)
else:
prob.add_linear_constraints(lhs == 0)
# Solve
try:
prob.solve(lp.ObjectiveSense.Minimize)
except lp.SolverError as e:
raise_from(
MassConsistencyError('Failed to solve mass consistency: {}'.format(
e)), e)
def iterate_reactions():
for reaction_id in database.reactions:
residual = r.value(reaction_id)
yield reaction_id, residual
def iterate_compounds():
for compound in mass_compounds:
yield compound, m.value(compound)
return iterate_reactions(), iterate_compounds()
|
Check inconsistent reactions by minimizing mass residuals
Return a reaction iterable, and compound iterable. The reaction iterable
yields reaction ids and mass residuals. The compound iterable yields
compound ids and mass assignments.
Each compound is assigned a mass of at least one, and the masses are
balanced using the stoichiometric matrix. In addition, each reaction has a
residual mass that is included in the mass balance equations. The L1-norm
of the residuals is minimized. Reactions in the checked set are assumed to
have been manually checked and therefore have the residual fixed at zero.
|
def minkowski_distance(x, y, p=2):
"""
Calculates the Minkowski distance between two points.
:param x: the first point
:param y: the second point
:param p: the order of the Minkowski algorithm. If *p=1* it is equal
to the Manhattan distance, if *p=2* it is equal to the Euclidean
distance. The higher the order, the closer it converges to the
Chebyshev distance, which has *p=infinity*.
"""
from math import pow
assert len(y) == len(x)
assert len(x) >= 1
sum = 0
for i in range(len(x)):
sum += abs(x[i] - y[i]) ** p
return pow(sum, 1.0 / float(p))
|
Calculates the Minkowski distance between two points.
:param x: the first point
:param y: the second point
:param p: the order of the Minkowski algorithm. If *p=1* it is equal
to the Manhattan distance, if *p=2* it is equal to the Euclidean
distance. The higher the order, the closer it converges to the
Chebyshev distance, which has *p=infinity*.
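A quick check of the special cases mentioned above:

a, b = (0.0, 0.0), (3.0, 4.0)
print(minkowski_distance(a, b, p=1))    # 7.0  (Manhattan)
print(minkowski_distance(a, b, p=2))    # 5.0  (Euclidean)
print(minkowski_distance(a, b, p=10))   # ~4.02, approaching max(|dx|, |dy|) = 4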
|
async def connect(self, client_id, conn_string):
"""Connect to a device on behalf of a client.
See :meth:`AbstractDeviceAdapter.connect`.
Args:
client_id (str): The client we are working for.
conn_string (str): A connection string that will be
passed to the underlying device adapter to connect.
Raises:
DeviceServerError: There is an issue with your client_id.
DeviceAdapterError: The adapter had an issue connecting.
"""
conn_id = self.adapter.unique_conn_id()
self._client_info(client_id)
await self.adapter.connect(conn_id, conn_string)
self._hook_connect(conn_string, conn_id, client_id)
|
Connect to a device on behalf of a client.
See :meth:`AbstractDeviceAdapter.connect`.
Args:
client_id (str): The client we are working for.
conn_string (str): A connection string that will be
passed to the underlying device adapter to connect.
Raises:
DeviceServerError: There is an issue with your client_id.
DeviceAdapterError: The adapter had an issue connecting.
|
def isNumber(self, value):
"""
Validate whether a value is a number or not
"""
try:
str(value)
float(value)
return True
except ValueError:
return False
|
Validate whether a value is a number or not
|
def _step(self, dataset):
'''Advance the state of the optimizer by one step.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A dataset for optimizing the model.
Returns
-------
train_monitors : dict
A dictionary mapping monitor names to values.
'''
if dataset is None:
values = [self.f_step()]
else:
values = [self.f_step(*x) for x in dataset]
return collections.OrderedDict(
zip(self._monitor_names, np.mean(values, axis=0)))
|
Advance the state of the optimizer by one step.
Parameters
----------
dataset : :class:`Dataset <downhill.dataset.Dataset>`
A dataset for optimizing the model.
Returns
-------
train_monitors : dict
A dictionary mapping monitor names to values.
|
def get_centroids(self, ridx):
"""
:returns: array of centroids for the given rupture index
"""
centroids = []
with h5py.File(self.source_file, "r") as hdf5:
for idx in ridx:
trace = "{:s}/{:s}".format(self.idx_set["sec"], str(idx))
centroids.append(hdf5[trace + "/Centroids"].value)
return numpy.concatenate(centroids)
|
:returns: array of centroids for the given rupture index
|
def extract_params():
"""Extract request params."""
uri = _get_uri_from_request(request)
http_method = request.method
headers = dict(request.headers)
if 'wsgi.input' in headers:
del headers['wsgi.input']
if 'wsgi.errors' in headers:
del headers['wsgi.errors']
# Werkzeug, and subsequently Flask, provide safe Authorization header
# parsing, so we just replace the Authorization header with the extracted
# info if it was successfully parsed.
if request.authorization:
headers['Authorization'] = request.authorization
body = request.form.to_dict()
return uri, http_method, body, headers
|
Extract request params.
|
def pack(self, value=None):
r"""Pack the value as a binary representation.
Consider an example with the UBInt8 class, which inherits from
GenericType:
>>> from pyof.foundation.basic_types import UBInt8
>>> objectA = UBInt8(1)
>>> objectB = 5
>>> objectA.pack()
b'\x01'
>>> objectA.pack(objectB)
b'\x05'
Args:
value: If the value is None, then we will pack the value of the
current instance. Otherwise, if value is an instance of the
same type as the current instance, then we call the pack of the
value object. Otherwise, we will use the current instance pack
method on the passed value.
Returns:
bytes: The binary representation.
Raises:
:exc:`~.exceptions.BadValueException`: If the value does not
fit the binary format.
"""
if isinstance(value, type(self)):
return value.pack()
if value is None:
value = self.value
elif 'value' in dir(value):
# if it is enum or bitmask gets only the 'int' value
value = value.value
try:
return struct.pack(self._fmt, value)
except struct.error:
expected_type = type(self).__name__
actual_type = type(value).__name__
msg_args = expected_type, value, actual_type
msg = 'Expected {}, found value "{}" of type {}'.format(*msg_args)
raise PackException(msg)
|
r"""Pack the value as a binary representation.
Consider an example with the UBInt8 class, which inherits from
GenericType:
>>> from pyof.foundation.basic_types import UBInt8
>>> objectA = UBInt8(1)
>>> objectB = 5
>>> objectA.pack()
b'\x01'
>>> objectA.pack(objectB)
b'\x05'
Args:
value: If the value is None, then we will pack the value of the
current instance. Otherwise, if value is an instance of the
same type as the current instance, then we call the pack of the
value object. Otherwise, we will use the current instance pack
method on the passed value.
Returns:
bytes: The binary representation.
Raises:
:exc:`~.exceptions.BadValueException`: If the value does not
fit the binary format.
|
def register(self, resource=None, **meta):
""" Add resource to the API.
:param resource: Resource class for registration
:param **meta: Redefine Meta options for the resource
:return adrest.views.Resource: Generated resource.
"""
if resource is None:
def wrapper(resource):
return self.register(resource, **meta)
return wrapper
# Must be instance of ResourceView
if not issubclass(resource, ResourceView):
raise AssertionError("%s not subclass of ResourceView" % resource)
# Cannot be abstract
if resource._meta.abstract:
raise AssertionError("Attempt register of abstract resource: %s."
% resource)
# Fabric of resources
meta = dict(self.meta, **meta)
meta['name'] = meta.get('name', resource._meta.name)
options = type('Meta', tuple(), meta)
params = dict(api=self, Meta=options, **meta)
params['__module__'] = '%s.%s' % (
self.prefix, self.str_version.replace('.', '_'))
params['__doc__'] = resource.__doc__
new_resource = type(
'%s%s' % (resource.__name__, len(self.resources)),
(resource,), params)
if self.resources.get(new_resource._meta.url_name):
logger.warning(
"A resource '%r' is replacing the existing record for '%s'",
new_resource, self.resources.get(new_resource._meta.url_name))
self.resources[new_resource._meta.url_name] = new_resource
return resource
|
Add resource to the API.
:param resource: Resource class for registration
:param **meta: Redefine Meta options for the resource
:return adrest.views.Resource: Generated resource.
|
def split (s, delimter, trim = True, limit = 0): # pragma: no cover
"""
Split a string using a single-character delimiter
@params:
`s`: the string
`delimter`: the single-character delimiter
`trim`: whether to trim each part. Default: True
`limit`: the maximum number of splits. Default: 0 (no limit)
@examples:
```python
ret = split("'a,b',c", ",")
# ret == ["'a,b'", "c"]
# ',' inside quotes will be recognized.
```
@returns:
The list of substrings
"""
ret = []
special1 = ['(', ')', '[', ']', '{', '}']
special2 = ['\'', '"']
special3 = '\\'
flags1 = [0, 0, 0]
flags2 = [False, False]
flags3 = False
start = 0
nlim = 0
for i, c in enumerate(s):
if c == special3:
# next char is escaped
flags3 = not flags3
elif not flags3:
# no escape
if c in special1:
index = special1.index(c)
if index % 2 == 0:
flags1[int(index/2)] += 1
else:
flags1[int(index/2)] -= 1
elif c in special2:
index = special2.index(c)
flags2[index] = not flags2[index]
elif c == delimter and not any(flags1) and not any(flags2):
r = s[start:i]
if trim: r = r.strip()
ret.append(r)
start = i + 1
nlim = nlim + 1
if limit and nlim >= limit:
break
else:
# escaping closed
flags3 = False
r = s[start:]
if trim: r = r.strip()
ret.append(r)
return ret
|
Split a string using a single-character delimiter
@params:
`s`: the string
`delimter`: the single-character delimiter
`trim`: whether to trim each part. Default: True
`limit`: the maximum number of splits. Default: 0 (no limit)
@examples:
```python
ret = split("'a,b',c", ",")
# ret == ["'a,b'", "c"]
# ',' inside quotes will be recognized.
```
@returns:
The list of substrings
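A few more usage sketches: brackets and escapes also protect the delimiter, and `limit` caps the number of splits:

print(split("f(a,b),c", ","))           # ["f(a,b)", "c"]
print(split("a\\,b,c", ","))            # ["a\\,b", "c"], the escaped comma is kept
print(split("a,b,c,d", ",", limit=2))   # ["a", "b", "c,d"]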
|
def make_unique_str(num_chars=20):
"""make a random string of characters for a temp filename"""
chars = 'abcdefghigklmnopqrstuvwxyz'
all_chars = chars + chars.upper() + '01234567890'
picks = list(all_chars)
return ''.join([choice(picks) for i in range(num_chars)])
|
make a random string of characters for a temp filename
|
def create(self, file_or_path, **kwargs):
"""
Creates an upload for the given file or path.
"""
opened = False
if isinstance(file_or_path, str_type()):
file_or_path = open(file_or_path, 'rb')
opened = True
elif not getattr(file_or_path, 'read', False):
raise Exception("A file or path to a file is required for this operation.")
try:
return self.client._post(
self._url(),
file_or_path,
headers=self._resource_class.create_headers({}),
file_upload=True
)
finally:
if opened:
file_or_path.close()
|
Creates an upload for the given file or path.
|
def data_worker(**kwargs):
"""
Function to be spawned concurrently:
consumes data keys from the input queue and pushes the resulting dataframes to the output map
"""
if kwargs is not None:
if "function" in kwargs:
function = kwargs["function"]
else:
Exception("Invalid arguments, no function specified")
if "input" in kwargs:
input_queue = kwargs["input"]
else:
Exception("Invalid Arguments, no input queue")
if "output" in kwargs:
output_map = kwargs["output"]
else:
Exception("Invalid Arguments, no output map")
if "token" in kwargs:
argsdict = {"quandl_token": kwargs["token"]}
else:
if "Quandl" in function.__module__:
Exception("Invalid Arguments, no Quandl token")
if ("source" and "begin" and "end") in kwargs:
argsdict = {"data_source": kwargs["source"], "begin": kwargs["begin"], "end": kwargs["end"]}
else:
if "pandas.io.data" in function.__module__:
Exception("Invalid Arguments, no pandas data source specified")
if ("source" in kwargs) and (("begin" and "end") not in kwargs):
argsdict = {"data_source": kwargs["source"]}
else:
if "pandas.io.data" in function.__module__:
Exception("Invalid Arguments, no pandas data source specified")
else:
Exception("Invalid Arguments")
retries = 5
while not input_queue.empty():
data_key = input_queue.get()
get_data(function, data_key, output_map, retries, argsdict)
|
Function to be spawned concurrently:
consumes data keys from the input queue and pushes the resulting dataframes to the output map
|
def add_repo(self, repo):
"""Add ``repo`` to this team.
:param str repo: (required), form: 'user/repo'
:returns: bool
"""
url = self._build_url('repos', repo, base_url=self._api)
return self._boolean(self._put(url), 204, 404)
|
Add ``repo`` to this team.
:param str repo: (required), form: 'user/repo'
:returns: bool
|
def polfit_residuals_with_sigma_rejection(
x, y, deg, times_sigma_reject,
color='b', size=75,
xlim=None, ylim=None,
xlabel=None, ylabel=None, title=None,
use_r=None,
geometry=(0,0,640,480),
debugplot=0):
"""Polynomial fit with iterative rejection of points.
This function makes use of function polfit_residuals for display
purposes.
Parameters
----------
x : 1d numpy array, float
X coordinates of the data being fitted.
y : 1d numpy array, float
Y coordinates of the data being fitted.
deg : int
Degree of the fitting polynomial.
times_sigma_reject : float or None
Number of times the standard deviation to reject points
iteratively. If None, the fit does not reject any point.
color : single character or 1d numpy array of characters
Color for all the symbols (single character) or for each
individual symbol (array of color names with the same length as
'x' or 'y'). If 'color' is a single character, the rejected
points are displayed in red color, whereas when 'color' is an
array of color names, rejected points are displayed with the
color provided in this array.
size : int
Marker size for all the symbols (single character) or for each
individual symbol (array of integers with the same length as
'x' or 'y').
xlim : tuple (floats)
Plot limits in the X axis.
ylim : tuple (floats)
Plot limits in the Y axis.
xlabel : string
Character string for label in X axis.
ylabel : string
Character string for label in y axis.
title : string
Character string for graph title.
use_r : bool
If True, the function computes several fits, using R, to
polynomials of degree deg, deg+1 and deg+2 (when possible).
geometry : tuple (4 integers) or None
x, y, dx, dy values employed to set the window geometry.
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Return
------
poly : instance of Polynomial (numpy)
Result from the polynomial fit using numpy Polynomial. Only
points not flagged as rejected are employed in the fit.
yres : 1d numpy array, float
Residuals from polynomial fit. Note that the residuals are
computed for all the points, including the rejected ones. In
this way the dimension of this array is the same as the
dimensions of the input 'x' and 'y' arrays.
reject : 1d numpy array, bool
Boolean array indicating rejected points.
"""
# protections
if type(x) is not np.ndarray:
raise ValueError("x=" + str(x) + " must be a numpy.ndarray")
elif x.ndim != 1:
raise ValueError("x.ndim=" + str(x.ndim) + " must be 1")
if type(y) is not np.ndarray:
raise ValueError("y=" + str(y) + " must be a numpy.ndarray")
elif y.ndim != 1:
raise ValueError("y.ndim=" + str(y.ndim) + " must be 1")
npoints = x.size
if npoints != y.size:
raise ValueError("x.size != y.size")
if type(deg) not in [np.int, np.int64]:
raise ValueError("deg=" + str(deg) +
" is not a valid integer")
if deg >= npoints:
raise ValueError("Polynomial degree=" + str(deg) +
" can't be fitted with npoints=" + str(npoints))
# initialize boolean rejection array
reject = np.zeros(npoints, dtype=np.bool)
# if there is no room to remove any point, compute a fit without
# rejection
if deg == npoints - 1:
poly, yres = polfit_residuals(x=x, y=y, deg=deg, reject=None,
color=color, size=size,
xlim=xlim, ylim=ylim,
xlabel=xlabel, ylabel=ylabel,
title=title,
use_r=use_r,
geometry=geometry,
debugplot=debugplot)
return poly, yres, reject
# main loop to reject points iteratively
loop_to_reject_points = True
poly = None
yres = None
while loop_to_reject_points:
if abs(debugplot) in [21, 22]:
poly, yres = polfit_residuals(x=x, y=y, deg=deg, reject=reject,
color=color, size=size,
xlim=xlim, ylim=ylim,
xlabel=xlabel, ylabel=ylabel,
title=title,
use_r=use_r,
geometry=geometry,
debugplot=debugplot)
else:
poly, yres = polfit_residuals(x=x, y=y, deg=deg, reject=reject)
# check that there is room to remove a point with the current
# polynomial degree
npoints_effective = npoints - np.sum(reject)
if deg < npoints_effective - 1:
# determine robust standard deviation, excluding points
# already rejected
# --- method 1 ---
# yres_fitted = yres[np.logical_not(reject)]
# q25, q75 = np.percentile(yres_fitted, q=[25.0, 75.0])
# rms = 0.7413 * (q75 - q25)
# --- method 2 ---
yres_fitted = np.abs(yres[np.logical_not(reject)])
rms = np.median(yres_fitted)
if abs(debugplot) >= 10:
print("--> robust rms:", rms)
# reject fitted point exceeding the threshold with the
# largest deviation (note: with this method only one point
# is removed in each iteration of the loop; this allows the
# recomputation of the polynomial fit which, sometimes,
# transforms deviant points into good ones)
index_to_remove = []
for i in range(npoints):
if not reject[i]:
if np.abs(yres[i]) > times_sigma_reject * rms:
index_to_remove.append(i)
if abs(debugplot) >= 10:
print('--> suspicious point #', i + 1)
if len(index_to_remove) == 0:
if abs(debugplot) >= 10:
print('==> no need to remove any point')
loop_to_reject_points = False
else:
imax = np.argmax(np.abs(yres[index_to_remove]))
reject[index_to_remove[imax]] = True
if abs(debugplot) >= 10:
print('==> removing point #', index_to_remove[imax] + 1)
else:
loop_to_reject_points = False
# plot final fit in case it has not been already shown
if abs(debugplot) % 10 != 0:
if abs(debugplot) not in [21, 22]:
poly, yres = polfit_residuals(x=x, y=y, deg=deg, reject=reject,
color=color, size=size,
xlim=xlim, ylim=ylim,
xlabel=xlabel, ylabel=ylabel,
title=title,
use_r=use_r,
geometry=geometry,
debugplot=debugplot)
else:
if abs(debugplot) >= 10:
print(' ')
# return result
return poly, yres, reject
|
Polynomial fit with iterative rejection of points.
This function makes use of function polfit_residuals for display
purposes.
Parameters
----------
x : 1d numpy array, float
X coordinates of the data being fitted.
y : 1d numpy array, float
Y coordinates of the data being fitted.
deg : int
Degree of the fitting polynomial.
times_sigma_reject : float or None
Number of times the standard deviation to reject points
iteratively. If None, the fit does not reject any point.
color : single character or 1d numpy array of characters
Color for all the symbols (single character) or for each
individual symbol (array of color names with the same length as
'x' or 'y'). If 'color' is a single character, the rejected
points are displayed in red color, whereas when 'color' is an
array of color names, rejected points are displayed with the
color provided in this array.
size : int
Marker size for all the symbols (single character) or for each
individual symbol (array of integers with the same length as
'x' or 'y').
xlim : tuple (floats)
Plot limits in the X axis.
ylim : tuple (floats)
Plot limits in the Y axis.
xlabel : string
Character string for label in X axis.
ylabel : string
Character string for label in y axis.
title : string
Character string for graph title.
use_r : bool
If True, the function computes several fits, using R, to
polynomials of degree deg, deg+1 and deg+2 (when possible).
geometry : tuple (4 integers) or None
x, y, dx, dy values employed to set the window geometry.
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Return
------
poly : instance of Polynomial (numpy)
Result from the polynomial fit using numpy Polynomial. Only
points not flagged as rejected are employed in the fit.
yres : 1d numpy array, float
Residuals from polynomial fit. Note that the residuals are
computed for all the points, including the rejected ones. In
this way the dimension of this array is the same as the
dimensions of the input 'x' and 'y' arrays.
reject : 1d numpy array, bool
Boolean array indicating rejected points.
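A synthetic usage sketch (assuming polfit_residuals from the same module is importable): fit a quadratic to noisy data with one injected outlier and let the 3-sigma rejection flag it:

import numpy as np

rng = np.random.RandomState(0)
x = np.linspace(0.0, 10.0, 25)
y = 2.0 + 0.5 * x - 0.1 * x**2 + rng.normal(scale=0.05, size=x.size)
y[12] += 3.0                                  # inject an outlier

poly, yres, reject = polfit_residuals_with_sigma_rejection(
    x, y, deg=2, times_sigma_reject=3.0)
print(np.where(reject)[0])                    # expected: [12], the outlier
print(np.abs(yres[~reject]).max())            # residuals of the kept points stay small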
|
def _get_devices_by_activation_state(self, state):
'''Get a list of bigips by activation state.
:param state: str -- state to filter the returned list of devices
:returns: list -- list of devices that are in the given state
'''
devices_with_state = []
for device in self.devices:
act = device.tm.cm.devices.device.load(
name=get_device_info(device).name,
partition=self.partition
)
if act.failoverState == state:
devices_with_state.append(device)
return devices_with_state
|
Get a list of bigips by activation state.
:param state: str -- state to filter the returned list of devices
:returns: list -- list of devices that are in the given state
|
def _compute_mean(self, C, f0, f1, f2, SC, mag, rrup, idxs, mean,
scale_fac):
"""
Compute mean value (for a set of indexes) without site amplification
terms. This is equation (5), p. 2191, without S term.
"""
mean[idxs] = (C['c1'] +
C['c2'] * mag +
C['c3'] * (mag ** 2) +
(C['c4'] + C['c5'] * mag) * f1[idxs] +
(C['c6'] + C['c7'] * mag) * f2[idxs] +
(C['c8'] + C['c9'] * mag) * f0[idxs] +
C['c10'] * rrup[idxs] +
self._compute_stress_drop_adjustment(SC, mag, scale_fac))
|
Compute mean value (for a set of indexes) without site amplification
terms. This is equation (5), p. 2191, without S term.
|