code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def silenceRemoval(x, fs, st_win, st_step, smoothWindow=0.5, weight=0.5, plot=False):
    '''
    Event Detection (silence removal)
    ARGUMENTS:
         - x:                the input audio signal
         - fs:               sampling freq
         - st_win, st_step:  window size and step in seconds
         - smoothWindow:     (optional) smooth window (in seconds)
         - weight:           (optional) weight factor (0 < weight < 1) the higher, the more strict
         - plot:             (optional) True if results are to be plotted
    RETURNS:
         - seg_limits:       list of segment limits in seconds (e.g [[0.1, 0.9], [1.4, 3.0]] means that
                             the resulting segments are (0.1 - 0.9) seconds and (1.4, 3.0) seconds
    '''
    # clamp weight into the open interval (0, 1): the probability threshold
    # below is a weighted blend of the low and high tails and needs 0 < w < 1
    if weight >= 1:
        weight = 0.99
    if weight <= 0:
        weight = 0.01
    # Step 1: feature extraction
    x = audioBasicIO.stereo2mono(x)
    st_feats, _ = aF.stFeatureExtraction(x, fs, st_win * fs,
                                         st_step * fs)
    # Step 2: train binary svm classifier of low vs high energy frames
    # keep only the energy short-term sequence (2nd feature)
    st_energy = st_feats[1, :]
    en = numpy.sort(st_energy)
    # number of 10% of the total short-term windows
    l1 = int(len(en) / 10)
    # compute "lower" 10% energy threshold (epsilon avoids an exact-zero mean)
    t1 = numpy.mean(en[0:l1]) + 0.000000000000001
    # compute "higher" 10% energy threshold
    t2 = numpy.mean(en[-l1:-1]) + 0.000000000000001
    # get all features that correspond to low energy
    class1 = st_feats[:, numpy.where(st_energy <= t1)[0]]
    # get all features that correspond to high energy
    class2 = st_feats[:, numpy.where(st_energy >= t2)[0]]
    # form the binary classification task and ...
    faets_s = [class1.T, class2.T]
    # normalize and train the respective svm probabilistic model
    # (ONSET vs SILENCE)
    [faets_s_norm, means_s, stds_s] = aT.normalizeFeatures(faets_s)
    svm = aT.trainSVM(faets_s_norm, 1.0)
    # Step 3: compute onset probability based on the trained svm
    prob_on_set = []
    for i in range(st_feats.shape[1]):
        # for each frame: normalize with the training statistics
        cur_fv = (st_feats[:, i] - means_s) / stds_s
        # get svm probability (that it belongs to the ONSET class)
        prob_on_set.append(svm.predict_proba(cur_fv.reshape(1,-1))[0][1])
    prob_on_set = numpy.array(prob_on_set)
    # smooth probability:
    prob_on_set = smoothMovingAvg(prob_on_set, smoothWindow / st_step)
    # Step 4A: detect onset frame indices:
    prog_on_set_sort = numpy.sort(prob_on_set)
    # find probability Threshold as a weighted average
    # of top 10% and lower 10% of the values
    Nt = int(prog_on_set_sort.shape[0] / 10)
    T = (numpy.mean((1 - weight) * prog_on_set_sort[0:Nt]) +
         weight * numpy.mean(prog_on_set_sort[-Nt::]))
    # get the indices of the frames that satisfy the thresholding
    max_idx = numpy.where(prob_on_set > T)[0]
    i = 0
    time_clusters = []
    seg_limits = []
    # Step 4B: group frame indices to onset segments
    while i < len(max_idx):
        # for each of the detected onset indices
        cur_cluster = [max_idx[i]]
        if i == len(max_idx)-1:
            break
        # absorb subsequent indices while the gap is at most 2 frames
        while max_idx[i+1] - cur_cluster[-1] <= 2:
            cur_cluster.append(max_idx[i+1])
            i += 1
            if i == len(max_idx)-1:
                break
        i += 1
        time_clusters.append(cur_cluster)
        # convert first/last frame index of the cluster to seconds
        seg_limits.append([cur_cluster[0] * st_step,
                           cur_cluster[-1] * st_step])
    # Step 5: Post process: remove very small segments:
    min_dur = 0.2
    seg_limits_2 = []
    for s in seg_limits:
        if s[1] - s[0] > min_dur:
            seg_limits_2.append(s)
    seg_limits = seg_limits_2
    if plot:
        timeX = numpy.arange(0, x.shape[0] / float(fs), 1.0 / fs)
        plt.subplot(2, 1, 1)
        plt.plot(timeX, x)
        for s in seg_limits:
            plt.axvline(x=s[0])
            plt.axvline(x=s[1])
        plt.subplot(2, 1, 2)
        plt.plot(numpy.arange(0, prob_on_set.shape[0] * st_step, st_step),
                 prob_on_set)
        plt.title('Signal')
        for s in seg_limits:
            plt.axvline(x=s[0])
            plt.axvline(x=s[1])
        plt.title('svm Probability')
        plt.show()
    return seg_limits
|
Event Detection (silence removal)
ARGUMENTS:
- x: the input audio signal
- fs: sampling freq
- st_win, st_step: window size and step in seconds
- smoothWindow: (optional) smooth window (in seconds)
- weight: (optional) weight factor (0 < weight < 1) the higher, the more strict
- plot: (optinal) True if results are to be plotted
RETURNS:
- seg_limits: list of segment limits in seconds (e.g [[0.1, 0.9], [1.4, 3.0]] means that
the resulting segments are (0.1 - 0.9) seconds and (1.4, 3.0) seconds
|
def load_ldap_config(self):  # pragma: no cover
    """Configure LDAP Client settings.

    Reads ``<config_dir>/ldap_info.yaml`` and copies the connection
    fields (server, user DN, port, base DN, mail domain, service OU)
    onto the instance. On a missing/unreadable file a diagnostic is
    printed instead of raising, leaving the instance unconfigured.
    """
    try:
        with open('{}/ldap_info.yaml'.format(self.config_dir),
                  'r') as FILE:
            # safe_load: yaml.load() without an explicit Loader can
            # construct arbitrary Python objects from the stream and is
            # deprecated (unsafe) since PyYAML 5.1.
            config = yaml.safe_load(FILE)
            self.host = config['server']
            self.user_dn = config['user_dn']
            self.port = config['port']
            self.basedn = config['basedn']
            self.mail_domain = config['mail_domain']
            self.service_ou = config['service_ou']
    except OSError as err:
        # best-effort: report and continue rather than abort startup
        print('{}: Config file ({}/ldap_info.yaml) not found'.format(
            type(err), self.config_dir))
|
Configure LDAP Client settings.
|
def permute(self, idx):
    """Reorder the columns of every factor matrix in place.

    ``idx`` must be a permutation of ``range(self.rank)``; the same
    column order is applied to each factor. Returns the updated list
    of factors (also stored on ``self.factors``).
    """
    # Reject anything that is not a true permutation of the rank indices.
    if set(idx) != set(range(self.rank)):
        raise ValueError('Invalid permutation specified.')
    reordered = []
    for factor in self.factors:
        reordered.append(factor[:, idx])
    self.factors = reordered
    return self.factors
|
Permutes the columns of the factor matrices inplace
|
def calc_missingremoterelease_v1(self):
    """Calculate the portion of the required remote demand that could not
    be met by the actual discharge release.

    Required flux sequences:
      |RequiredRemoteRelease|
      |ActualRelease|
    Calculated flux sequence:
      |MissingRemoteRelease|
    Basic equation:
      :math:`MissingRemoteRelease = max(
      RequiredRemoteRelease-ActualRelease, 0)`

    Example:
        >>> from hydpy.models.dam import *
        >>> parameterstep()
        >>> fluxes.requiredremoterelease = 2.0
        >>> fluxes.actualrelease = 1.0
        >>> model.calc_missingremoterelease_v1()
        >>> fluxes.missingremoterelease
        missingremoterelease(1.0)
        >>> fluxes.actualrelease = 3.0
        >>> model.calc_missingremoterelease_v1()
        >>> fluxes.missingremoterelease
        missingremoterelease(0.0)
    """
    flu = self.sequences.fluxes.fastaccess
    # shortfall is clipped at zero: an over-delivery is not "missing"
    shortfall = flu.requiredremoterelease - flu.actualrelease
    flu.missingremoterelease = shortfall if shortfall > 0. else 0.
|
Calculate the portion of the required remote demand that could not
be met by the actual discharge release.
Required flux sequences:
|RequiredRemoteRelease|
|ActualRelease|
Calculated flux sequence:
|MissingRemoteRelease|
Basic equation:
:math:`MissingRemoteRelease = max(
RequiredRemoteRelease-ActualRelease, 0)`
Example:
>>> from hydpy.models.dam import *
>>> parameterstep()
>>> fluxes.requiredremoterelease = 2.0
>>> fluxes.actualrelease = 1.0
>>> model.calc_missingremoterelease_v1()
>>> fluxes.missingremoterelease
missingremoterelease(1.0)
>>> fluxes.actualrelease = 3.0
>>> model.calc_missingremoterelease_v1()
>>> fluxes.missingremoterelease
missingremoterelease(0.0)
|
def split_seq(sam_num, n_tile):
    """
    Split the sequence ``sam_num`` into ``n_tile`` contiguous chunks.

    Args:
        sam_num: list of values (typically indices) to partition.
        n_tile: number of tiles/chunks to produce.

    Returns:
        list of ``[start, end]`` pairs, where ``start`` is the first value
        of each chunk and ``end`` is the first value of the next chunk
        (``len(sam_num)`` for the last chunk).
    """
    import math
    # removed leftover debug print() calls from the original implementation
    step = int(math.ceil(len(sam_num) / n_tile))
    start_num = sam_num[0::step]
    end_num = start_num[1:]
    end_num.append(len(sam_num))
    return [[i, j] for i, j in zip(start_num, end_num)]
|
Split the number(sam_num) into numbers by n_tile
|
def create_entity(self, name, gl_structure, description=None):
    """
    Create an entity and add it to the model.

    :param name: The entity name.
    :param gl_structure: The entity's general ledger structure.
    :param description: The entity description.
    :returns: The created entity.
    """
    entity = Entity(name, gl_structure, description=description)
    # register the new entity on the model before handing it back
    self.entities.append(entity)
    return entity
|
Create an entity and add it to the model.
:param name: The entity name.
:param gl_structure: The entity's general ledger structure.
:param description: The entity description.
:returns: The created entity.
|
def _processDML(self, dataset_name, cols, reader):
"""Overridden version of create DML for SQLLite"""
sql_template = self._generateInsertStatement(dataset_name, cols)
# Now insert in batch, reader is a list of rows to insert at this point
c = self.conn.cursor()
c.executemany(sql_template, reader)
self.conn.commit()
|
Overridden version of create DML for SQLLite
|
def main(args=None):
    """Roundtrip the .glyphs file(s) given as arguments.

    :param args: iterable of file paths; defaults to ``sys.argv[1:]``
        (the original crashed with ``TypeError`` when called without
        arguments, since ``None`` is not iterable).
    """
    if args is None:
        args = sys.argv[1:]
    for arg in args:
        # context manager guarantees the file handle is closed; the
        # original leaked an open file per argument
        with open(arg, "r", encoding="utf-8") as fp:
            glyphsLib.dump(load(fp), sys.stdout)
|
Roundtrip the .glyphs file given as an argument.
|
def from_dict(cls, pods):
    """
    Returns a new Fragment from a dictionary representation.
    """
    fragment = cls()
    fragment.content = pods['content']
    # rebuild resources from their plain-dict form
    resources = [FragmentResource(**pod) for pod in pods['resources']]
    fragment._resources = resources  # pylint: disable=protected-access
    fragment.js_init_fn = pods['js_init_fn']
    fragment.js_init_version = pods['js_init_version']
    fragment.json_init_args = pods['json_init_args']
    return fragment
|
Returns a new Fragment from a dictionary representation.
|
def ufloatDict_nominal(self, ufloat_dict):
    """Return an OrderedDict mapping each key of ``ufloat_dict`` to the
    ``nominal_value`` of its uncertainty object, preserving key order.
    """
    # dict.items() pairs keys with values directly; the original relied on
    # the Python-2-only ``izip``, which does not exist on Python 3 (this
    # file already uses f-strings elsewhere, so it targets Python 3).
    return OrderedDict((key, value.nominal_value)
                       for key, value in ufloat_dict.items())
|
This gives us a dictionary of nominal values from a dictionary of uncertainties
|
def calcRapRperi(self,**kwargs):
    """
    NAME:
       calcRapRperi
    PURPOSE:
       calculate the apocenter and pericenter radii
    INPUT:
    OUTPUT:
       (rperi,rap)
    HISTORY:
       2010-12-01 - Written - Bovy (NYU)
    """
    # return the cached result from a previous call, if present
    if hasattr(self,'_rperirap'): #pragma: no cover
        return self._rperirap
    # energy and angular momentum fully determine the radial turning points
    EL= self.calcEL(**kwargs)
    E, L= EL
    if self._vR == 0. and m.fabs(self._vT - vcirc(self._pot,self._R,use_physical=False)) < _EPS: #We are on a circular orbit
        rperi= self._R
        rap = self._R
    elif self._vR == 0. and self._vT > vcirc(self._pot,self._R,use_physical=False): #We are exactly at pericenter
        rperi= self._R
        if self._gamma != 0.:
            # sign of the turning-point equation just outside R fixes the
            # search direction for the bracketing helper below
            startsign= _rapRperiAxiEq(self._R+10.**-8.,E,L,self._pot)
            startsign/= m.fabs(startsign)
        else: startsign= 1.
        # bracket the apocenter, then solve for the root of the
        # turning-point equation with Brent's method
        rend= _rapRperiAxiFindStart(self._R,E,L,self._pot,rap=True,
                                    startsign=startsign)
        rap= optimize.brentq(_rapRperiAxiEq,rperi+0.00001,rend,
                             args=(E,L,self._pot))
#                             fprime=_rapRperiAxiDeriv)
    elif self._vR == 0. and self._vT < vcirc(self._pot,self._R,use_physical=False): #We are exactly at apocenter
        rap= self._R
        if self._gamma != 0.:
            startsign= _rapRperiAxiEq(self._R-10.**-8.,E,L,self._pot)
            startsign/= m.fabs(startsign)
        else: startsign= 1.
        rstart= _rapRperiAxiFindStart(self._R,E,L,self._pot,
                                      startsign=startsign)
        # a zero bracket start means the orbit reaches the center
        if rstart == 0.: rperi= 0.
        else:
            rperi= optimize.brentq(_rapRperiAxiEq,rstart,rap-0.000001,
                                   args=(E,L,self._pot))
#                                   fprime=_rapRperiAxiDeriv)
    else:
        # generic case: R lies strictly between rperi and rap
        if self._gamma != 0.:
            startsign= _rapRperiAxiEq(self._R,E,L,self._pot)
            startsign/= m.fabs(startsign)
        else:
            startsign= 1.
        rstart= _rapRperiAxiFindStart(self._R,E,L,self._pot,
                                      startsign=startsign)
        if rstart == 0.: rperi= 0.
        else:
            try:
                rperi= optimize.brentq(_rapRperiAxiEq,rstart,self._R,
                                       (E,L,self._pot),
                                       maxiter=200)
            except RuntimeError: #pragma: no cover
                # brentq failing to converge here indicates no inner
                # turning point exists, i.e. the orbit is unbound
                raise UnboundError("Orbit seems to be unbound")
        rend= _rapRperiAxiFindStart(self._R,E,L,self._pot,rap=True,
                                    startsign=startsign)
        rap= optimize.brentq(_rapRperiAxiEq,self._R,rend,
                             (E,L,self._pot))
    # cache for subsequent calls (checked at the top of this method)
    self._rperirap= (rperi,rap)
    return self._rperirap
|
NAME:
calcRapRperi
PURPOSE:
calculate the apocenter and pericenter radii
INPUT:
OUTPUT:
(rperi,rap)
HISTORY:
2010-12-01 - Written - Bovy (NYU)
|
def handleError(self, record):
    """
    Handles any errors raised during the :meth:`emit` method. Will only try to pass exceptions to fallback notifier
    (if defined) in case the exception is a sub-class of :exc:`~notifiers.exceptions.NotifierException`
    :param record: :class:`logging.LogRecord`
    """
    # honour the global logging switch: stay silent when exceptions are
    # not supposed to propagate out of handlers
    if not logging.raiseExceptions:
        return
    exc_type, exc_value, _tb = sys.exc_info()
    if issubclass(exc_type, NotifierException) and self.fallback:
        failure_msg = f"Could not log msg to provider '{self.provider.name}'!\n{exc_value}"
        self.fallback_defaults["message"] = failure_msg
        self.fallback.notify(**self.fallback_defaults)
    else:
        super().handleError(record)
|
Handles any errors raised during the :meth:`emit` method. Will only try to pass exceptions to fallback notifier
(if defined) in case the exception is a sub-class of :exc:`~notifiers.exceptions.NotifierException`
:param record: :class:`logging.LogRecord`
|
def resume_training(self, train_data, model_path, valid_data=None):
    """Resume training of a classifier by reloading the appropriate
    state_dicts from a saved checkpoint.

    Args:
        train_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
            X (data) and Y (labels) for the train split
        model_path: the path to the saved checkpoint for resuming training
        valid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
            X (data) and Y (labels) for the dev split
    """
    # reload model/optimizer state from the checkpoint before training
    restore_state = self.checkpointer.restore(model_path)
    loss_fn = self._get_loss_fn()
    # switch the module into training mode
    self.train()
    self._train_model(
        train_data=train_data,
        loss_fn=loss_fn,
        valid_data=valid_data,
        restore_state=restore_state,
    )
|
This model resume training of a classifier by reloading the appropriate state_dicts for each model
Args:
train_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
X (data) and Y (labels) for the train split
model_path: the path to the saved checpoint for resuming training
valid_data: a tuple of Tensors (X,Y), a Dataset, or a DataLoader of
X (data) and Y (labels) for the dev split
|
def delete_report(server, report_number, timeout=HQ_DEFAULT_TIMEOUT):
    """
    Delete a specific crash report from the server.
    :param server: base URL of the server.
    :param report_number: Report Number
    :param timeout: request timeout in seconds.
    :return: server response, or ``False`` when the request failed.
    """
    url = server + "/reports/delete/%d" % report_number
    try:
        response = requests.post(url, timeout=timeout)
    except Exception as exc:
        # best-effort: log the failure and signal it via the return value
        logging.error(exc)
        return False
    return response
|
Delete a specific crash report from the server.
:param report_number: Report Number
:return: server response
|
def get_vswhere_path():
    """
    Get the path to vswhere.exe.

    If vswhere is not already installed as part of Visual Studio, and no
    alternate path is given using `set_vswhere_path()`, the latest release
    will be downloaded and stored alongside this script.
    """
    # explicit locations take precedence, in priority order
    for candidate in (alternate_path, DEFAULT_PATH):
        if candidate and os.path.exists(candidate):
            return candidate
    # fall back to the cached download, fetching it if absent
    if not os.path.exists(DOWNLOAD_PATH):
        _download_vswhere()
    return DOWNLOAD_PATH
|
Get the path to vshwere.exe.
If vswhere is not already installed as part of Visual Studio, and no
alternate path is given using `set_vswhere_path()`, the latest release will
be downloaded and stored alongside this script.
|
def add_children_gos(self, gos):
    """Return children of input gos plus input gos."""
    dag = self.obo_dag
    result = set()
    for go_id in gos:
        term = dag[go_id]
        # all descendants of the term, plus the term itself
        result.update(term.get_all_children())
        result.add(term.id)
    return result
|
Return children of input gos plus input gos.
|
def _set_if_type(self, v, load=False):
    """
    Setter method for if_type, mapped from YANG variable /mpls_state/dynamic_bypass/dynamic_bypass_interface/if_type (mpls-if-type)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_if_type is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_if_type() directly.
    YANG Description: Interface type
    """
    # unwrap a value that already carries a pyangbind union type marker
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # wrap the raw value in the generated YANG type; raises on any value
        # outside the interface-type enumeration below
        t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'loopback-interface': {'value': 7}, u'ethernet-interface': {'value': 2}, u'port-channel-interface': {'value': 5}, u'unknown-interface': {'value': 1}, u've-interface': {'value': 6}, u'fbr-channel-interface': {'value': 8}},), is_leaf=True, yang_name="if-type", rest_name="if-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='mpls-if-type', is_config=False)
    except (TypeError, ValueError):
        # surface a structured error in the shape pyangbind callers expect
        raise ValueError({
            'error-string': """if_type must be of a type compatible with mpls-if-type""",
            'defined-type': "brocade-mpls-operational:mpls-if-type",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'loopback-interface': {'value': 7}, u'ethernet-interface': {'value': 2}, u'port-channel-interface': {'value': 5}, u'unknown-interface': {'value': 1}, u've-interface': {'value': 6}, u'fbr-channel-interface': {'value': 8}},), is_leaf=True, yang_name="if-type", rest_name="if-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-mpls-operational', defining_module='brocade-mpls-operational', yang_type='mpls-if-type', is_config=False)""",
        })
    self.__if_type = t
    # notify the parent object, if it tracks modifications
    if hasattr(self, '_set'):
        self._set()
|
Setter method for if_type, mapped from YANG variable /mpls_state/dynamic_bypass/dynamic_bypass_interface/if_type (mpls-if-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_if_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_if_type() directly.
YANG Description: Interface type
|
def graph_from_labels(label_image,
                      fg_markers,
                      bg_markers,
                      regional_term = False,
                      boundary_term = False,
                      regional_term_args = False,
                      boundary_term_args = False):
    """
    Create a graph-cut ready graph to segment a nD image using the region neighbourhood.
    Create a `~medpy.graphcut.maxflow.GraphDouble` object for all regions of a nD label
    image.
    Every region of the label image is regarded as a node. They are connected to their
    immediate neighbours by arcs. If to regions are neighbours is determined using
    :math:`ndim*2`-connectedness (e.g. :math:`3*2=6` for 3D).
    In the next step the arcs weights (n-weights) are computed using the supplied
    ``boundary_term`` function (see :mod:`~medpy.graphcut.energy_voxel` for a selection).
    Implicitly the graph holds two additional nodes: the source and the sink, so called
    terminal nodes. These are connected with all other nodes through arcs of an initial
    weight (t-weight) of zero.
    All regions that are under the foreground markers are considered to be tightly bound
    to the source: The t-weight of the arc from source to these nodes is set to a maximum
    value. The same goes for the background markers: The covered regions receive a
    maximum (`~medpy.graphcut.graph.GCGraph.MAX`) t-weight for their arc towards the sink.
    All other t-weights are set using the supplied ``regional_term`` function
    (see :mod:`~medpy.graphcut.energy_voxel` for a selection).

    Parameters
    ----------
    label_image: ndarray
        The label image as an array cwhere each voxel carries the id of the region it
        belongs to. Note that the region labels have to start from 1 and be continuous
        (can be achieved with `~medpy.filter.label.relabel`).
    fg_markers : ndarray
        The foreground markers as binary array of the same shape as the original image.
    bg_markers : ndarray
        The background markers as binary array of the same shape as the original image.
    regional_term : function
        This can be either `False`, in which case all t-weights are set to 0, except for
        the nodes that are directly connected to the source or sink; or a function, in
        which case the supplied function is used to compute the t_edges. It has to
        have the following signature *regional_term(graph, regional_term_args)*, and is
        supposed to compute (source_t_weight, sink_t_weight) for all regions of the image
        and add these to the passed `~medpy.graphcut.graph.GCGraph` object. The weights
        have only to be computed for nodes where they do not equal zero. Additional
        parameters can be passed to the function via the ``regional_term_args`` parameter.
    boundary_term : function
        This can be either `False`, in which case all n-edges, i.e. between all nodes
        that are not source or sink, are set to 0; or a function, in which case the
        supplied function is used to compute the edge weights. It has to have the
        following signature *boundary_term(graph, boundary_term_args)*, and is supposed
        to compute the edges between all adjacent regions of the image and to add them
        to the supplied `~medpy.graphcut.graph.GCGraph` object. Additional parameters
        can be passed to the function via the ``boundary_term_args`` parameter.
    regional_term_args : tuple
        Use this to pass some additional parameters to the ``regional_term`` function.
    boundary_term_args : tuple
        Use this to pass some additional parameters to the ``boundary_term`` function.

    Returns
    -------
    graph : `~medpy.graphcut.maxflow.GraphDouble`
        The created graph, ready to execute the graph-cut.

    Raises
    ------
    AttributeError
        If an argument is malformed.
    FunctionError
        If one of the supplied functions returns unexpected results.

    Notes
    -----
    If a voxel is marked as both, foreground and background, the background marker
    is given higher priority.
    All arcs whose weight is not explicitly set are assumed to carry a weight of zero.
    """
    # prepare logger
    logger = Logger.getInstance()
    logger.info('Performing attribute tests...')
    # check, set and convert all supplied parameters
    # NOTE(review): scipy.asarray/scipy.bool_ are the legacy scipy aliases of
    # the numpy equivalents, removed in modern scipy releases — confirm the
    # pinned scipy version supports them.
    label_image = scipy.asarray(label_image)
    fg_markers = scipy.asarray(fg_markers, dtype=scipy.bool_)
    bg_markers = scipy.asarray(bg_markers, dtype=scipy.bool_)
    __check_label_image(label_image)
    # set dummy functions if not supplied
    if not regional_term: regional_term = __regional_term_label
    if not boundary_term: boundary_term = __boundary_term_label
    # check supplied functions and their signature
    # NOTE(review): inspect.getargspec was removed in Python 3.11;
    # inspect.getfullargspec is the replacement — confirm supported Pythons.
    if not hasattr(regional_term, '__call__') or not 3 == len(inspect.getargspec(regional_term)[0]):
        raise AttributeError('regional_term has to be a callable object which takes three parameters.')
    if not hasattr(boundary_term, '__call__') or not 3 == len(inspect.getargspec(boundary_term)[0]):
        raise AttributeError('boundary_term has to be a callable object which takes three parameters.')
    logger.info('Determining number of nodes and edges.')
    # compute number of nodes and edges
    nodes = len(scipy.unique(label_image))
    # POSSIBILITY 1: guess the number of edges (in the best situation is faster but requires a little bit more memory. In the worst is slower.)
    edges = 10 * nodes
    logger.debug('guessed: #nodes={} nodes / #edges={}'.format(nodes, edges))
    # POSSIBILITY 2: compute the edges (slow)
    #edges = len(__compute_edges(label_image))
    #logger.debug('computed: #nodes={} nodes / #edges={}'.format(nodes, edges))
    # prepare result graph
    graph = GCGraph(nodes, edges)
    logger.debug('#hardwired-nodes source/sink={}/{}'.format(len(scipy.unique(label_image[fg_markers])),
                                                             len(scipy.unique(label_image[bg_markers]))))
    #logger.info('Extracting the regions bounding boxes...')
    # extract the bounding boxes
    #bounding_boxes = find_objects(label_image)
    # compute the weights of all edges from the source and to the sink i.e.
    # compute the weights of the t_edges Wt
    logger.info('Computing and adding terminal edge weights...')
    #regions = set(graph.get_nodes()) - set(graph.get_source_nodes()) - set(graph.get_sink_nodes())
    regional_term(graph, label_image, regional_term_args) # bounding boxes indexed from 0 # old version: regional_term(graph, label_image, regions, bounding_boxes, regional_term_args)
    # compute the weights of the edges between the neighbouring nodes i.e.
    # compute the weights of the n_edges Wr
    logger.info('Computing and adding inter-node edge weights...')
    boundary_term(graph, label_image, boundary_term_args)
    # collect all regions that are under the foreground resp. background markers i.e.
    # collect all nodes that are connected to the source resp. sink
    logger.info('Setting terminal weights for the markers...')
    graph.set_source_nodes(scipy.unique(label_image[fg_markers] - 1)) # requires -1 to adapt to node id system
    graph.set_sink_nodes(scipy.unique(label_image[bg_markers] - 1))
    return graph.get_graph()
|
Create a graph-cut ready graph to segment a nD image using the region neighbourhood.
Create a `~medpy.graphcut.maxflow.GraphDouble` object for all regions of a nD label
image.
Every region of the label image is regarded as a node. They are connected to their
immediate neighbours by arcs. If to regions are neighbours is determined using
:math:`ndim*2`-connectedness (e.g. :math:`3*2=6` for 3D).
In the next step the arcs weights (n-weights) are computed using the supplied
``boundary_term`` function (see :mod:`~medpy.graphcut.energy_voxel` for a selection).
Implicitly the graph holds two additional nodes: the source and the sink, so called
terminal nodes. These are connected with all other nodes through arcs of an initial
weight (t-weight) of zero.
All regions that are under the foreground markers are considered to be tightly bound
to the source: The t-weight of the arc from source to these nodes is set to a maximum
value. The same goes for the background markers: The covered regions receive a
maximum (`~medpy.graphcut.graph.GCGraph.MAX`) t-weight for their arc towards the sink.
All other t-weights are set using the supplied ``regional_term`` function
(see :mod:`~medpy.graphcut.energy_voxel` for a selection).
Parameters
----------
label_image: ndarray
The label image as an array cwhere each voxel carries the id of the region it
belongs to. Note that the region labels have to start from 1 and be continuous
(can be achieved with `~medpy.filter.label.relabel`).
fg_markers : ndarray
The foreground markers as binary array of the same shape as the original image.
bg_markers : ndarray
The background markers as binary array of the same shape as the original image.
regional_term : function
This can be either `False`, in which case all t-weights are set to 0, except for
the nodes that are directly connected to the source or sink; or a function, in
which case the supplied function is used to compute the t_edges. It has to
have the following signature *regional_term(graph, regional_term_args)*, and is
supposed to compute (source_t_weight, sink_t_weight) for all regions of the image
and add these to the passed `~medpy.graphcut.graph.GCGraph` object. The weights
have only to be computed for nodes where they do not equal zero. Additional
parameters can be passed to the function via the ``regional_term_args`` parameter.
boundary_term : function
This can be either `False`, in which case all n-edges, i.e. between all nodes
that are not source or sink, are set to 0; or a function, in which case the
supplied function is used to compute the edge weights. It has to have the
following signature *boundary_term(graph, boundary_term_args)*, and is supposed
to compute the edges between all adjacent regions of the image and to add them
to the supplied `~medpy.graphcut.graph.GCGraph` object. Additional parameters
can be passed to the function via the ``boundary_term_args`` parameter.
regional_term_args : tuple
Use this to pass some additional parameters to the ``regional_term`` function.
boundary_term_args : tuple
Use this to pass some additional parameters to the ``boundary_term`` function.
Returns
-------
graph : `~medpy.graphcut.maxflow.GraphDouble`
The created graph, ready to execute the graph-cut.
Raises
------
AttributeError
If an argument is malformed.
FunctionError
If one of the supplied functions returns unexpected results.
Notes
-----
If a voxel is marked as both, foreground and background, the background marker
is given higher priority.
All arcs whose weight is not explicitly set are assumed to carry a weight of zero.
|
async def load_blob(reader, elem_type, params=None, elem=None):
    """
    Loads blob from reader to the element. Returns the loaded blob.
    :param reader:
    :param elem_type:
    :param params:
    :param elem:
    :return:
    """
    # fixed-size blobs carry their length in the type; variable-size ones
    # are length-prefixed with a uvarint
    if elem_type.FIX_SIZE:
        size = elem_type.SIZE
    else:
        size = await load_uvarint(reader)
    buffer = bytearray(size)
    await reader.areadinto(buffer)
    if elem is None:
        # no destination given: hand back the raw bytearray
        return buffer
    if isinstance(elem, BlobType):
        setattr(elem, elem_type.DATA_ATTR, buffer)
        return elem
    # destination is a mutable byte container: append in place
    elem.extend(buffer)
    return elem
|
Loads blob from reader to the element. Returns the loaded blob.
:param reader:
:param elem_type:
:param params:
:param elem:
:return:
|
def fold_enrichment(self):
    """(property) Returns the fold enrichment at the XL-mHG cutoff."""
    # observed hits divided by the count expected under the null
    expected = self.K * (self.cutoff / float(self.N))
    return self.k / expected
|
(property) Returns the fold enrichment at the XL-mHG cutoff.
|
def copy_resource(self, container, resource, local_filename):
    """
    Identical to :meth:`dockermap.client.base.DockerClientWrapper.copy_resource` with additional logging.
    """
    message = "Receiving tarball for resource '{0}:{1}' and storing as {2}".format(
        container, resource, local_filename)
    self.push_log(message)
    # delegate the actual copy to the wrapped client implementation
    super(DockerFabricClient, self).copy_resource(container, resource, local_filename)
|
Identical to :meth:`dockermap.client.base.DockerClientWrapper.copy_resource` with additional logging.
|
def get_proc_dir(cachedir, **kwargs):
    '''
    Given the cache directory, return the directory that process data is
    stored in, creating it if it doesn't exist.
    The following optional Keyword Arguments are handled:
        mode: which is anything os.makedir would accept as mode.
        uid: the uid to set, if not set, or it is None or -1 no changes are
             made. Same applies if the directory is already owned by this
             uid. Must be int. Works only on unix/unix like systems.
        gid: the gid to set, if not set, or it is None or -1 no changes are
             made. Same applies if the directory is already owned by this
             gid. Must be int. Works only on unix/unix like systems.
    '''
    fn_ = os.path.join(cachedir, 'proc')
    # normalize the optional mode into kwargs suitable for os.makedirs
    mode = kwargs.pop('mode', None)
    if mode is None:
        mode = {}
    else:
        mode = {'mode': mode}
    if not os.path.isdir(fn_):
        # proc_dir is not present, create it with mode settings
        os.makedirs(fn_, **mode)
    d_stat = os.stat(fn_)
    # if mode is not an empty dict then we have an explicit
    # dir mode. So lets check if mode needs to be changed.
    if mode:
        mode_part = S_IMODE(d_stat.st_mode)
        if mode_part != mode['mode']:
            # clear the current permission bits and apply the requested ones,
            # keeping the non-permission bits (file type etc.) intact
            os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode'])
    if hasattr(os, 'chown'):
        # only on unix/unix like systems
        uid = kwargs.pop('uid', -1)
        gid = kwargs.pop('gid', -1)
        # if uid and gid are both -1 then go ahead with
        # no changes at all
        if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \
                [i for i in (uid, gid) if i != -1]:
            os.chown(fn_, uid, gid)
    return fn_
|
Given the cache directory, return the directory that process data is
stored in, creating it if it doesn't exist.
The following optional Keyword Arguments are handled:
mode: which is anything os.makedir would accept as mode.
uid: the uid to set, if not set, or it is None or -1 no changes are
made. Same applies if the directory is already owned by this
uid. Must be int. Works only on unix/unix like systems.
gid: the gid to set, if not set, or it is None or -1 no changes are
made. Same applies if the directory is already owned by this
gid. Must be int. Works only on unix/unix like systems.
|
def get_model_url_name(model_nfo, page, with_namespace=False):
    """Returns a URL for a given Tree admin page type.

    ``model_nfo`` is an (app_label, model_name) pair; the result is the
    lower-cased ``[admin:]<app>_<model>_<page>`` URL name.
    """
    prefix = 'admin:' if with_namespace else ''
    model_part = '%s_%s' % model_nfo
    return ('%s%s_%s' % (prefix, model_part, page)).lower()
|
Returns a URL for a given Tree admin page type.
|
def _plot_extension(self, gta, prefix, src, loge_bounds=None, **kwargs):
    """Utility function for generating diagnostic plots for the
    extension analysis.

    Saves x- and y-projection PNGs for the summed model and then one
    pair per analysis component, all under the configured workdir.
    """
    # format = kwargs.get('format', self.config['plotting']['format'])
    if loge_bounds is None:
        # default to the full analysis energy range
        loge_bounds = (self.energies[0], self.energies[-1])
    name = src['name'].lower().replace(' ', '_')
    esuffix = '_%.3f_%.3f' % (loge_bounds[0], loge_bounds[1])
    # summed (all-component) projections: empty component suffix
    p = ExtensionPlotter(src, self.roi, '',
                         self.config['fileio']['workdir'],
                         loge_bounds=loge_bounds)
    fig = plt.figure()
    p.plot(0)
    plt.gca().set_xlim(-2, 2)
    ROIPlotter.setup_projection_axis(0)
    annotate(src=src, loge_bounds=loge_bounds)
    plt.savefig(os.path.join(self.config['fileio']['workdir'],
                             '%s_%s_extension_xproj%s.png' % (
                                 prefix, name, esuffix)))
    plt.close(fig)
    fig = plt.figure()
    p.plot(1)
    plt.gca().set_xlim(-2, 2)
    ROIPlotter.setup_projection_axis(1)
    annotate(src=src, loge_bounds=loge_bounds)
    plt.savefig(os.path.join(self.config['fileio']['workdir'],
                             '%s_%s_extension_yproj%s.png' % (
                                 prefix, name, esuffix)))
    plt.close(fig)
    # per-component projections, suffixed with the component index
    for i, c in enumerate(self.components):
        suffix = '_%02i' % i
        p = ExtensionPlotter(src, self.roi, suffix,
                             self.config['fileio']['workdir'],
                             loge_bounds=loge_bounds)
        fig = plt.figure()
        p.plot(0)
        ROIPlotter.setup_projection_axis(0, loge_bounds=loge_bounds)
        annotate(src=src, loge_bounds=loge_bounds)
        plt.gca().set_xlim(-2, 2)
        plt.savefig(os.path.join(self.config['fileio']['workdir'],
                                 '%s_%s_extension_xproj%s%s.png' % (
                                     prefix, name, esuffix, suffix)))
        plt.close(fig)
        fig = plt.figure()
        p.plot(1)
        plt.gca().set_xlim(-2, 2)
        ROIPlotter.setup_projection_axis(1, loge_bounds=loge_bounds)
        annotate(src=src, loge_bounds=loge_bounds)
        plt.savefig(os.path.join(self.config['fileio']['workdir'],
                                 '%s_%s_extension_yproj%s%s.png' % (
                                     prefix, name, esuffix, suffix)))
        plt.close(fig)
|
Utility function for generating diagnostic plots for the
extension analysis.
|
def bz2_decompress_stream(src):
    """Lazily decompress a stream of bz2-compressed chunks.

    Args:
        src (iterable): iterable that yields blocks of compressed data

    Yields:
        blocks of uncompressed data (empty outputs are skipped)
    """
    decompressor = bz2.BZ2Decompressor()
    for chunk in src:
        output = decompressor.decompress(chunk)
        # A chunk may not contain enough input to produce output yet.
        if not output:
            continue
        yield output
|
Decompress data from `src`.
Args:
src (iterable): iterable that yields blocks of compressed data
Yields:
blocks of uncompressed data
|
def error(self, error):
    """
    Defines a simulated exception error that will be raised.

    Arguments:
        error (str|Exception): error to raise. A plain string is wrapped
            in a ``RuntimeError``.

    Returns:
        self: current Mock instance.
    """
    # Wrap bare strings so callers always get a raisable exception object.
    self._error = RuntimeError(error) if isinstance(error, str) else error
    # Bug fix: the docstring documents a fluent API ("Returns: self"),
    # but the original body returned None. Return self to allow chaining.
    return self
|
Defines a simulated exception error that will be raised.
Arguments:
error (str|Exception): error to raise.
Returns:
self: current Mock instance.
|
def intersection(self, *others):
    """Return the intersection of two or more sets as a new set.

    >>> from ngram import NGram
    >>> a = NGram(['spam', 'eggs'])
    >>> b = NGram(['spam', 'ham'])
    >>> list(a.intersection(b))
    ['spam']
    """
    # Compute the plain-set intersection first, then re-wrap it so the
    # result keeps this instance's NGram configuration.
    common = super(NGram, self).intersection(*others)
    return self.copy(common)
|
Return the intersection of two or more sets as a new set.
>>> from ngram import NGram
>>> a = NGram(['spam', 'eggs'])
>>> b = NGram(['spam', 'ham'])
>>> list(a.intersection(b))
['spam']
|
def resolve_frompath(pkgpath, relpath, level=0):
    """Resolves the path of the module referred to by 'from ..x import y'.

    Args:
        pkgpath: dotted path of the package the import appears in.
        relpath: dotted path following the leading dots (may be empty).
        level: number of leading dots; 0 means an absolute import and
            ``relpath`` is returned unchanged.
    """
    if not level:
        return relpath
    # The '_' placeholder stands in for the current module, so one level
    # of relativity strips it instead of a real package component.
    base = pkgpath.split('.')
    base.append('_')
    del base[-level:]
    if relpath:
        base.extend(relpath.split('.'))
    return '.'.join(base)
|
Resolves the path of the module referred to by 'from ..x import y'.
|
def set_condition(self, condition=True):
    """
    Sets a new condition callback for the breakpoint.

    @see: L{__init__}

    @type  condition: function
    @param condition: (Optional) Condition callback function.
        Passing C{None} resets the breakpoint to an unconditional one.
    """
    # None means "always break", which is stored as the literal True.
    self.__condition = True if condition is None else condition
|
Sets a new condition callback for the breakpoint.
@see: L{__init__}
@type condition: function
@param condition: (Optional) Condition callback function.
|
def load_cash_balances(self):
    """ Loads cash balances from GnuCash book and recalculates into the default currency

    Reads the configured cash root account from the GnuCash book and sums
    cash across its child accounts, then stores the per-currency totals.
    """
    # Imported lazily to avoid a hard gnucash_portfolio dependency at
    # module import time.
    from gnucash_portfolio.accounts import AccountsAggregate, AccountAggregate
    cfg = self.__get_config()
    cash_root_name = cfg.get(ConfigKeys.cash_root)
    # Load cash from all accounts under the root.
    gc_db = self.config.get(ConfigKeys.gnucash_book_path)
    # open_if_lock=True: read the book even if GnuCash holds its lock file.
    with open_book(gc_db, open_if_lock=True) as book:
        svc = AccountsAggregate(book)
        root_account = svc.get_by_fullname(cash_root_name)
        acct_svc = AccountAggregate(book, root_account)
        cash_balances = acct_svc.load_cash_balances_with_children(cash_root_name)
        # Treat each sum per currency as a Stock, for display in full mode.
        self.__store_cash_balances_per_currency(cash_balances)
|
Loads cash balances from GnuCash book and recalculates into the default currency
|
def encrypt(self, data):
    '''
    encrypt data with AES-CBC and sign it with HMAC-SHA256

    The returned message layout is ``IV || ciphertext || HMAC``, where the
    HMAC covers the IV and the ciphertext (encrypt-then-MAC).
    '''
    aes_key, hmac_key = self.keys
    # PKCS#7-style padding: append `pad` bytes, each with value `pad`.
    pad = self.AES_BLOCK_SIZE - len(data) % self.AES_BLOCK_SIZE
    if six.PY2:
        data = data + pad * chr(pad)
    else:
        data = data + salt.utils.stringutils.to_bytes(pad * chr(pad))
    # Fresh random IV for every message.
    iv_bytes = os.urandom(self.AES_BLOCK_SIZE)
    if HAS_M2:
        # M2Crypto backend; padding=False because we padded manually above.
        cypher = EVP.Cipher(alg='aes_192_cbc', key=aes_key, iv=iv_bytes, op=1, padding=False)
        encr = cypher.update(data)
        encr += cypher.final()
    else:
        # PyCrypto/pycryptodome backend.
        cypher = AES.new(aes_key, AES.MODE_CBC, iv_bytes)
        encr = cypher.encrypt(data)
    data = iv_bytes + encr
    # Sign IV + ciphertext so tampering with either is detected.
    sig = hmac.new(hmac_key, data, hashlib.sha256).digest()
    return data + sig
|
encrypt data with AES-CBC and sign it with HMAC-SHA256
|
def this(obj, **kwargs):
    """Prints series of debugging steps to user.

    Runs through pipeline of functions and print results of each.
    """
    verbose = kwargs.get("verbose", True)
    banner = '{:=^30}'.format(" whatis.this? ")
    if verbose:
        print(banner)
    for step in pipeline:
        text = step(obj, **kwargs)
        if text is not None:
            print(text)
    if verbose:
        print(banner + '\n')
|
Prints series of debugging steps to user.
Runs through pipeline of functions and print results of each.
|
def remove_action(self, action, sub_menu='Advanced'):
    """
    Removes an action/separator from the editor's context menu.

    :param action: Action/separator to remove.
    :param sub_menu: Name of the submenu to remove the action from; pass a
        falsy value to remove it from the menu's top level instead.
    """
    if not sub_menu:
        # Top-level action: drop it from the tracked list (if present)
        # and detach it from the widget itself.
        try:
            self._actions.remove(action)
        except ValueError:
            pass  # not tracked; still detach from the widget below
        self.removeAction(action)
        return
    try:
        menu = self._sub_menus[sub_menu]
    except KeyError:
        return  # unknown submenu: nothing to remove
    menu.removeAction(action)
|
Removes an action/separator from the editor's context menu.
    :param action: Action/separator to remove.
:param advanced: True to remove the action from the advanced submenu.
|
def is_valid_geometry(self):
    """
    It is possible to infer the geometry only if exactly
    one of sites, sites_csv, hazard_curves_csv, gmfs_csv,
    region is set. You did set more than one, or nothing.
    """
    has_sites = (self.sites is not None
                 or 'sites' in self.inputs
                 or 'site_model' in self.inputs)
    if not has_sites and not self.ground_motion_fields:
        # when generating only the ruptures you do not need the sites
        return True
    missing_sites = ('gmfs' in self.inputs
                     and not has_sites
                     and not self.inputs['gmfs'].endswith('.xml'))
    if missing_sites:
        raise ValueError('Missing sites or sites_csv in the .ini file')
    mode = self.calculation_mode
    if 'risk' in mode or 'damage' in mode or 'bcr' in mode:
        return True  # no check on the sites for risk
    # NB: these flags must be mutually exclusive for the geometry to be
    # inferable.
    flags = dict(
        sites=bool(self.sites),
        sites_csv=self.inputs.get('sites', 0),
        hazard_curves_csv=self.inputs.get('hazard_curves', 0),
        gmfs_csv=self.inputs.get('gmfs', 0),
        region=bool(self.region and self.region_grid_spacing))
    if sum(bool(v) for v in flags.values()) == 1:
        return True
    return self.inputs.get('exposure') or self.inputs.get('site_model')
|
It is possible to infer the geometry only if exactly
one of sites, sites_csv, hazard_curves_csv, gmfs_csv,
region is set. You did set more than one, or nothing.
|
def update_kwargs(self, kwargs, count, offset):
    """
    Helper to support handy dictionaries merging on all Python versions.

    Mutates and returns the same dict so call sites can chain it.
    """
    kwargs[self.count_key] = count
    kwargs[self.offset_key] = offset
    return kwargs
|
Helper to support handy dictionaries merging on all Python versions.
|
def filters_query(filters):
    """ Turn the tuple of filters into SQL WHERE statements
    The key (column name) & operator have already been vetted
    so they can be trusted but the value could still be evil
    so it MUST be a parameterized input!
    That is done by creating a param dict where they key name
    & val look like:
        '{}_{}'.format(key, oper): val
    The key is constructed the way it is to ensure uniqueness,
    if we just used the key name then it could get clobbered.
    Ultimately the WHERE statement will look something like:
        age >= {age_gte}
    where age_gte is the key name in the param dict with a
    value of the evil user input. In the end, a string
    statement & dict param are returned as a tuple if any
    filters were provided otherwise None.
    :return: tuple (string, dict)
    """
    def _cast_val(filtr):
        """ Perform any needed casting on the filter value
        This could be tasks like including '%' signs at
        certain anchor points based on the filter or
        even wrapping it in certain functions.
        """
        val = filtr.val
        # LIKE-style operators get their wildcard anchors here so the
        # value itself remains a bind parameter (never interpolated).
        if filtr.oper in ('contains', 'icontains'):
            val = '%' + filtr.val + '%'
        elif filtr.oper == 'endswith':
            val = '%' + filtr.val
        elif filtr.oper == 'startswith':
            val = filtr.val + '%'
        return val
    def _filter(filtr):
        """ Process each individual Filter object """
        oper = FILTER_TABLE[filtr.oper]
        # Unique bind-parameter name: field + operator, dots flattened so
        # the name is a valid SQL parameter identifier.
        prop = '{field}_{oper}'.format(
            field=filtr.field.replace('.', '_'),
            oper=filtr.oper,
        )
        if isinstance(filtr, FilterRel):
            stmt = _filter_rel(filtr, oper, prop)
        else:
            stmt = '{field} {oper} %({prop})s'.format(
                field=filtr.field,
                oper=oper,
                prop=prop,
            )
        return stmt, {prop: _cast_val(filtr)}
    def _filter_or(filters):
        """ Given a FilterOr object return a SQL query """
        param = {}
        stmts = []
        for filtr in filters:
            vals = _filter(filtr)
            param.update(vals[1])
            stmts.append(vals[0])
        # Parenthesize so the OR group composes correctly with the outer
        # AND join below.
        stmt = ' OR '.join(stmts)
        stmt = '({})'.format(stmt)
        return stmt, param
    def _filter_rel(rel, oper, prop):
        """ Given a FilterRel object return a SQL sub query """
        stmt = """
            {field} IN (SELECT {foreign_field} FROM {foreign_rtype}
            WHERE {foreign_filter} {oper} %({prop})s)
        """
        return stmt.format(
            field=rel.local_field,
            foreign_field=rel.foreign_field,
            foreign_filter=rel.foreign_filter,
            foreign_rtype=rel.foreign_rtype,
            oper=oper,
            prop=prop,
        )
    param = {}
    stmts = []
    for filtr in filters:
        if isinstance(filtr, FilterOr):
            vals = _filter_or(filtr)
        else:
            vals = _filter(filtr)
        param.update(vals[1])
        stmts.append(vals[0])
    # Implicitly returns None when no filters were provided (documented
    # behavior above).
    if stmts:
        stmt = ' AND '.join(stmts)
        stmt = ' WHERE ' + stmt
        return stmt, param
|
Turn the tuple of filters into SQL WHERE statements
The key (column name) & operator have already been vetted
so they can be trusted but the value could still be evil
so it MUST be a parameterized input!
That is done by creating a param dict where they key name
& val look like:
'{}_{}'.format(key, oper): val
The key is constructed the way it is to ensure uniqueness,
if we just used the key name then it could get clobbered.
Ultimately the WHERE statement will look something like:
age >= {age_gte}
where age_gte is the key name in the param dict with a
value of the evil user input. In the end, a string
statement & dict param are returned as a tuple if any
filters were provided otherwise None.
:return: tuple (string, dict)
|
def get_pdf(article, debug=False):
    """
    Download an article PDF from arXiv.

    :param article:
        The ADS article to retrieve.

    :type article:
        :class:`ads.search.Article`

    :param debug:
        If True, raise on HTTP errors instead of returning None.

    :returns:
        The binary content of the requested PDF, or None on failure.
    """
    print('Retrieving {0}'.format(article))
    identifier = [_ for _ in article.identifier if 'arXiv' in _]
    if identifier:
        # Build the direct arXiv PDF URL from the identifier digits.
        url = 'http://arXiv.org/pdf/{0}.{1}'.format(identifier[0][9:13],
            ''.join(_ for _ in identifier[0][14:] if _.isdigit()))
    else:
        # No arXiv version. Ask ADS to redirect us to the journal article.
        params = {
            'bibcode': article.bibcode,
            'link_type': 'ARTICLE',
            'db_key': 'AST'
        }
        url = requests.get('http://adsabs.harvard.edu/cgi-bin/nph-data_query',
                           params=params).url
    q = requests.get(url)
    if not q.ok:
        print('Error retrieving {0}: {1} for {2}'.format(
            article, q.status_code, url))
        if debug:
            q.raise_for_status()
        else:
            return None
    # Check if the journal has given back forbidden HTML.
    # Bug fix: response.content is bytes, so the suffix must be a bytes
    # literal -- the original str suffix raised TypeError on Python 3.
    if q.content.endswith(b'</html>'):
        print('Error retrieving {0}: 200 (access denied?) for {1}'.format(
            article, url))
        return None
    return q.content
|
Download an article PDF from arXiv.
:param article:
The ADS article to retrieve.
:type article:
:class:`ads.search.Article`
:returns:
The binary content of the requested PDF.
|
def identify_col_pos(txt):
    """
    assume no delimiter in this file, so guess the best
    fixed column widths to split by

    Scans the first line of ``txt`` and records each position where a
    non-whitespace character follows a whitespace character, appending the
    index of the last character as an end marker. Returns an empty list
    when the first line is empty.
    """
    res = []
    lines = txt.split('\n')
    first_line = lines[0]
    if not first_line:
        # Bug fix: with an empty first line the loop never ran and the
        # trailing append crashed with UnboundLocalError on `col_pos`.
        return res
    prev_ch = ''
    for col_pos, ch in enumerate(first_line):
        # A column starts where non-whitespace follows whitespace.
        # NOTE(review): whether position 0 is recorded depends on what
        # _is_white_space('') returns -- confirm against its definition.
        if _is_white_space(ch) is False and _is_white_space(prev_ch) is True:
            res.append(col_pos)
        prev_ch = ch
    res.append(col_pos)
    return res
|
assume no delimiter in this file, so guess the best
fixed column widths to split by
|
def overlap_correlation(wnd, hop):
    """ Overlap correlation percent for the given overlap hop in samples. """
    # Correlation of the window with itself shifted by `hop`, normalized
    # by the window energy.
    shifted_product = sum(wnd * Stream(wnd).skip(hop))
    energy = sum(el ** 2 for el in wnd)
    return shifted_product / energy
|
Overlap correlation percent for the given overlap hop in samples.
|
def is_binary_file(file):
    """
    Returns if given file is a binary file.

    A file is considered binary when a NUL byte occurs anywhere in its
    content; the file is read in fixed-size chunks to bound memory use.

    :param file: File path.
    :type file: unicode
    :return: Is file binary.
    :rtype: bool
    """
    chunk_size = 1024
    # `with` guarantees the handle is closed even if read() raises,
    # replacing the manual try/finally bookkeeping.
    with open(file, "rb") as file_handle:
        while True:
            chunk = file_handle.read(chunk_size)
            # Bug fix: the file is opened in binary mode, so chunks are
            # bytes on Python 3; scan with a bytes literal instead of
            # chr(0), which raised TypeError (str in bytes) there.
            if b"\x00" in chunk:
                return True
            if len(chunk) < chunk_size:
                break
    return False
|
Returns if given file is a binary file.
:param file: File path.
:type file: unicode
:return: Is file binary.
:rtype: bool
|
def ToDatetime(self):
    """Converts Timestamp to datetime."""
    # Fold the nanosecond remainder into a fractional-second float.
    fractional_seconds = self.nanos / float(_NANOS_PER_SECOND)
    return datetime.utcfromtimestamp(self.seconds + fractional_seconds)
|
Converts Timestamp to datetime.
|
def add_coordinate_condition(self, droppable_id, container_id, coordinate, match=True):
    """Add a coordinate-based condition to this form's map.

    Appends a condition dict linking ``droppable_id`` and ``container_id``
    to the coordinate's values, then keeps the condition list sorted by
    container id.

    :param droppable_id: id of the droppable the condition applies to
    :param container_id: id of the target container (also the sort key)
    :param coordinate: a BasicCoordinate; its ``get_values()`` are stored
    :param match: whether this condition represents a match (default True)
    :raise InvalidArgument: if ``coordinate`` is not a BasicCoordinate
    """
    if not isinstance(coordinate, BasicCoordinate):
        raise InvalidArgument('coordinate is not a BasicCoordinate')
    self.my_osid_object_form._my_map['coordinateConditions'].append(
        {'droppableId': droppable_id, 'containerId': container_id, 'coordinate': coordinate.get_values(), 'match': match})
    self.my_osid_object_form._my_map['coordinateConditions'].sort(key=lambda k: k['containerId'])
|
stub
|
def _set_es_workers(self, **kwargs):
    """
    Creates index worker instances for each class to index

    kwargs:
    -------
        idx_only_base[bool]: True will only index the base class
    """
    def make_es_worker(search_conn, es_index, es_doc_type, class_name):
        """
        Returns a new es_worker instance

        args:
        -----
            search_conn: the connection to elasticsearch
            es_index: the name of the elasticsearch index
            es_doc_type: the name of the elasticsearch doctype
            class_name: name of the rdf class that is being indexed
        """
        # Shallow copy so each worker can carry its own index/doc_type
        # without mutating the shared connection object.
        new_esbase = copy.copy(search_conn)
        new_esbase.es_index = es_index
        new_esbase.doc_type = es_doc_type
        log.info("Indexing '%s' into ES index '%s' doctype '%s'",
                 class_name.pyuri,
                 es_index,
                 es_doc_type)
        return new_esbase
    def additional_indexers(rdf_class):
        """
        returns additional classes to index based off of the es definitions
        """
        rtn_list = rdf_class.es_indexers()
        # The base class is indexed by self.es_worker; keep only extras.
        rtn_list.remove(rdf_class)
        return rtn_list
    self.es_worker = make_es_worker(self.search_conn,
                                    self.es_index,
                                    self.es_doc_type,
                                    self.rdf_class.__name__)
    if not kwargs.get("idx_only_base"):
        # One extra worker per additional class, keyed by class name.
        self.other_indexers = {item.__name__: make_es_worker(
            self.search_conn,
            item.es_defs.get('kds_esIndex')[0],
            item.es_defs.get('kds_esDocType')[0],
            item.__name__)
            for item in additional_indexers(self.rdf_class)}
    else:
        self.other_indexers = {}
|
Creates index worker instances for each class to index
kwargs:
-------
idx_only_base[bool]: True will only index the base class
|
def parse(format, string, extra_types=None, evaluate_result=True, case_sensitive=False):
    '''Using "format" attempt to pull values from "string".

    The format must match the string contents exactly. If the value you
    are looking for is only part of the string, use search() instead.

    With ``evaluate_result=True`` (the default) the return value is a
    Result instance with two attributes:

     .fixed - tuple of fixed-position values from the string
     .named - dict of named values from the string

    With ``evaluate_result=False`` the return value is a Match instance
    whose .evaluate_result() method produces the Result described above.

    Matching is case-insensitive by default; pass case_sensitive=True to
    match with case. An invalid format raises ValueError. See the module
    documentation for the use of "extra_types". When there is no match,
    None is returned.
    '''
    parser = Parser(format, extra_types=extra_types, case_sensitive=case_sensitive)
    return parser.parse(string, evaluate_result=evaluate_result)
|
Using "format" attempt to pull values from "string".
The format must match the string contents exactly. If the value
you're looking for is instead just a part of the string use
search().
If ``evaluate_result`` is True the return value will be an Result instance with two attributes:
.fixed - tuple of fixed-position values from the string
.named - dict of named values from the string
If ``evaluate_result`` is False the return value will be a Match instance with one method:
.evaluate_result() - This will return a Result instance like you would get
with ``evaluate_result`` set to True
The default behaviour is to match strings case insensitively. You may match with
case by specifying case_sensitive=True.
If the format is invalid a ValueError will be raised.
See the module documentation for the use of "extra_types".
In the case there is no match parse() will return None.
|
def past_trades(self, symbol='btcusd', limit_trades=50, timestamp=0):
    """
    Send a trade history request, return the response.

    Arguments:
    symbol -- currency symbol (default 'btcusd')
    limit_trades -- maximum number of trades to return (default 50)
    timestamp -- only return trades after this unix timestamp (default 0)

    Returns the raw response object from the POST (not parsed JSON).
    """
    request = '/v1/mytrades'
    url = self.base_url + request
    # The signed payload must echo the endpoint and carry a fresh nonce.
    params = {
        'request': request,
        'nonce': self.get_nonce(),
        'symbol': symbol,
        'limit_trades': limit_trades,
        'timestamp': timestamp
    }
    return requests.post(url, headers=self.prepare(params))
|
Send a trade history request, return the response.
    Arguments:
symbol -- currency symbol (default 'btcusd')
limit_trades -- maximum number of trades to return (default 50)
timestamp -- only return trades after this unix timestamp (default 0)
|
def read_file_header(fd, endian):
    """Read mat 5 file header of the file fd.

    Returns a dict with header values, including a parsed '__version__'
    string in 'major.minor' form.
    """
    fields = (
        ('description', 's', 116),
        ('subsystem_offset', 's', 8),
        ('version', 'H', 2),
        ('endian_test', 's', 2),
    )
    header = {}
    for name, fmt, num_bytes in fields:
        header[name] = unpack(endian, fmt, fd.read(num_bytes))
    header['description'] = header['description'].strip()
    # The version field packs major in the high byte, minor in the low.
    version = header['version']
    header['__version__'] = '%d.%d' % (version >> 8, version & 0xFF)
    return header
|
Read mat 5 file header of the file fd.
Returns a dict with header values.
|
def follow_cf(save, Uspan, target_cf, nup, n_tot=5.0, slsp=None):
    """Calculates the quasiparticle weight in single
    site spin hamiltonian under with N degenerate half-filled orbitals

    Args:
        save: open output container; a group 'cf=<target_cf>' is created.
        Uspan: iterable of interaction strengths U to scan.
        target_cf: crystal-field splitting target.
        nup: list with at least one starting occupation; extended in-place.
        n_tot: total particle count (default 5.0).
        slsp: optional Spinon instance; a default one is built when None.
    """
    # Bug fix: identity comparison with None (`is None`) instead of
    # `== None`, which would invoke a custom __eq__ if Spinon defines one.
    if slsp is None:
        slsp = Spinon(slaves=6, orbitals=3, avg_particles=n_tot,
                      hopping=[0.5]*6, populations=np.asarray([n_tot]*6)/6)
    zet, lam, mu, mean_f = [], [], [], []
    for co in Uspan:
        print('U=', co, 'del=', target_cf)
        res = root(targetpop, nup[-1], (co, target_cf, slsp, n_tot))
        print(res.x)
        # Stop scanning once the root finder overshoots the previous value.
        if res.x > nup[-1]:
            break
        nup.append(res.x)
        slsp.param['populations'] = population_distri(nup[-1])
        mean_f.append(slsp.mean_field())
        zet.append(slsp.quasiparticle_weight())
        lam.append(slsp.param['lambda'])
        mu.append(orbital_energies(slsp.param, zet[-1]))
    # Persist the scan results; Uspan is truncated to the converged points.
    case = save.createGroup('cf={}'.format(target_cf))
    varis = st.setgroup(case)
    st.storegroup(varis, Uspan[:len(zet)], zet, lam, mu, nup[1:], target_cf, mean_f)
|
Calculates the quasiparticle weight in single
site spin hamiltonian under with N degenerate half-filled orbitals
|
def slugify(cls, s):
    """Return the slug version of the string ``s``.

    Every character outside [0-9a-zA-Z-] becomes a hyphen, runs of
    hyphens collapse to one, and leading/trailing hyphens are stripped.
    """
    hyphenated = re.sub(r"[^0-9a-zA-Z-]", "-", s)
    collapsed = re.sub(r"-{2,}", "-", hyphenated)
    return collapsed.strip("-")
|
Return the slug version of the string ``s``
|
def map_noreturn(targ, argslist):
    """
    parallel_call_noreturn(targ, argslist)

    :Parameters:
      - targ : function
      - argslist : list of tuples

    Does [targ(*args) for args in argslist] using the threadpool, blocks
    until every call has finished, and re-raises the first recorded
    exception (if any). Return values of ``targ`` are discarded.
    """
    # Thanks to Anne Archibald's handythread.py for the exception handling
    # mechanism.
    exceptions = []
    n_threads = len(argslist)
    exc_lock = threading.Lock()
    done_lock = CountDownLatch(n_threads)
    def eb(wr, el=exc_lock, ex=exceptions, dl=done_lock):
        # Exception callback: record exc_info under the lock, then count
        # down so the latch is still released on failure.
        el.acquire()
        ex.append(sys.exc_info())
        el.release()
        dl.countdown()
    def cb(wr, value, dl=done_lock):
        # Success callback: just count down the latch.
        dl.countdown()
    for args in argslist:
        __PyMCThreadPool__.putRequest(
            WorkRequest(targ,
                        callback=cb,
                        exc_callback=eb,
                        args=args,
                        requestID=id(args)))
    # Block until all requests completed (success or failure).
    done_lock.await_lock()
    # Propagate the first exception with its original traceback.
    if exceptions:
        six.reraise(*exceptions[0])
|
parallel_call_noreturn(targ, argslist)
:Parameters:
- targ : function
- argslist : list of tuples
Does [targ(*args) for args in argslist] using the threadpool.
|
def p_ctx_coords(self, p):
    # NOTE: the docstring below is the yacc grammar rule -- PLY reads the
    # function's __doc__ to build its parser tables, so it must not be
    # reworded or reformatted.
    """ ctx_coords : multiplicative_path
                   | ctx_coords COLON multiplicative_path"""
    if len(p) == 2:
        # Single path: start a new coordinate list.
        p[0] = [p[1]]
    else:
        # Recursive case: extend the list with the path after the COLON.
        p[0] = p[1] + [p[3]]
|
ctx_coords : multiplicative_path
| ctx_coords COLON multiplicative_path
|
def _GetDirectory(self):
    """Retrieves a directory.

    Returns:
      TARDirectory: a directory or None if not available.
    """
    # Only directory-type entries have a directory representation.
    if self.entry_type == definitions.FILE_ENTRY_TYPE_DIRECTORY:
        return TARDirectory(self._file_system, self.path_spec)
    return None
|
Retrieves a directory.
Returns:
TARDirectory: a directory or None if not available.
|
def from_filename(self, filename):
    '''
    Build an IntentSchema from a file path.

    Returns a fresh, empty IntentSchema when the file does not exist;
    raises (via ``json.load``) when the file exists but is not valid JSON.
    '''
    if not os.path.exists(filename):
        print('File does not exist')
        return IntentSchema()
    # OrderedDict keeps the intent/slot ordering as authored in the file.
    with open(filename) as fp:
        return IntentSchema(json.load(fp, object_pairs_hook=OrderedDict))
|
Build an IntentSchema from a file path
creates a new intent schema if the file does not exist, throws an error if the file
exists but cannot be loaded as a JSON
|
def wrap_socket(self, sock, server_side=False,
                do_handshake_on_connect=True,
                suppress_ragged_eofs=True, dummy=None):
    """Wrap an existing Python socket sock and return an ssl.SSLSocket
    object.

    All TLS parameters (key, certificate, CA file, verify mode, protocol)
    come from this object's stored configuration; ``dummy`` is accepted
    for call-site compatibility and ignored.

    NOTE(review): ssl.wrap_socket() is deprecated since Python 3.7 and
    removed in 3.12 -- migrating to ssl.SSLContext.wrap_socket() would be
    required on modern interpreters; confirm the supported Python range.
    """
    return ssl.wrap_socket(sock, keyfile=self._keyfile,
                           certfile=self._certfile,
                           server_side=server_side,
                           cert_reqs=self._verify_mode,
                           ssl_version=self._protocol,
                           ca_certs=self._cafile,
                           do_handshake_on_connect=do_handshake_on_connect,
                           suppress_ragged_eofs=suppress_ragged_eofs)
|
Wrap an existing Python socket sock and return an ssl.SSLSocket
object.
|
def UpsertUserDefinedFunction(self, collection_link, udf, options=None):
    """Upserts a user defined function in a collection.

    :param str collection_link:
        The link to the collection.
    :param str udf:
        The user defined function to upsert.
    :param dict options:
        The request options for the request.

    :return:
        The upserted UDF.
    :rtype:
        dict
    """
    if options is None:
        options = {}
    # Resolve the collection link into its id and resource path; the
    # helper may also normalize the udf payload before the upsert call.
    collection_id, path, udf = self._GetContainerIdWithPathForUDF(collection_link, udf)
    return self.Upsert(udf,
                       path,
                       'udfs',
                       collection_id,
                       None,
                       options)
|
Upserts a user defined function in a collection.
:param str collection_link:
The link to the collection.
:param str udf:
:param dict options:
The request options for the request.
:return:
The upserted UDF.
:rtype:
dict
|
def put_attachment(self, attachmentid, attachment_update):
    '''http://bugzilla.readthedocs.org/en/latest/api/core/v1/attachment.html#update-attachment'''
    assert type(attachment_update) is DotDict
    # The API requires an ids list; default it to the addressed attachment.
    if 'ids' not in attachment_update:
        attachment_update.ids = [attachmentid]
    endpoint = 'bug/attachment/{attachmentid}'.format(attachmentid=attachmentid)
    return self._put(endpoint, json.dumps(attachment_update))
|
http://bugzilla.readthedocs.org/en/latest/api/core/v1/attachment.html#update-attachment
|
def get_result(self, decorated_function, *args, **kwargs):
    """ Get result from storage for specified function. Will raise an exception
    (:class:`.WCacheStorage.CacheMissedException`) if there is no cached result.

    :param decorated_function: called function (original)
    :param args: args with which function is called
    :param kwargs: kwargs with which function is called

    :return: (any type, even None)
    """
    entry = self.get_cache(decorated_function, *args, **kwargs)
    # Strict `is False` check is deliberate: has_value is a boolean flag,
    # not a truthiness test on the cached value.
    if entry.has_value is False:
        raise WCacheStorage.CacheMissedException('No cache record found')
    return entry.cached_value
|
Get result from storage for specified function. Will raise an exception
(:class:`.WCacheStorage.CacheMissedException`) if there is no cached result.
:param decorated_function: called function (original)
:param args: args with which function is called
:param kwargs: kwargs with which function is called
:return: (any type, even None)
|
def get_neighbors(self, site, r, include_index=False, include_image=False):
    """
    Get all neighbors to a site within a sphere of radius r. Excludes the
    site itself.

    Args:
        site (Site): Which is the center of the sphere.
        r (float): Radius of sphere.
        include_index (bool): Whether the non-supercell site index
            is included in the returned data
        include_image (bool): Whether to include the supercell image
            is included in the returned data

    Returns:
        [(site, dist) ...] since most of the time, subsequent processing
        requires the distance. With include_index, each tuple also carries
        the neighbor's index; with include_image, also the supercell image.
    """
    candidates = self.get_sites_in_sphere(site.coords, r,
                                          include_index=include_index,
                                          include_image=include_image)
    # Drop the center site itself; everything else is a neighbor.
    neighbors = []
    for entry in candidates:
        if site != entry[0]:
            neighbors.append(entry)
    return neighbors
|
Get all neighbors to a site within a sphere of radius r. Excludes the
site itself.
Args:
site (Site): Which is the center of the sphere.
r (float): Radius of sphere.
include_index (bool): Whether the non-supercell site index
is included in the returned data
include_image (bool): Whether to include the supercell image
is included in the returned data
Returns:
[(site, dist) ...] since most of the time, subsequent processing
requires the distance.
If include_index == True, the tuple for each neighbor also includes
the index of the neighbor.
If include_supercell == True, the tuple for each neighbor also includes
the index of supercell.
|
def sample_counters(mc, system_info):
    """Sample every router counter in the machine.

    Returns a dict mapping each (x, y) chip coordinate in ``system_info``
    to that chip's router diagnostics.
    """
    samples = {}
    for x, y in system_info:
        samples[(x, y)] = mc.get_router_diagnostics(x, y)
    return samples
|
Sample every router counter in the machine.
|
def hasattrs(object, *names):
    """
    Check whether *object* provides every attribute named in *names*.

    :param object: an object that may or may not contain the listed attributes
    :param names: a variable amount of attribute names to check for
    :return: True if the object contains each named attribute, false otherwise
    """
    # all() short-circuits on the first missing attribute, exactly like
    # the equivalent early-return loop. (Parameter name `object` shadows
    # the builtin but is kept for interface compatibility.)
    return all(hasattr(object, name) for name in names)
|
Takes in an object and a variable length amount of named attributes,
and checks to see if the object has each property. If any of the
attributes are missing, this returns false.
:param object: an object that may or may not contain the listed attributes
:param names: a variable amount of attribute names to check for
:return: True if the object contains each named attribute, false otherwise
|
def default_vsan_policy_configured(name, policy):
    '''
    Configures the default VSAN policy on a vCenter.
    The state assumes there is only one default VSAN policy on a vCenter.

    name
        Name of the state (used in comments and log messages)

    policy
        Dict representation of a policy
    '''
    # TODO Refactor when recurse_differ supports list_differ
    # It's going to make the whole thing much easier
    # Keep an unmodified copy: `policy` is mutated below (subprofiles /
    # capabilities keys are deleted while diffing) but the full dict is
    # needed for the actual update call.
    policy_copy = copy.deepcopy(policy)
    proxy_type = __salt__['vsphere.get_proxy_type']()
    log.trace('proxy_type = %s', proxy_type)
    # All allowed proxies have a shim execution module with the same
    # name which implements a get_details function
    # All allowed proxies have a vcenter detail
    vcenter = __salt__['{0}.get_details'.format(proxy_type)]()['vcenter']
    log.info('Running %s on vCenter \'%s\'', name, vcenter)
    log.trace('policy = %s', policy)
    changes_required = False
    ret = {'name': name,
           'changes': {},
           'result': None,
           'comment': None}
    comments = []
    changes = {}
    # NOTE(review): duplicate initialization -- changes_required was
    # already set to False above.
    changes_required = False
    si = None
    try:
        #TODO policy schema validation
        si = __salt__['vsphere.get_service_instance_via_proxy']()
        current_policy = __salt__['vsphere.list_default_vsan_policy'](si)
        log.trace('current_policy = %s', current_policy)
        # Building all diffs between the current and expected policy
        # XXX We simplify the comparison by assuming we have at most 1
        # sub_profile
        if policy.get('subprofiles'):
            if len(policy['subprofiles']) > 1:
                raise ArgumentValueError('Multiple sub_profiles ({0}) are not '
                                         'supported in the input policy')
            subprofile = policy['subprofiles'][0]
            current_subprofile = current_policy['subprofiles'][0]
            capabilities_differ = list_diff(current_subprofile['capabilities'],
                                            subprofile.get('capabilities', []),
                                            key='id')
            del policy['subprofiles']
            if subprofile.get('capabilities'):
                del subprofile['capabilities']
            del current_subprofile['capabilities']
            # Get the subprofile diffs without the capability keys
            subprofile_differ = recursive_diff(current_subprofile,
                                               dict(subprofile))
            del current_policy['subprofiles']
        # NOTE(review): subprofile_differ / capabilities_differ are only
        # bound inside the branch above; the references below assume the
        # input policy always carries subprofiles -- confirm with callers.
        policy_differ = recursive_diff(current_policy, policy)
        if policy_differ.diffs or capabilities_differ.diffs or \
                subprofile_differ.diffs:
            if 'name' in policy_differ.new_values or \
                    'description' in policy_differ.new_values:
                raise ArgumentValueError(
                    '\'name\' and \'description\' of the default VSAN policy '
                    'cannot be updated')
            changes_required = True
            if __opts__['test']:
                # Test mode: describe the pending changes, do not apply.
                str_changes = []
                if policy_differ.diffs:
                    str_changes.extend([change for change in
                                        policy_differ.changes_str.split('\n')])
                if subprofile_differ.diffs or capabilities_differ.diffs:
                    str_changes.append('subprofiles:')
                    if subprofile_differ.diffs:
                        str_changes.extend(
                            ['  {0}'.format(change) for change in
                             subprofile_differ.changes_str.split('\n')])
                    if capabilities_differ.diffs:
                        str_changes.append('  capabilities:')
                        str_changes.extend(
                            ['  {0}'.format(change) for change in
                             capabilities_differ.changes_str2.split('\n')])
                comments.append(
                    'State {0} will update the default VSAN policy on '
                    'vCenter \'{1}\':\n{2}'
                    ''.format(name, vcenter, '\n'.join(str_changes)))
            else:
                # Apply the update using the pristine policy copy.
                __salt__['vsphere.update_storage_policy'](
                    policy=current_policy['name'],
                    policy_dict=policy_copy,
                    service_instance=si)
                comments.append('Updated the default VSAN policy in vCenter '
                                '\'{0}\''.format(vcenter))
            log.info(comments[-1])
            # Assemble the old/new change report, pruning empty subprofile
            # and capability entries.
            new_values = policy_differ.new_values
            new_values['subprofiles'] = [subprofile_differ.new_values]
            new_values['subprofiles'][0]['capabilities'] = \
                capabilities_differ.new_values
            if not new_values['subprofiles'][0]['capabilities']:
                del new_values['subprofiles'][0]['capabilities']
            if not new_values['subprofiles'][0]:
                del new_values['subprofiles']
            old_values = policy_differ.old_values
            old_values['subprofiles'] = [subprofile_differ.old_values]
            old_values['subprofiles'][0]['capabilities'] = \
                capabilities_differ.old_values
            if not old_values['subprofiles'][0]['capabilities']:
                del old_values['subprofiles'][0]['capabilities']
            if not old_values['subprofiles'][0]:
                del old_values['subprofiles']
            changes.update({'default_vsan_policy':
                            {'new': new_values,
                             'old': old_values}})
            log.trace(changes)
        __salt__['vsphere.disconnect'](si)
    except CommandExecutionError as exc:
        log.error('Error: %s', exc)
        if si:
            __salt__['vsphere.disconnect'](si)
        if not __opts__['test']:
            ret['result'] = False
        # NOTE(review): the update below overwrites 'result' again with an
        # equivalent value -- the preceding assignment is redundant.
        ret.update({'comment': exc.strerror,
                    'result': False if not __opts__['test'] else None})
        return ret
    if not changes_required:
        # We have no changes
        ret.update({'comment': ('Default VSAN policy in vCenter '
                                '\'{0}\' is correctly configured. '
                                'Nothing to be done.'.format(vcenter)),
                    'result': True})
    else:
        ret.update({
            'comment': '\n'.join(comments),
            'changes': changes,
            'result': None if __opts__['test'] else True,
        })
    return ret
|
Configures the default VSAN policy on a vCenter.
The state assumes there is only one default VSAN policy on a vCenter.
policy
Dict representation of a policy
|
def _add_token_span_to_document(self, span_element):
"""
adds an <intro>, <act> or <conclu> token span to the document.
"""
for token in span_element.text.split():
token_id = self._add_token_to_document(token)
if span_element.tag == 'act': # doc can have 0+ acts
self._add_spanning_relation('act_{}'.format(self.act_count),
token_id)
else: # <intro> or <conclu>
self._add_spanning_relation(span_element.tag, token_id)
if span_element.tag == 'act':
self.act_count += 1
|
adds an <intro>, <act> or <conclu> token span to the document.
|
def file_size(self, name, force_refresh=False):
    """Returns the size of the file.

    For efficiency this operation does not use locking, so may return
    inconsistent data. Use it for informational purposes.

    :param name: file name, possibly carrying a version (split_name).
    :param force_refresh: when True, prefer the remote store even for
        explicitly versioned names.
    :raises FiletrackerError: when no store can provide the file.
    """
    uname, version = split_name(name)
    t = time.time()
    logger.debug('  querying size of %s', name)
    try:
        # Prefer the local store when there is no remote store, or when an
        # explicit version was requested and no refresh was forced.
        if not self.remote_store or (version is not None
                                     and not force_refresh):
            try:
                if self.local_store and self.local_store.exists(name):
                    return self.local_store.file_size(name)
            except Exception:
                if self.remote_store:
                    # Local failure is non-fatal: we can still fall back
                    # to the remote store below.
                    logger.warning("Error getting '%s' from local store",
                                   name, exc_info=True)
                else:
                    raise
        if self.remote_store:
            return self.remote_store.file_size(name)
        raise FiletrackerError("File not available: %s" % name)
    finally:
        logger.debug('  processed %s in %.2fs', name, time.time() - t)
|
Returns the size of the file.
For efficiency this operation does not use locking, so may return
inconsistent data. Use it for informational purposes.
|
def register(self, schema):
    """Register input schema class.

    When registering a schema, all inner schemas are registered as well.

    :param Schema schema: schema to register.
    :return: old registered schema for the same UUID, or None if no schema
        was previously registered under that UUID.
    :rtype: type
    """
    result = None

    uuid = schema.uuid

    # remember any previously registered schema for this UUID so it can
    # be returned to the caller
    if uuid in self._schbyuuid:
        result = self._schbyuuid[uuid]

    if result != schema:
        self._schbyuuid[uuid] = schema

        name = schema.name

        schemas = self._schbyname.setdefault(name, set())

        schemas.add(schema)

        # BUG FIX: the original called the bare name ``register``, which is
        # undefined inside this method; the recursion must go through
        # ``self``. Also use ``.items()`` (equivalent to six's iteritems).
        for innername, innerschema in schema.getschemas().items():
            if innerschema.uuid not in self._schbyuuid:
                self.register(innerschema)

    return result
|
Register input schema class.
When registering a schema, all inner schemas are registered as well.
:param Schema schema: schema to register.
:return: old registered schema.
:rtype: type
|
def wrap_and_format(self, width=None, include_params=False, include_return=False, excluded_params=None):
    """Wrap and format this docstring for a specific width.

    Args:
        width (int): The number of characters per line. If set to None
            this will be inferred from the terminal width and default
            to 80 if not passed or if passed as None and the terminal
            width cannot be determined.
        include_return (bool): Include the return information section
            in the output.
        include_params (bool): Include a parameter information section
            in the output.
        excluded_params (list): An optional list of parameter names to exclude.
            Options for excluding things are, for example, 'self' or 'cls'.

    Returns:
        str: the wrapped and formatted docstring text.
    """
    if excluded_params is None:
        excluded_params = []

    out = StringIO()

    if width is None:
        width, _height = get_terminal_size()

    for line in self.maindoc:
        if isinstance(line, Line):
            out.write(fill(line.contents, width=width))
            out.write('\n')
        elif isinstance(line, BlankLine):
            out.write('\n')
        elif isinstance(line, ListItem):
            out.write(fill(line.contents, initial_indent=" %s " % line.marker[0], subsequent_indent=" ", width=width))
            out.write('\n')

    if include_params:
        included_params = set(self.param_info) - set(excluded_params)
        if len(included_params) > 0:
            out.write("\nParameters:\n")
            # sorted() so output order is deterministic (set iteration
            # order is not)
            for param in sorted(included_params):
                info = self.param_info[param]
                out.write(" - %s (%s):\n" % (param, info.type_name))
                out.write(fill(info.desc, initial_indent=" ", subsequent_indent=" ", width=width))
                out.write('\n')

    if include_return:
        # BUG FIX: this section was previously print()ed to stdout and
        # therefore never appeared in the returned string.
        out.write("\nReturns:\n")
        out.write(" " + self.return_info.type_name + "\n")

    #pylint:disable=fixme; Issue tracked in #32
    # TODO: Also include description information here
    return out.getvalue()
|
Wrap, format and print this docstring for a specific width.
Args:
width (int): The number of characters per line. If set to None
this will be inferred from the terminal width and default
to 80 if not passed or if passed as None and the terminal
width cannot be determined.
include_return (bool): Include the return information section
in the output.
include_params (bool): Include a parameter information section
in the output.
excluded_params (list): An optional list of parameter names to exclude.
Options for excluding things are, for example, 'self' or 'cls'.
|
def group_callback(self, iocb):
    """Callback when a child iocb completes.

    If any member of the group has not yet completed, keep waiting;
    once every member is complete, mark the group COMPLETED and fire
    its trigger.
    """
    if _debug: IOGroup._debug("group_callback %r", iocb)

    # locate the first member (if any) that has not finished yet
    pending = next(
        (member for member in self.ioMembers if not member.ioComplete.isSet()),
        None,
    )
    if pending is not None:
        if _debug: IOGroup._debug("    - waiting for child: %r", pending)
        return

    if _debug: IOGroup._debug("    - all children complete")
    # everything complete
    self.ioState = COMPLETED
    self.trigger()
|
Callback when a child iocb completes.
|
def get_emitter(self, name: str) -> Callable[[Event], Event]:
    """Gets an emitter for a named event.

    Parameters
    ----------
    name :
        The name of the event the requested emitter will emit.

        Users may provide their own named events by requesting an emitter with this function,
        but should do so with caution as it makes time much more difficult to think about.

    Returns
    -------
    An emitter for the named event. The emitter should be called by the requesting component
    at the appropriate point in the simulation lifecycle.
    """
    # Delegate to the event manager, which owns emitter creation and lookup.
    return self._event_manager.get_emitter(name)
|
Gets an emitter for a named event.
Parameters
----------
name :
The name of the event the requested emitter will emit.
Users may provide their own named events by requesting an emitter with this function,
but should do so with caution as it makes time much more difficult to think about.
Returns
-------
An emitter for the named event. The emitter should be called by the requesting component
at the appropriate point in the simulation lifecycle.
|
def pipeline_exists(url, pipeline_id, auth, verify_ssl):
    '''Check whether the identified pipeline is defined on the host.

    :param url: (str): the host url in the form 'http://host:port/'.
    :param pipeline_id: (string) the pipeline identifier
    :param auth: (tuple) a tuple of username, password
    :return: (boolean) True when a status lookup succeeds, False on HTTP error
    '''
    try:
        # Subscript 'status' exactly as a status consumer would; an HTTP
        # error from the lookup means the pipeline does not exist.
        status_info = pipeline_status(url, pipeline_id, auth, verify_ssl)
        status_info['status']
    except requests.HTTPError:
        return False
    return True
|
:param url: (str): the host url in the form 'http://host:port/'.
:param pipeline_id: (string) the pipeline identifier
:param auth: (tuple) a tuple of username, password
:return: (boolean)
|
def lnlike(self, model, refactor=False, pos_tol=2.5, neg_tol=50.,
           full_output=False):
    r"""
    Return the likelihood of the astrophysical model `model`.
    Returns the likelihood of `model` marginalized over the PLD model.
    :param ndarray model: A vector of the same shape as `self.time` \
        corresponding to the astrophysical model.
    :param bool refactor: Re-compute the Cholesky decomposition? This \
        typically does not need to be done, except when the PLD \
        model changes. Default :py:obj:`False`.
    :param float pos_tol: the positive (i.e., above the median) \
        outlier tolerance in standard deviations.
    :param float neg_tol: the negative (i.e., below the median) \
        outlier tolerance in standard deviations.
    :param bool full_output: If :py:obj:`True`, returns the maximum \
        likelihood model amplitude and the variance on the amplitude \
        in addition to the log-likelihood. In the case of a transit \
        model, these are the transit depth and depth variance. Default \
        :py:obj:`False`.
    :returns: the log-likelihood (float); with ``full_output`` a tuple \
        ``(lnl, amplitude, amplitude_variance)`` normalized by the \
        median raw flux.
    """
    lnl = 0
    # Re-factorize the Cholesky decomposition?
    # Force a refactor on first call, when no cached factorization exists.
    try:
        self._ll_info
    except AttributeError:
        refactor = True
    if refactor:
        # Smooth the light curve and reset the outlier mask
        t = np.delete(self.time,
                      np.concatenate([self.nanmask, self.badmask]))
        f = np.delete(self.flux,
                      np.concatenate([self.nanmask, self.badmask]))
        f = SavGol(f)
        med = np.nanmedian(f)
        # 1.4826 converts median absolute deviation to a sigma estimate
        MAD = 1.4826 * np.nanmedian(np.abs(f - med))
        pos_inds = np.where((f > med + pos_tol * MAD))[0]
        pos_inds = np.array([np.argmax(self.time == t[i])
                             for i in pos_inds])
        # NOTE(review): this MAD recomputation is identical to the one
        # above (f and med are unchanged) — redundant but harmless.
        MAD = 1.4826 * np.nanmedian(np.abs(f - med))
        neg_inds = np.where((f < med - neg_tol * MAD))[0]
        neg_inds = np.array([np.argmax(self.time == t[i])
                             for i in neg_inds])
        # Temporarily swap in the freshly computed outlier masks while
        # factorizing; the caller-visible masks are restored below.
        outmask = np.array(self.outmask)
        transitmask = np.array(self.transitmask)
        self.outmask = np.concatenate([neg_inds, pos_inds])
        self.transitmask = np.array([], dtype=int)
        # Now re-factorize the Cholesky decomposition
        self._ll_info = [None for b in self.breakpoints]
        for b, brkpt in enumerate(self.breakpoints):
            # Masks for current chunk
            m = self.get_masked_chunk(b, pad=False)
            # This block of the masked covariance matrix
            K = GetCovariance(self.kernel, self.kernel_params,
                              self.time[m], self.fraw_err[m])
            # The masked X.L.X^T term
            A = np.zeros((len(m), len(m)))
            for n in range(self.pld_order):
                XM = self.X(n, m)
                A += self.lam[b][n] * np.dot(XM, XM.T)
            K += A
            self._ll_info[b] = [cho_factor(K), m]
        # Reset the outlier masks
        self.outmask = outmask
        self.transitmask = transitmask
    # Compute the likelihood for each chunk
    amp = [None for b in self.breakpoints]
    var = [None for b in self.breakpoints]
    for b, brkpt in enumerate(self.breakpoints):
        # Get the inverse covariance and the mask
        CDK = self._ll_info[b][0]
        m = self._ll_info[b][1]
        # Compute the maximum likelihood model amplitude
        # (for transits, this is the transit depth)
        var[b] = 1. / np.dot(model[m], cho_solve(CDK, model[m]))
        amp[b] = var[b] * np.dot(model[m], cho_solve(CDK, self.fraw[m]))
        # Compute the residual
        r = self.fraw[m] - amp[b] * model[m]
        # Finally, compute the likelihood
        lnl += -0.5 * np.dot(r, cho_solve(CDK, r))
    if full_output:
        # We need to multiply the Gaussians for all chunks to get the
        # amplitude and amplitude variance for the entire dataset
        vari = var[0]
        ampi = amp[0]
        for v, a in zip(var[1:], amp[1:]):
            ampi = (ampi * v + a * vari) / (vari + v)
            vari = vari * v / (vari + v)
        med = np.nanmedian(self.fraw)
        # Normalize by the median flux so the amplitude is fractional
        return lnl, ampi / med, vari / med ** 2
    else:
        return lnl
|
r"""
Return the likelihood of the astrophysical model `model`.
Returns the likelihood of `model` marginalized over the PLD model.
:param ndarray model: A vector of the same shape as `self.time` \
corresponding to the astrophysical model.
:param bool refactor: Re-compute the Cholesky decomposition? This \
typically does not need to be done, except when the PLD \
model changes. Default :py:obj:`False`.
:param float pos_tol: the positive (i.e., above the median) \
outlier tolerance in standard deviations.
:param float neg_tol: the negative (i.e., below the median) \
outlier tolerance in standard deviations.
:param bool full_output: If :py:obj:`True`, returns the maximum \
likelihood model amplitude and the variance on the amplitude \
in addition to the log-likelihood. In the case of a transit \
model, these are the transit depth and depth variance. Default \
:py:obj:`False`.
|
def print_prefixed_lines(lines: List[Tuple[str, Optional[str]]]) -> str:
    """Print lines specified like this: ["prefix", "string"]

    Entries whose string is None are dropped; every prefix is
    right-justified to the width of the longest prefix.
    """
    present = [(prefix, text) for prefix, text in lines if text is not None]
    pad_len = max((len(prefix) for prefix, _ in present), default=0)
    return "\n".join(prefix.rjust(pad_len) + text for prefix, text in present)
|
Print lines specified like this: ["prefix", "string"]
|
def get_page_content(self, page_id, page_info=0):
    """
    Fetch a page's content through self.process, returning None on failure.

    PageInfo
    0 - Returns only basic page content, without selection markup and binary data objects. This is the standard value to pass.
    1 - Returns page content with no selection markup, but with all binary data.
    2 - Returns page content with selection markup, but no binary data.
    3 - Returns page content with selection markup and all binary data.
    """
    try:
        content = self.process.GetPageContent(page_id, "", page_info)
    except Exception as e:
        # best-effort: report the failure and fall through, returning None
        print(e)
        print("Could not get Page Content")
    else:
        return content
|
PageInfo
0 - Returns only basic page content, without selection markup and binary data objects. This is the standard value to pass.
1 - Returns page content with no selection markup, but with all binary data.
2 - Returns page content with selection markup, but no binary data.
3 - Returns page content with selection markup and all binary data.
|
def locally_cache_remote_file(href, dir):
    """ Locally cache a remote resource using a predictable file name
    and awareness of modification date. Assume that files are "normal"
    which is to say they have filenames with extensions.

    Returns the local cache path. Raises on any HTTP status outside
    2xx / handled redirects / 304.
    """
    scheme, host, remote_path, params, query, fragment = urlparse(href)

    assert scheme in ('http','https'), 'Scheme must be either http or https, not "%s" (for %s)' % (scheme,href)

    head, ext = posixpath.splitext(posixpath.basename(remote_path))
    head = sub(r'[^\w\-_]', '', head)
    # NOTE(review): md5() requires bytes on Python 3; this call with a str
    # href suggests Python-2-era code — confirm before running on Python 3.
    hash = md5(href).hexdigest()[:8]

    # cache file name: <dir>/<host>-<hash>-<head><ext>
    local_path = '%(dir)s/%(host)s-%(hash)s-%(head)s%(ext)s' % locals()

    headers = {}
    if posixpath.exists(local_path):
        msg('Found local file: %s' % local_path )
        # ask the server for the file only if it changed since our copy
        t = localtime(os.stat(local_path).st_mtime)
        headers['If-Modified-Since'] = strftime('%a, %d %b %Y %H:%M:%S %Z', t)

    if scheme == 'https':
        conn = HTTPSConnection(host, timeout=5)
    else:
        conn = HTTPConnection(host, timeout=5)

    if query:
        remote_path += '?%s' % query

    conn.request('GET', remote_path, headers=headers)
    resp = conn.getresponse()

    if resp.status in range(200, 210):
        # hurrah, it worked
        f = open(un_posix(local_path), 'wb')
        msg('Reading from remote: %s' % remote_path)
        f.write(resp.read())
        f.close()

    elif resp.status in (301, 302, 303) and resp.getheader('location', False):
        # follow a redirect, totally untested.
        redirected_href = urljoin(href, resp.getheader('location'))
        redirected_path = locally_cache_remote_file(redirected_href, dir)
        os.rename(redirected_path, local_path)

    elif resp.status == 304:
        # hurrah, it's cached
        msg('Reading directly from local cache')
        pass

    else:
        raise Exception("Failed to get remote resource %s: %s" % (href, resp.status))

    return local_path
|
Locally cache a remote resource using a predictable file name
and awareness of modification date. Assume that files are "normal"
which is to say they have filenames with extensions.
|
def seek(self, offset, whence=SEEK_SET):
    """Reposition the file pointer.

    :param offset: byte offset, interpreted relative to `whence`.
    :param whence: SEEK_SET (absolute), SEEK_CUR (relative to the
        current position), or SEEK_END (relative to end of file).
    """
    if whence == SEEK_SET:
        self.__sf.seek(offset)
    elif whence == SEEK_CUR:
        self.__sf.seek(self.tell() + offset)
    elif whence == SEEK_END:
        # NOTE(review): the standard seek() contract ADDS the offset to the
        # file size (callers pass a negative offset for SEEK_END); here the
        # offset is SUBTRACTED, so a positive offset moves back from the
        # end. Confirm this is the intended API before changing.
        self.__sf.seek(self.__sf.filesize - offset)
|
Reposition the file pointer.
|
def get_title(self, properly_capitalized=False):
    """Returns the artist or track title.

    When `properly_capitalized` is True, self.title is first refreshed
    from the web service's getInfo response ("name" field).
    """
    if properly_capitalized:
        self.title = _extract(
            self._request(self.ws_prefix + ".getInfo", True), "name"
        )

    return self.title
|
Returns the artist or track title.
|
def ok_rev_reg_id(token: str, issuer_did: str = None) -> bool:
    """
    Whether input token looks like a valid revocation registry identifier from input issuer DID (default any); i.e.,
    <issuer-did>:4:<issuer-did>:3:CL:<schema-seq-no>:<cred-def-id-tag>:CL_ACCUM:<rev-reg-id-tag> for protocol >= 1.4, or
    <issuer-did>:4:<issuer-did>:3:CL:<schema-seq-no>:CL_ACCUM:<rev-reg-id-tag> for protocol == 1.3.

    :param token: candidate string
    :param issuer_did: issuer DID to match, if specified
    :return: whether input token looks like a valid revocation registry identifier
    """
    pattern = '([{0}]{{21,22}}):4:([{0}]{{21,22}}):3:CL:[1-9][0-9]*(:.+)?:CL_ACCUM:.+$'.format(B58)
    match = re.match(pattern, token or '')
    if not match:
        return False
    if not issuer_did:
        # no DID constraint: any well-formed identifier is acceptable
        return True
    return match.group(1) == issuer_did and match.group(2) == issuer_did
|
Whether input token looks like a valid revocation registry identifier from input issuer DID (default any); i.e.,
<issuer-did>:4:<issuer-did>:3:CL:<schema-seq-no>:<cred-def-id-tag>:CL_ACCUM:<rev-reg-id-tag> for protocol >= 1.4, or
<issuer-did>:4:<issuer-did>:3:CL:<schema-seq-no>:CL_ACCUM:<rev-reg-id-tag> for protocol == 1.3.
:param token: candidate string
:param issuer_did: issuer DID to match, if specified
:return: whether input token looks like a valid revocation registry identifier
|
def get_commit_bzs(self, from_revision, to_revision=None):
    """
    Return a list of tuples, one per commit. Each tuple is (sha1, subject,
    bz_list). bz_list is a (possibly zero-length) list of numbers.
    """
    commit_range = self.rev_range(from_revision, to_revision)
    fields = ['id', 'subject', 'body']
    # unit separator between fields, record separator between commits
    log_format = '%x1f'.join(['%h', '%s', '%b']) + '%x1e'
    log_out = self('log', '--format=%s' % log_format, commit_range,
                   log_cmd=False, fatal=False)
    if not log_out:
        return []

    result = []
    for raw_record in log_out.strip('\n\x1e').split("\x1e"):
        record = dict(zip(fields, raw_record.strip('\n\t ').split("\x1f")))
        # collect bug references from both the subject and the body
        bzs = search_bug_references(record['subject'])
        bzs.extend(search_bug_references(record['body']))
        result.append((record['id'], record['subject'], bzs))
    return result
|
Return a list of tuples, one per commit. Each tuple is (sha1, subject,
bz_list). bz_list is a (possibly zero-length) list of numbers.
|
def make_category(self, string, parent=None, order=1):
    """
    Make and save a category object from a string.

    Inserts the new node as the last child of `parent` and keeps the
    parent's tree right-edge in sync.
    """
    name = string.strip()
    cat = Category(
        name=name,
        slug=slugify(SLUG_TRANSLITERATOR(name))[:49],
        order=order
    )
    cat._tree_manager.insert_node(cat, parent, 'last-child', True)
    cat.save()
    if parent:
        parent.rght = cat.rght + 1
        parent.save()
    return cat
|
Make and save a category object from a string
|
def createTemplate(data):
    """
    Create a new template.

    Args:
        `data`: json data required for creating a template

    Returns:
        Dictionary containing the details of the template with its ID.
    """
    conn = Qubole.agent()
    # POST the payload to the templates REST endpoint
    return conn.post(Template.rest_entity_path, data)
|
Create a new template.
Args:
`data`: json data required for creating a template
Returns:
Dictionary containing the details of the template with its ID.
|
def convex_hull(self):
    """
    The convex hull of the whole scene

    Returns
    ---------
    hull: Trimesh object, convex hull of all meshes in scene
    """
    # stack every mesh's vertices into one point cloud, then hull it
    all_vertices = util.vstack_empty([mesh.vertices for mesh in self.dump()])
    return convex.convex_hull(all_vertices)
|
The convex hull of the whole scene
Returns
---------
hull: Trimesh object, convex hull of all meshes in scene
|
def _compile_fragment_ast(schema, current_schema_type, ast, location, context):
    """Return a list of basic blocks corresponding to the inline fragment at this AST node.

    Args:
        schema: GraphQL schema object, obtained from the graphql library
        current_schema_type: GraphQLType, the schema type at the current location
        ast: GraphQL AST node, obtained from the graphql library.
        location: Location object representing the current location in the query
        context: dict, various per-compilation data (e.g. declared tags, whether the current block
                 is optional, etc.). May be mutated in-place in this function!

    Returns:
        list of basic blocks, the compiled output of the vertex AST node
    """
    query_metadata_table = context['metadata']

    # step F-2. Emit a type coercion block if appropriate,
    # then recurse into the fragment's selection.
    coerces_to_type_name = ast.type_condition.name.value
    coerces_to_type_obj = schema.get_type(coerces_to_type_name)

    basic_blocks = []

    # Check if the coercion is necessary.
    # No coercion is necessary if coercing to the current type of the scope,
    # or if the scope is of union type, to the base type of the union as defined by
    # the type_equivalence_hints compilation parameter.
    is_same_type_as_scope = current_schema_type.is_same_type(coerces_to_type_obj)
    equivalent_union_type = context['type_equivalence_hints'].get(coerces_to_type_obj, None)
    is_base_type_of_union = (
        isinstance(current_schema_type, GraphQLUnionType) and
        current_schema_type.is_same_type(equivalent_union_type)
    )

    if not (is_same_type_as_scope or is_base_type_of_union):
        # Coercion is required.
        query_metadata_table.record_coercion_at_location(location, coerces_to_type_obj)
        # CoerceType takes a set of type names
        basic_blocks.append(blocks.CoerceType({coerces_to_type_name}))

    # Recurse into the fragment's selection with the coerced-to type as
    # the new current schema type.
    inner_basic_blocks = _compile_ast_node_to_ir(
        schema, coerces_to_type_obj, ast, location, context)
    basic_blocks.extend(inner_basic_blocks)

    return basic_blocks
|
Return a list of basic blocks corresponding to the inline fragment at this AST node.
Args:
schema: GraphQL schema object, obtained from the graphql library
current_schema_type: GraphQLType, the schema type at the current location
ast: GraphQL AST node, obtained from the graphql library.
location: Location object representing the current location in the query
context: dict, various per-compilation data (e.g. declared tags, whether the current block
is optional, etc.). May be mutated in-place in this function!
Returns:
list of basic blocks, the compiled output of the vertex AST node
|
def save_yaml_model(model, filename, sort=False, **kwargs):
    """
    Write the cobra model to a file in YAML format.

    ``kwargs`` are passed on to ``yaml.dump``.

    Parameters
    ----------
    model : cobra.Model
        The cobra model to represent.
    filename : str or file-like
        File path or descriptor that the YAML representation should be
        written to.
    sort : bool, optional
        Whether to sort the metabolites, reactions, and genes or maintain the
        order defined in the model.

    See Also
    --------
    to_yaml : Return a string representation.
    ruamel.yaml.dump : Base function.
    """
    obj = model_to_dict(model, sort=sort)
    obj["version"] = YAML_SPEC
    # file-like objects are written to directly; strings are treated as paths
    if not isinstance(filename, string_types):
        yaml.dump(obj, filename, **kwargs)
        return
    with io.open(filename, "w") as file_handle:
        yaml.dump(obj, file_handle, **kwargs)
|
Write the cobra model to a file in YAML format.
``kwargs`` are passed on to ``yaml.dump``.
Parameters
----------
model : cobra.Model
The cobra model to represent.
filename : str or file-like
File path or descriptor that the YAML representation should be
written to.
sort : bool, optional
Whether to sort the metabolites, reactions, and genes or maintain the
order defined in the model.
See Also
--------
to_yaml : Return a string representation.
ruamel.yaml.dump : Base function.
|
def multiply(self, matrix):
    """
    Multiply this matrix by a local dense matrix on the right.

    :param matrix: a local dense matrix whose number of rows must match the number of columns
                   of this matrix
    :returns: :py:class:`RowMatrix`

    >>> rm = RowMatrix(sc.parallelize([[0, 1], [2, 3]]))
    >>> rm.multiply(DenseMatrix(2, 2, [0, 2, 1, 3])).rows.collect()
    [DenseVector([2.0, 3.0]), DenseVector([6.0, 11.0])]
    """
    # only local dense matrices can be shipped to the JVM side here
    if not isinstance(matrix, DenseMatrix):
        raise ValueError("Only multiplication with DenseMatrix "
                         "is supported.")
    return RowMatrix(self._java_matrix_wrapper.call("multiply", matrix))
|
Multiply this matrix by a local dense matrix on the right.
:param matrix: a local dense matrix whose number of rows must match the number of columns
of this matrix
:returns: :py:class:`RowMatrix`
>>> rm = RowMatrix(sc.parallelize([[0, 1], [2, 3]]))
>>> rm.multiply(DenseMatrix(2, 2, [0, 2, 1, 3])).rows.collect()
[DenseVector([2.0, 3.0]), DenseVector([6.0, 11.0])]
|
def verify(path):
    """Verify that `path` is a zip file with Phasics TIFF files.

    Returns True as soon as one "SID PHA*.tif" member passes
    SingleTifPhasics.verify, False otherwise (including when `path` is
    not a zip archive at all).
    """
    try:
        zf = zipfile.ZipFile(path)
    except (zipfile.BadZipfile, IsADirectoryError):
        # not a zip archive (or a directory) - cannot be valid
        return False
    # BUG FIX: the archive was previously closed only on the normal path;
    # the context manager guarantees closure even if verification raises.
    with zf:
        names = sorted(zf.namelist())
        names = [nn for nn in names if nn.endswith(".tif")]
        names = [nn for nn in names if nn.startswith("SID PHA")]
        for name in names:
            with zf.open(name) as pt:
                fd = io.BytesIO(pt.read())
            if SingleTifPhasics.verify(fd):
                return True
    return False
|
Verify that `path` is a zip file with Phasics TIFF files
|
def get_experiment_kind(root):
    """Read common properties from root of ReSpecTh XML file.

    Args:
        root (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file

    Returns:
        properties (`dict`): Dictionary with experiment type and apparatus information.
    """
    experiment_type = root.find('experimentType').text
    if experiment_type != 'Ignition delay measurement':
        raise NotImplementedError(experiment_type + ' not (yet) supported')

    properties = {
        'experiment-type': 'ignition delay',
        'apparatus': {'kind': '', 'institution': '', 'facility': ''},
    }

    # getattr() handles a missing <apparatus/kind> element gracefully;
    # an empty string is treated the same as a missing element
    kind = getattr(root.find('apparatus/kind'), 'text', False)
    if not kind:
        raise MissingElementError('apparatus/kind')
    if kind not in ('shock tube', 'rapid compression machine'):
        raise NotImplementedError(kind + ' experiment not (yet) supported')
    properties['apparatus']['kind'] = kind

    return properties
|
Read common properties from root of ReSpecTh XML file.
Args:
root (`~xml.etree.ElementTree.Element`): Root of ReSpecTh XML file
Returns:
properties (`dict`): Dictionary with experiment type and apparatus information.
|
def cases(self, env, data):
    '''Calls each nested handler until one of them returns nonzero result.
    If any handler returns `None`, it is interpreted as
    "request does not match, the handler has nothing to do with it and
    `web.cases` should try to call the next handler".'''
    for nested_handler in self.handlers:
        # snapshot env/data so each handler sees a clean state
        env._push()
        data._push()
        try:
            outcome = nested_handler(env, data)
        finally:
            env._pop()
            data._pop()
        if outcome is not None:
            return outcome
|
Calls each nested handler until one of them returns nonzero result.
If any handler returns `None`, it is interpreted as
"request does not match, the handler has nothing to do with it and
`web.cases` should try to call the next handler".
|
def recursive_apply(inval, func):
    '''Recursively apply a function to all levels of nested iterables

    :param inval: the object to run the function on
    :param func: the function that will be run on the inval
    '''
    # dicts and lists are traversed; every other value is a leaf
    if isinstance(inval, dict):
        return {key: recursive_apply(item, func) for key, item in inval.items()}
    if isinstance(inval, list):
        return [recursive_apply(item, func) for item in inval]
    return func(inval)
|
Recursively apply a function to all levels of nested iterables
:param inval: the object to run the function on
:param func: the function that will be run on the inval
|
def filter_bolts(table, header):
    """ filter to keep bolts

    Keeps only the rows whose first column equals 'bolt'; the header is
    passed through unchanged.
    """
    bolts_info = [row for row in table if row[0] == 'bolt']
    return bolts_info, header
|
filter to keep bolts
|
def loadUi(self, filename, baseinstance=None):
    """
    Generate a loader to load the filename.

    :param      filename     | <str>
                baseinstance | <QWidget>

    :return     <QWidget> || None
    """
    # NOTE(review): ElementTree.parse raises xml.etree.ElementTree.ParseError
    # on Python 3, which is not guaranteed to be an expat.ExpatError —
    # confirm malformed .ui files are actually caught here.
    try:
        xui = ElementTree.parse(filename)
    except xml.parsers.expat.ExpatError:
        log.exception('Could not load file: %s' % filename)
        return None

    loader = UiLoader(baseinstance)

    # pre-load custom widgets
    xcustomwidgets = xui.find('customwidgets')
    if xcustomwidgets is not None:
        for xcustom in xcustomwidgets:
            header = xcustom.find('header').text
            clsname = xcustom.find('class').text

            if not header:
                continue

            if clsname in loader.dynamicWidgets:
                continue

            # modify the C++ headers to use the Python wrapping
            if '/' in header:
                header = 'xqt.' + '.'.join(header.split('/')[:-1])

            # try to use the custom widgets; a widget that cannot be
            # imported is logged and skipped rather than aborting the load
            try:
                __import__(header)
                module = sys.modules[header]
                cls = getattr(module, clsname)
            except (ImportError, KeyError, AttributeError):
                log.error('Could not load %s.%s' % (header, clsname))
                continue

            loader.dynamicWidgets[clsname] = cls
            loader.registerCustomWidget(cls)

    # load the options
    ui = loader.load(filename)
    QtCore.QMetaObject.connectSlotsByName(ui)

    return ui
|
Generate a loader to load the filename.
:param filename | <str>
baseinstance | <QWidget>
:return <QWidget> || None
|
def add_subtree(cls, for_node, node, options):
    """ Recursively build options tree.

    Appends a (pk, indented-escaped-label) choice for `node` and each of
    its descendants to `options`, skipping subtrees whose inclusion would
    create a loop with `for_node`.
    """
    if cls.is_loop_safe(for_node, node):
        options.append(
            (node.pk,
             mark_safe(cls.mk_indent(node.get_depth()) + escape(node))))
        for subnode in node.get_children():
            cls.add_subtree(for_node, subnode, options)
|
Recursively build options tree.
|
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
|
Return a shuffled copy of y, optionally shuffling only within groups that share the same label.
|
def validate_pair(ob: Any) -> bool:
    """
    Does the object have length 2?

    Returns False (instead of raising) for objects that have no length
    at all, e.g. ints.
    """
    try:
        if len(ob) != 2:
            log.warning("Unexpected result: {!r}", ob)
            raise ValueError()
    except (TypeError, ValueError):
        # BUG FIX: TypeError from len() on unsized objects previously
        # propagated to the caller instead of yielding False.
        return False
    return True
|
Does the object have length 2?
|
def reload(self):
    """
    Re-fetches the object from the API, discarding any local changes.
    Returns without doing anything if the object is new (i.e. it has
    no id yet).
    """
    if not self.id:
        return
    # fetch a fresh copy and adopt its payload and ETag wholesale
    reloaded_object = self.__class__.find(self.id)
    self.set_raw(
        reloaded_object.raw,
        reloaded_object.etag
    )
|
Re-fetches the object from the API, discarding any local changes.
Returns without doing anything if the object is new.
|
def get_image(self, image, output='vector'):
    """ A flexible method for transforming between different
    representations of image data.

    Args:
        image: The input image. Can be a string (filename of image),
            NiBabel image, N-dimensional array (must have same shape as
            self.volume), or vectorized image data (must have same length
            as current conjunction mask).
        output: The format of the returned image representation. Must be
            one of:
                'vector': A 1D vectorized array
                'array': An N-dimensional array, with
                    shape = self.volume.shape
                'image': A NiBabel image

    Returns: An object containing image data; see output options above.
    """
    # string input: treat as a filename and load through NiBabel
    if isinstance(image, string_types):
        image = nb.load(image)

    # NiBabel input: return as-is for 'image', otherwise drop to array
    if type(image).__module__.startswith('nibabel'):
        if output == 'image':
            return image
        image = image.get_data()

    if not type(image).__module__.startswith('numpy'):
        raise ValueError("Input image must be a string, a NiBabel image, "
                         "or a numpy array.")

    # array matching the volume's 3D shape: convert per requested output
    if image.shape[:3] == self.volume.shape:
        if output == 'image':
            return nb.nifti1.Nifti1Image(image, None, self.get_header())
        elif output == 'array':
            return image
        else:
            image = image.ravel()

    # at this point `image` is (or has been flattened to) 1D data
    if output == 'vector':
        return image.ravel()

    image = np.reshape(image, self.volume.shape)

    if output == 'array':
        return image

    return nb.nifti1.Nifti1Image(image, None, self.get_header())
|
A flexible method for transforming between different
representations of image data.
Args:
image: The input image. Can be a string (filename of image),
NiBabel image, N-dimensional array (must have same shape as
self.volume), or vectorized image data (must have same length
as current conjunction mask).
output: The format of the returned image representation. Must be
one of:
'vector': A 1D vectorized array
'array': An N-dimensional array, with
shape = self.volume.shape
'image': A NiBabel image
Returns: An object containing image data; see output options above.
|
def rate_limited(max_per_hour: int, *args: Any) -> Callable[..., Any]:
    """Rate limit a function.

    Thin wrapper that delegates to ``util.rate_limited``; `max_per_hour`
    caps how many calls per hour are allowed.
    """
    return util.rate_limited(max_per_hour, *args)
|
Rate limit a function.
|
def patcher(args):
    """
    %prog patcher backbone.bed other.bed

    Given optical map alignment, prepare the patchers. Use --backbone to suggest
    which assembly is the major one, and the patchers will be extracted from
    another assembly.
    """
    from jcvi.formats.bed import uniq

    p = OptionParser(patcher.__doc__)
    p.add_option("--backbone", default="OM",
                 help="Prefix of the backbone assembly [default: %default]")
    p.add_option("--object", default="object",
                 help="New object name [default: %default]")
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    backbonebed, otherbed = args
    # deduplicate both inputs; uniq() returns the path of the uniq'd file
    backbonebed = uniq([backbonebed])
    otherbed = uniq([otherbed])

    pf = backbonebed.split(".")[0]
    # NOTE(review): this `key` is never used and is overwritten below —
    # dead assignment kept for byte-identical code.
    key = lambda x: (x.seqid, x.start, x.end)

    # Make a uniq bed keeping backbone at redundant intervals
    cmd = "intersectBed -v -wa"
    cmd += " -a {0} -b {1}".format(otherbed, backbonebed)
    outfile = otherbed.rsplit(".", 1)[0] + ".not." + backbonebed
    sh(cmd, outfile=outfile)

    uniqbed = Bed()
    uniqbedfile = pf + ".merged.bed"
    uniqbed.extend(Bed(backbonebed))
    uniqbed.extend(Bed(outfile))
    uniqbed.print_to_file(uniqbedfile, sorted=True)

    # Condense adjacent intervals, allow some chaining
    bed = uniqbed
    key = lambda x: range_parse(x.accn).seqid

    bed_fn = pf + ".patchers.bed"
    bed_fw = open(bed_fn, "w")

    # one merged interval per group of features sharing a seqid
    for k, sb in groupby(bed, key=key):
        sb = list(sb)
        chr, start, end, strand = merge_ranges(sb)

        print("\t".join(str(x) for x in \
                (chr, start, end, opts.object, 1000, strand)), file=bed_fw)

    bed_fw.close()
|
%prog patcher backbone.bed other.bed
Given optical map alignment, prepare the patchers. Use --backbone to suggest
which assembly is the major one, and the patchers will be extracted from
another assembly.
|
def RetryUpload(self, job, job_id, error):
    """Retry the BigQuery upload job.

    Using the same job id protects us from duplicating data on the server. If we
    fail all of our retries we raise.

    Args:
        job: BigQuery job object
        job_id: ID string for this upload job
        error: errors.HttpError object from the first error

    Returns:
        API response object on success, None on failure

    Raises:
        BigQueryJobUploadError: if we can't get the bigquery job started after
            retry_max_attempts
    """
    if self.IsErrorRetryable(error):
        retry_count = 0
        sleep_interval = config.CONFIG["BigQuery.retry_interval"]
        while retry_count < config.CONFIG["BigQuery.retry_max_attempts"]:

            time.sleep(sleep_interval.seconds)
            logging.info("Retrying job_id: %s", job_id)
            retry_count += 1

            try:
                response = job.execute()
                return response
            except errors.HttpError as e:
                if self.IsErrorRetryable(e):
                    # exponential backoff: grow the sleep interval each retry
                    sleep_interval *= config.CONFIG["BigQuery.retry_multiplier"]
                    logging.exception("Error with job: %s, will retry in %s", job_id,
                                      sleep_interval)
                else:
                    raise BigQueryJobUploadError(
                        "Can't retry error code %s. Giving up"
                        " on job: %s." % (e.resp.status, job_id))
    else:
        raise BigQueryJobUploadError("Can't retry error code %s. Giving up on "
                                     "job: %s." % (error.resp.status, job_id))

    # only reachable when the retry loop above exhausts all attempts
    raise BigQueryJobUploadError(
        "Giving up on job:%s after %s retries." % (job_id, retry_count))
|
Retry the BigQuery upload job.
Using the same job id protects us from duplicating data on the server. If we
fail all of our retries we raise.
Args:
job: BigQuery job object
job_id: ID string for this upload job
error: errors.HttpError object from the first error
Returns:
API response object on success, None on failure
Raises:
BigQueryJobUploadError: if we can't get the bigquery job started after
retry_max_attempts
|
def list_files(start_path):
    """tree unix command replacement.

    Walk *start_path* recursively and return a unicode string rendering
    the hierarchy: one entry per line, directories suffixed with ``/``,
    indented 4 spaces per depth level. The string always begins with a
    newline (matching the original output format).
    """
    s = u'\n'
    for root, dirs, files in os.walk(start_path):
        # Depth = separators in the part of `root` *after* the start_path
        # prefix. (str.replace would also clobber any later occurrence of
        # start_path inside root — e.g. walking "t/d" over "t/d/t/d" —
        # yielding a wrong depth.)
        level = root[len(start_path):].count(os.sep)
        indent = ' ' * 4 * level
        s += u'{}{}/\n'.format(indent, os.path.basename(root))
        sub_indent = ' ' * 4 * (level + 1)
        for f in files:
            s += u'{}{}\n'.format(sub_indent, f)
    return s
|
tree unix command replacement.
|
def translate(args):
    """
    %prog translate cdsfasta
    Translate CDS to proteins. The tricky thing is that sometimes the CDS
    represents a partial gene, therefore disrupting the frame of the protein.
    Check all three frames to get a valid translation.
    """
    # NCBI genetic-code table ids 1..24, as strings for the optparse `choices`.
    transl_tables = [str(x) for x in xrange(1,25)]
    p = OptionParser(translate.__doc__)
    p.add_option("--ids", default=False, action="store_true",
                 help="Create .ids file with the complete/partial/gaps "
                      "label [default: %default]")
    p.add_option("--longest", default=False, action="store_true",
                 help="Find the longest ORF from each input CDS [default: %default]")
    p.add_option("--table", default=1, choices=transl_tables,
                 help="Specify translation table to use [default: %default]")
    p.set_outfile()
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())
    cdsfasta, = args
    # Optional preprocessing: replace each CDS with its longest ORF first.
    if opts.longest:
        cdsfasta = longestorf([cdsfasta])
    f = Fasta(cdsfasta, lazy=True)
    outfile = opts.outfile
    fw = must_open(outfile, "w")
    if opts.ids:
        # Side-car .ids file labeling each record (complete/partial/ns, ...).
        idsfile = cdsfasta.rsplit(".", 1)[0] + ".ids"
        ids = open(idsfile, "w")
    else:
        ids = None
    # Tallies for the summary report written to stderr at the end.
    five_prime_missing = three_prime_missing = 0
    contain_ns = complete = cannot_translate = total = 0
    for name, rec in f.iteritems_ordered():
        cds = rec.seq
        cdslen = len(cds)
        # Peptide length in whole codons; integer division under Python 2
        # (this module uses xrange, so it targets py2 semantics).
        peplen = cdslen / 3
        total += 1
        # Try all three frames
        pep = ""
        # Keep the frame whose translation has the longest run before the
        # first stop codon ("*") — the most plausible reading frame for a
        # possibly partial CDS.
        for i in xrange(3):
            newcds = cds[i: i + peplen * 3]
            newpep = newcds.translate(table=opts.table)
            if len(newpep.split("*")[0]) > len(pep.split("*")[0]):
                pep = newpep
        labels = []
        # An internal stop (a "*" that is not just a trailing terminator)
        # means no frame gave a clean translation.
        if "*" in pep.rstrip("*"):
            logging.error("{0} cannot translate".format(name))
            cannot_translate += 1
            labels.append("cannot_translate")
        # Classify completeness of the gene model from the peptide ends.
        contains_start = pep.startswith("M")
        contains_stop = pep.endswith("*")
        contains_ns = "X" in pep
        start_ns = pep.startswith("X")
        end_ns = pep.endswith("X")
        if not contains_start:
            five_prime_missing += 1
            labels.append("five_prime_missing")
        if not contains_stop:
            three_prime_missing += 1
            labels.append("three_prime_missing")
        if contains_ns:
            contain_ns += 1
            labels.append("contain_ns")
        if contains_start and contains_stop:
            complete += 1
            labels.append("complete")
        if start_ns:
            labels.append("start_ns")
        if end_ns:
            labels.append("end_ns")
        if ids:
            print("\t".join((name, ",".join(labels))), file=ids)
        peprec = SeqRecord(pep, id=name, description=rec.description)
        SeqIO.write([peprec], fw, "fasta")
        fw.flush()
    # Summary statistics (percentages of `total`) go to stderr so they do
    # not pollute the FASTA written to stdout/outfile.
    print("Complete gene models: {0}".\
        format(percentage(complete, total)), file=sys.stderr)
    print("Missing 5`-end: {0}".\
        format(percentage(five_prime_missing, total)), file=sys.stderr)
    print("Missing 3`-end: {0}".\
        format(percentage(three_prime_missing, total)), file=sys.stderr)
    print("Contain Ns: {0}".\
        format(percentage(contain_ns, total)), file=sys.stderr)
    if cannot_translate:
        print("Cannot translate: {0}".\
            format(percentage(cannot_translate, total)), file=sys.stderr)
    fw.close()
    return cdsfasta, outfile
|
%prog translate cdsfasta
Translate CDS to proteins. The tricky thing is that sometimes the CDS
represents a partial gene, therefore disrupting the frame of the protein.
Check all three frames to get a valid translation.
|
def trainClassifier(cls, data, numClasses, categoricalFeaturesInfo, numTrees,
                    featureSubsetStrategy="auto", impurity="gini", maxDepth=4, maxBins=32,
                    seed=None):
    """
    Train a random forest model for binary or multiclass
    classification.
    :param data:
      Training dataset: RDD of LabeledPoint. Labels should take values
      {0, 1, ..., numClasses-1}.
    :param numClasses:
      Number of classes for classification.
    :param categoricalFeaturesInfo:
      Map storing arity of categorical features. An entry (n -> k)
      indicates that feature n is categorical with k categories
      indexed from 0: {0, 1, ..., k-1}.
    :param numTrees:
      Number of trees in the random forest.
    :param featureSubsetStrategy:
      Number of features to consider for splits at each node.
      Supported values: "auto", "all", "sqrt", "log2", "onethird".
      If "auto" is set, this parameter is set based on numTrees:
        if numTrees == 1, set to "all";
        if numTrees > 1 (forest) set to "sqrt".
      (default: "auto")
    :param impurity:
      Criterion used for information gain calculation.
      Supported values: "gini" or "entropy".
      (default: "gini")
    :param maxDepth:
      Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
      means 1 internal node + 2 leaf nodes).
      (default: 4)
    :param maxBins:
      Maximum number of bins used for splitting features.
      (default: 32)
    :param seed:
      Random seed for bootstrapping and choosing feature subsets.
      Set as None to generate seed based on system time.
      (default: None)
    :return:
      RandomForestModel that can be used for prediction.
    Example usage:
    >>> from pyspark.mllib.regression import LabeledPoint
    >>> from pyspark.mllib.tree import RandomForest
    >>>
    >>> data = [
    ...     LabeledPoint(0.0, [0.0]),
    ...     LabeledPoint(0.0, [1.0]),
    ...     LabeledPoint(1.0, [2.0]),
    ...     LabeledPoint(1.0, [3.0])
    ... ]
    >>> model = RandomForest.trainClassifier(sc.parallelize(data), 2, {}, 3, seed=42)
    >>> model.numTrees()
    3
    >>> model.totalNumNodes()
    7
    >>> print(model)
    TreeEnsembleModel classifier with 3 trees
    <BLANKLINE>
    >>> print(model.toDebugString())
    TreeEnsembleModel classifier with 3 trees
    <BLANKLINE>
      Tree 0:
        Predict: 1.0
      Tree 1:
        If (feature 0 <= 1.5)
         Predict: 0.0
        Else (feature 0 > 1.5)
         Predict: 1.0
      Tree 2:
        If (feature 0 <= 1.5)
         Predict: 0.0
        Else (feature 0 > 1.5)
         Predict: 1.0
    <BLANKLINE>
    >>> model.predict([2.0])
    1.0
    >>> model.predict([0.0])
    0.0
    >>> rdd = sc.parallelize([[3.0], [1.0]])
    >>> model.predict(rdd).collect()
    [1.0, 0.0]
    """
    # Delegate to the shared RandomForest training helper with the
    # algorithm fixed to classification; every user-facing parameter is
    # passed through unchanged.
    algo = "classification"
    return cls._train(data, algo, numClasses, categoricalFeaturesInfo,
                      numTrees, featureSubsetStrategy, impurity,
                      maxDepth, maxBins, seed)
|
Train a random forest model for binary or multiclass
classification.
:param data:
Training dataset: RDD of LabeledPoint. Labels should take values
{0, 1, ..., numClasses-1}.
:param numClasses:
Number of classes for classification.
:param categoricalFeaturesInfo:
Map storing arity of categorical features. An entry (n -> k)
indicates that feature n is categorical with k categories
indexed from 0: {0, 1, ..., k-1}.
:param numTrees:
Number of trees in the random forest.
:param featureSubsetStrategy:
Number of features to consider for splits at each node.
Supported values: "auto", "all", "sqrt", "log2", "onethird".
If "auto" is set, this parameter is set based on numTrees:
if numTrees == 1, set to "all";
if numTrees > 1 (forest) set to "sqrt".
(default: "auto")
:param impurity:
Criterion used for information gain calculation.
Supported values: "gini" or "entropy".
(default: "gini")
:param maxDepth:
Maximum depth of tree (e.g. depth 0 means 1 leaf node, depth 1
means 1 internal node + 2 leaf nodes).
(default: 4)
:param maxBins:
Maximum number of bins used for splitting features.
(default: 32)
:param seed:
Random seed for bootstrapping and choosing feature subsets.
Set as None to generate seed based on system time.
(default: None)
:return:
RandomForestModel that can be used for prediction.
Example usage:
>>> from pyspark.mllib.regression import LabeledPoint
>>> from pyspark.mllib.tree import RandomForest
>>>
>>> data = [
... LabeledPoint(0.0, [0.0]),
... LabeledPoint(0.0, [1.0]),
... LabeledPoint(1.0, [2.0]),
... LabeledPoint(1.0, [3.0])
... ]
>>> model = RandomForest.trainClassifier(sc.parallelize(data), 2, {}, 3, seed=42)
>>> model.numTrees()
3
>>> model.totalNumNodes()
7
>>> print(model)
TreeEnsembleModel classifier with 3 trees
<BLANKLINE>
>>> print(model.toDebugString())
TreeEnsembleModel classifier with 3 trees
<BLANKLINE>
Tree 0:
Predict: 1.0
Tree 1:
If (feature 0 <= 1.5)
Predict: 0.0
Else (feature 0 > 1.5)
Predict: 1.0
Tree 2:
If (feature 0 <= 1.5)
Predict: 0.0
Else (feature 0 > 1.5)
Predict: 1.0
<BLANKLINE>
>>> model.predict([2.0])
1.0
>>> model.predict([0.0])
0.0
>>> rdd = sc.parallelize([[3.0], [1.0]])
>>> model.predict(rdd).collect()
[1.0, 0.0]
|
def parents(self):
    """Return the chain of ancestor nodes, nearest ancestor first."""
    chain = []
    node = self
    while node.parent is not None:
        # A node must never be its own parent; that would loop forever.
        assert node.parent is not node
        chain.append(node.parent)
        node = node.parent
    return chain
|
return the ancestor nodes
|
def read_remote(self):
    '''Read one message from the remote channel (in contrast to
    the local user output channel) and split it into its leading
    control character and the remaining payload.'''
    msg = self.inout.read_msg()
    # The wire format may hand back bytes; normalize to text first.
    if isinstance(msg, bytes):
        msg = msg.decode("utf-8")
    return (msg[0], msg[1:])
|
Read a message from the server (in contrast to
the local user output channel).
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.