def _remove_wire_nets(block):
""" Remove all wire nodes from the block. """
wire_src_dict = _ProducerList()
wire_removal_set = set() # set of all wirevectors to be removed
# one pass to build the map of value producers and
# all of the nets and wires to be removed
for net in block.logic:
if net.op == 'w':
wire_src_dict[net.dests[0]] = net.args[0]
if not isinstance(net.dests[0], Output):
wire_removal_set.add(net.dests[0])
# second full pass to create the new logic without the wire nets
new_logic = set()
for net in block.logic:
if net.op != 'w' or isinstance(net.dests[0], Output):
new_args = tuple(wire_src_dict.find_producer(x) for x in net.args)
new_net = LogicNet(net.op, net.op_param, new_args, net.dests)
new_logic.add(new_net)
# now update the block with the new logic and remove wirevectors
block.logic = new_logic
for dead_wirevector in wire_removal_set:
del block.wirevector_by_name[dead_wirevector.name]
block.wirevector_set.remove(dead_wirevector)
block.sanity_check()
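# A minimal sketch of the alias-chasing helper assumed above (hypothetical;
# the real _ProducerList may differ): each 'w' net maps its destination wire
# to its source, and find_producer() follows alias chains to the true driver.
class _ProducerList(dict):
    def find_producer(self, wire):
        # Follow wire -> source links until a non-alias wire is reached.
        while wire in self:
            wire = self[wire]
        return wire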
def blurred_image_1d_from_1d_unblurred_and_blurring_images(unblurred_image_1d, blurring_image_1d, convolver):
"""For a 1D masked image and 1D blurring image (the regions outside the mask whose light blurs \
into the mask after PSF convolution), use both to compute the blurred image within the mask via PSF convolution.
    The convolution is performed by the given convolver (*see ccd.convolution*).
Parameters
----------
unblurred_image_1d : ndarray
        The 1D masked data which is to be blurred.
blurring_image_1d : ndarray
The 1D masked blurring image which is used for blurring.
convolver : ccd.convolution.ConvolverImage
The image-convolver which performs the convolution in 1D.
"""
return convolver.convolve_image(image_array=unblurred_image_1d, blurring_array=blurring_image_1d)
def show_system_info_output_show_system_info_rbridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_system_info = ET.Element("show_system_info")
config = show_system_info
output = ET.SubElement(show_system_info, "output")
show_system_info = ET.SubElement(output, "show-system-info")
rbridge_id = ET.SubElement(show_system_info, "rbridge-id")
rbridge_id.text = kwargs.pop('rbridge_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def set_extend(self, extend):
"""
Sets the mode to be used for drawing outside the area of this pattern.
See :ref:`EXTEND` for details on the semantics of each extend strategy.
The default extend mode is
:obj:`NONE <EXTEND_NONE>` for :class:`SurfacePattern`
and :obj:`PAD <EXTEND_PAD>` for :class:`Gradient` patterns.
"""
cairo.cairo_pattern_set_extend(self._pointer, extend)
self._check_status()
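# Usage sketch with cairocffi (assumes the module-level EXTEND_* constants):
import cairocffi
surface = cairocffi.ImageSurface(cairocffi.FORMAT_ARGB32, 16, 16)
pattern = cairocffi.SurfacePattern(surface)
pattern.set_extend(cairocffi.EXTEND_REPEAT)  # tile the surface beyond its bounds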
def get_lines_from_file(filename, lineno, context_lines):
"""
Returns context_lines before and after lineno from file.
Returns (pre_context_lineno, pre_context, context_line, post_context).
"""
def get_lines(start, end):
return [linecache.getline(filename, l).rstrip() for l in range(start, end)]
lower_bound = max(1, lineno - context_lines)
upper_bound = lineno + context_lines
linecache.checkcache(filename)
pre_context = get_lines(lower_bound, lineno)
context_line = linecache.getline(filename, lineno).rstrip()
    post_context = get_lines(lineno + 1, upper_bound + 1)  # range end is exclusive
return lower_bound, pre_context, context_line, post_context
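# Usage sketch ('example.py' is hypothetical; linecache yields '' past EOF):
# fetch two lines of context around line 10.
start, pre, line, post = get_lines_from_file('example.py', 10, 2)
# start == 8, pre == lines 8-9, line == line 10, post == lines 11-12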
def _make_args_checker(self):
"""
Create a function that checks signature of the source function.
"""
def _checker(*args, **kws):
# Check if too many arguments are provided
nargs = len(args)
nnonvaargs = min(nargs, self._max_positional_args)
if nargs > self._max_positional_args and self._ivararg is None:
raise self._too_many_args_error(nargs)
# Check if there are too few positional arguments (without defaults)
if nargs < self._min_positional_args:
missing = [p.name
for p in self.params[nargs:self._min_positional_args]
if p.name not in kws]
# The "missing" arguments may still be provided as keywords, in
# which case it's not an error at all.
if missing:
raise self._too_few_args_error(missing, "positional")
# Check if there are too few required keyword arguments
if self._required_kwonly_args:
missing = [kw
for kw in self._required_kwonly_args
if kw not in kws]
if missing:
raise self._too_few_args_error(missing, "keyword")
# Check types of positional arguments
for i, argvalue in enumerate(args):
param = self.params[i if i < self._max_positional_args else
self._ivararg]
if param.checker and not (
param.checker.check(argvalue) or
param.has_default and
(argvalue is param.default or argvalue == param.default)
):
raise self._param_type_error(param, param.name, argvalue)
# Check types of keyword arguments
        for argname, argvalue in kws.items():
            index = self._iargs.get(argname)
            if index is not None and index < nnonvaargs:
                raise self._repeating_arg_error(argname)
            if index is None:
                index = self._ivarkws
if index is None:
s = "%s got an unexpected keyword argument `%s`" % \
(self.name_bt, argname)
raise self._type_error(s)
param = self.params[index]
if param.checker and not (
param.checker.check(argvalue) or
param.has_default and
(argvalue is param.default or argvalue == param.default)
):
raise self._param_type_error(param, argname, argvalue)
return _checker
def run(itf):
"""
Run postanalyze functions.
"""
if not itf:
return 1
# access user input
options = SplitInput(itf)
# check input args
error_check(options)
# read input files
try:
molecules, ensemble_lookup = ReadFiles(options)
    except Exception:
return 1
if options.compare:
compare(molecules, ensemble_lookup, options)
else:
evaluate_list(molecules, ensemble_lookup, options)
def get_items(self, from_date, url, expand_fields=True):
"""Retrieve all the items from a given date.
    :param from_date: obtain items updated since this date
    :param url: endpoint API url
    :param expand_fields: if True, it includes the expand fields in the payload
"""
start_at = 0
req = self.fetch(url, payload=self.__build_payload(start_at, from_date, expand_fields))
issues = req.text
data = req.json()
titems = data['total']
nitems = data['maxResults']
start_at += min(nitems, titems)
self.__log_status(start_at, titems, url)
while issues:
yield issues
issues = None
if data['startAt'] + nitems < titems:
req = self.fetch(url, payload=self.__build_payload(start_at, from_date, expand_fields))
data = req.json()
start_at += nitems
issues = req.text
self.__log_status(start_at, titems, url)
def timeago(tz=None, *args, **kwargs):
"""Return a datetime so much time ago. Takes the same arguments as timedelta()."""
return totz(datetime.now(), tz) - timedelta(*args, **kwargs)
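# Usage sketch (assumes the module's totz() localizes to the given tzinfo):
from datetime import timezone
an_hour_ago = timeago(None, hours=1)        # naive local time, one hour ago
recent = timeago(timezone.utc, minutes=15)  # 15 minutes ago in UTC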
def build_agg_vec(agg_vec, **source):
""" Builds an combined aggregation vector based on various classifications
This function build an aggregation vector based on the order in agg_vec.
The naming and actual mapping is given in source, either explicitly or by
pointing to a folder with the mapping.
>>> build_agg_vec(['EU', 'OECD'], path = 'test')
['EU', 'EU', 'EU', 'OECD', 'REST', 'REST']
>>> build_agg_vec(['OECD', 'EU'], path = 'test', miss='RoW')
['OECD', 'EU', 'OECD', 'OECD', 'RoW', 'RoW']
>>> build_agg_vec(['EU', 'orig_regions'], path = 'test')
['EU', 'EU', 'EU', 'reg4', 'reg5', 'reg6']
    >>> build_agg_vec(['supreg1', 'other'], path = 'test',
    ...     other = [None, None, 'other1', 'other1', 'other2', 'other2'])
['supreg1', 'supreg1', 'other1', 'other1', 'other2', 'other2']
Parameters
----------
agg_vec : list
A list of sector or regions to which the IOSystem shall be aggregated.
The order in agg_vec is important:
If a string was assigned to one specific entry it will not be
overwritten if it is given in the next vector, e.g. ['EU', 'OECD']
would aggregate first into EU and the remaining one into OECD, whereas
        ['OECD', 'EU'] would first aggregate all countries into OECD and then
the remaining countries into EU.
source : list or string
Definition of the vectors in agg_vec. The input vectors (either in the
file or given as list for the entries in agg_vec) must be as long as
the desired output with a string for every position which should be
aggregated and None for position which should not be used.
Special keywords:
- path : Path to a folder with concordance matrices.
The files in the folder can have any extension but must be
in text format (tab separated) with one entry per row.
The last column in the file will be taken as aggregation
vectors (other columns can be used for documentation).
Values must be given for every entry in the original
classification (string None for all values not used) If
the same entry is given in source and as text file in
            path, the one in source will be used.
Two special path entries are available so far:
- 'exio2'
Concordance matrices for EXIOBASE 2.0
- 'test'
Concordance matrices for the test IO system
            If an entry is not found in source and no path is given
the current directory will be searched for the definition.
- miss : Entry to use for missing values, default: 'REST'
Returns
-------
list (aggregation vector)
"""
# build a dict with aggregation vectors in source and folder
if type(agg_vec) is str:
agg_vec = [agg_vec]
agg_dict = dict()
for entry in agg_vec:
try:
agg_dict[entry] = source[entry]
except KeyError:
folder = source.get('path', './')
folder = os.path.join(PYMRIO_PATH[folder], 'concordance')
for file in os.listdir(folder):
if entry == os.path.splitext(file)[0]:
_tmp = np.genfromtxt(os.path.join(folder, file), dtype=str)
if _tmp.ndim == 1:
agg_dict[entry] = [None if ee == 'None'
else ee for ee in _tmp.tolist()]
else:
agg_dict[entry] = [None if ee == 'None'
else ee
for ee in _tmp[:, -1].tolist()]
break
else:
logging.error(
'Aggregation vector -- {} -- not found'
.format(str(entry)))
# build the summary aggregation vector
def _rep(ll, ii, vv): ll[ii] = vv
miss_val = source.get('miss', 'REST')
vec_list = [agg_dict[ee] for ee in agg_vec]
out = [None, ] * len(vec_list[0])
for currvec in vec_list:
if len(currvec) != len(out):
            logging.warning('Inconsistent vector length')
[_rep(out, ind, val) for ind, val in
enumerate(currvec) if not out[ind]]
[_rep(out, ind, miss_val) for ind, val in enumerate(out) if not val]
return out
def plot_neuron3d(ax, nrn, neurite_type=NeuriteType.all,
diameter_scale=_DIAMETER_SCALE, linewidth=_LINEWIDTH,
color=None, alpha=_ALPHA):
'''
Generates a figure of the neuron,
    which contains a soma and a list of trees.
Args:
ax(matplotlib axes): on what to plot
nrn(neuron): neuron to be plotted
neurite_type(NeuriteType): an optional filter on the neurite type
diameter_scale(float): Scale factor multiplied with segment diameters before plotting
linewidth(float): all segments are plotted with this width, but only if diameter_scale=None
color(str or None): Color of plotted values, None corresponds to default choice
alpha(float): Transparency of plotted values
'''
plot_soma3d(ax, nrn.soma, color=color, alpha=alpha)
for neurite in iter_neurites(nrn, filt=tree_type_checker(neurite_type)):
plot_tree3d(ax, neurite,
diameter_scale=diameter_scale, linewidth=linewidth,
color=color, alpha=alpha)
ax.set_title(nrn.name)
def get_unawarded_user_ids(self, db_read=None):
"""
Returns unawarded user ids (need to be saved) and the count.
"""
db_read = db_read or self.db_read
already_awarded_ids = self.get_already_awarded_user_ids(db_read=db_read)
current_ids = self.get_current_user_ids(db_read=db_read)
unawarded_ids = list(set(current_ids) - set(already_awarded_ids))
unawarded_ids_count = len(unawarded_ids)
logger.debug(
'→ Badge %s: %d users need to be awarded',
self.slug,
unawarded_ids_count)
return (unawarded_ids, unawarded_ids_count)
def _get_code_w_scope(data, position, obj_end, opts):
"""Decode a BSON code_w_scope to bson.code.Code."""
code, position = _get_string(data, position + 4, obj_end, opts)
scope, position = _get_object(data, position, obj_end, opts)
return Code(code, scope), position
def remove_api_key(self):
"""
Removes the user's existing API key, if present, and sets the current instance's 'api_key'
attribute to the empty string.
Returns:
`NoneType`: None.
"""
url = self.record_url + "/remove_api_key"
res = requests.patch(url=url, headers=HEADERS, verify=False)
res.raise_for_status()
self.api_key = ""
def subscribe(self, tag, fun, description=None):
""" Subscribe to something and register a function """
self.methods[tag] = fun
self.descriptions[tag] = description
self.socket.set_string_option(nanomsg.SUB, nanomsg.SUB_SUBSCRIBE, tag)
def update_binary_stats(self, label, pred):
"""
Update various binary classification counts for a single (label, pred)
pair.
Parameters
----------
label : `NDArray`
The labels of the data.
pred : `NDArray`
Predicted values.
"""
pred = pred.asnumpy()
label = label.asnumpy().astype('int32')
pred_label = numpy.argmax(pred, axis=1)
check_label_shapes(label, pred)
if len(numpy.unique(label)) > 2:
raise ValueError("%s currently only supports binary classification."
% self.__class__.__name__)
pred_true = (pred_label == 1)
pred_false = 1 - pred_true
label_true = (label == 1)
label_false = 1 - label_true
true_pos = (pred_true * label_true).sum()
false_pos = (pred_true * label_false).sum()
false_neg = (pred_false * label_true).sum()
true_neg = (pred_false * label_false).sum()
self.true_positives += true_pos
self.global_true_positives += true_pos
self.false_positives += false_pos
self.global_false_positives += false_pos
self.false_negatives += false_neg
self.global_false_negatives += false_neg
self.true_negatives += true_neg
self.global_true_negatives += true_neg
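# A standalone numpy illustration of the counting logic above
# (hypothetical labels/predictions rather than mxnet NDArrays):
import numpy
label = numpy.array([1, 0, 1, 1])
pred_label = numpy.array([1, 0, 0, 1])
true_pos = ((pred_label == 1) & (label == 1)).sum()   # 2
false_pos = ((pred_label == 1) & (label == 0)).sum()  # 0
false_neg = ((pred_label == 0) & (label == 1)).sum()  # 1
true_neg = ((pred_label == 0) & (label == 0)).sum()   # 1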
def solar_midnight(self, date=None, local=True):
"""Calculates the solar midnight (the time when the sun is at its lowest
point.)
:param date: The date for which to calculate the midnight time.
If no date is specified then the current date will be used.
:type date: :class:`~datetime.date`
:param local: True = Time to be returned in location's time zone;
False = Time to be returned in UTC.
If not specified then the time will be returned in local time
:type local: bool
:returns: The date and time at which the solar midnight occurs.
:rtype: :class:`~datetime.datetime`
"""
if local and self.timezone is None:
raise ValueError("Local time requested but Location has no timezone set.")
if self.astral is None:
self.astral = Astral()
if date is None:
date = datetime.date.today()
midnight = self.astral.solar_midnight_utc(date, self.longitude)
if local:
return midnight.astimezone(self.tz)
else:
return midnight
def display_hook(fn):
"""
A decorator to wrap display hooks that return a MIME bundle or None.
Additionally it handles adding output to the notebook archive, saves
files specified with the output magic and handles tracebacks.
"""
@wraps(fn)
def wrapped(element):
global FULL_TRACEBACK
if Store.current_backend is None:
return {}, {}
try:
max_frames = OutputSettings.options['max_frames']
mimebundle = fn(element, max_frames=max_frames)
if mimebundle is None:
return {}, {}
mime_data, mime_metadata = mimebundle
if 'text/javascript' in mime_data:
mime_data['text/html'] = mimebundle_to_html(mime_data)
del mime_data['text/javascript']
# Only want to add to the archive for one display hook...
disabled_suffixes = ['png_display', 'svg_display']
if not any(fn.__name__.endswith(suffix) for suffix in disabled_suffixes):
if type(holoviews.archive) is not FileArchive:
holoviews.archive.add(element, html=mime_data['text/html'])
filename = OutputSettings.options['filename']
if filename:
Store.renderers[Store.current_backend].save(element, filename)
return mime_data, mime_metadata
except SkipRendering as e:
if e.warn:
sys.stderr.write(str(e))
return {}, {}
except AbbreviatedException as e:
FULL_TRACEBACK = '\n'.join(traceback.format_exception(e.etype,
e.value,
e.traceback))
info = dict(name=e.etype.__name__,
message=str(e.value).replace('\n','<br>'))
msg = '<i> [Call holoviews.ipython.show_traceback() for details]</i>'
return {'text/html': "<b>{name}</b>{msg}<br>{message}".format(msg=msg, **info)}, {}
except Exception:
raise
return wrapped
def kelvin2rgb(temperature):
"""
Converts from Kelvin temperature to an RGB color.
Algorithm credits: |tannerhelland|_
"""
# range check
if temperature < 1000:
temperature = 1000
elif temperature > 40000:
temperature = 40000
tmp_internal = temperature / 100.0
# red
if tmp_internal <= 66:
red = 255
else:
tmp_red = 329.698727446 * np.power(tmp_internal - 60, -0.1332047592)
if tmp_red < 0:
red = 0
elif tmp_red > 255:
red = 255
else:
red = tmp_red
# green
if tmp_internal <= 66:
tmp_green = 99.4708025861 * np.log(tmp_internal) - 161.1195681661
if tmp_green < 0:
green = 0
elif tmp_green > 255:
green = 255
else:
green = tmp_green
else:
tmp_green = 288.1221695283 * np.power(tmp_internal - 60, -0.0755148492)
if tmp_green < 0:
green = 0
elif tmp_green > 255:
green = 255
else:
green = tmp_green
# blue
if tmp_internal >= 66:
blue = 255
elif tmp_internal <= 19:
blue = 0
else:
tmp_blue = 138.5177312231 * np.log(tmp_internal - 10) - 305.0447927307
if tmp_blue < 0:
blue = 0
elif tmp_blue > 255:
blue = 255
else:
blue = tmp_blue
return [red / 255, green / 255, blue / 255]
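# Examples: warm tungsten light is red-heavy; ~6600K comes out pure white.
candle = kelvin2rgb(1850)  # red == 1.0, blue == 0.0, green ~ 0.5
white = kelvin2rgb(6600)   # [1.0, 1.0, 1.0]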
def store_minions(opts, jid, minions, mminion=None, syndic_id=None):
'''
Store additional minions matched on lower-level masters using the configured
master_job_cache
'''
if mminion is None:
mminion = salt.minion.MasterMinion(opts, states=False, rend=False)
job_cache = opts['master_job_cache']
minions_fstr = '{0}.save_minions'.format(job_cache)
try:
mminion.returners[minions_fstr](jid, minions, syndic_id=syndic_id)
except KeyError:
raise KeyError(
'Returner \'{0}\' does not support function save_minions'.format(
job_cache
)
)
def init_app(self, app, **kwargs):
"""Initialize application object.
:param app: An instance of :class:`~flask.Flask`.
"""
# Init the configuration
self.init_config(app)
# Enable Rate limiter
self.limiter = Limiter(app, key_func=get_ipaddr)
# Enable secure HTTP headers
if app.config['APP_ENABLE_SECURE_HEADERS']:
self.talisman = Talisman(
app, **app.config.get('APP_DEFAULT_SECURE_HEADERS', {})
)
# Enable PING view
if app.config['APP_HEALTH_BLUEPRINT_ENABLED']:
blueprint = Blueprint('invenio_app_ping', __name__)
@blueprint.route('/ping')
def ping():
"""Load balancer ping view."""
return 'OK'
ping.talisman_view_options = {'force_https': False}
app.register_blueprint(blueprint)
requestid_header = app.config.get('APP_REQUESTID_HEADER')
if requestid_header:
@app.before_request
def set_request_id():
"""Extracts a request id from an HTTP header."""
request_id = request.headers.get(requestid_header)
if request_id:
# Capped at 200 to protect against malicious clients
# sending very large headers.
g.request_id = request_id[:200]
# If installed register the Flask-DebugToolbar extension
try:
from flask_debugtoolbar import DebugToolbarExtension
app.extensions['flask-debugtoolbar'] = DebugToolbarExtension(app)
except ImportError:
app.logger.debug('Flask-DebugToolbar extension not installed.')
# Register self
app.extensions['invenio-app'] = self
def _step(self,
model: TrainingModel,
batch: mx.io.DataBatch,
checkpoint_interval: int,
metric_train: mx.metric.EvalMetric,
metric_loss: Optional[mx.metric.EvalMetric] = None):
"""
Performs an update to model given a batch and updates metrics.
"""
if model.monitor is not None:
model.monitor.tic()
####################
# Forward & Backward
####################
model.run_forward_backward(batch, metric_train)
# If using an extended optimizer, provide extra state information about the current batch
optimizer = model.optimizer
if metric_loss is not None and isinstance(optimizer, SockeyeOptimizer):
# Loss for this batch
metric_loss.reset()
metric_loss.update(batch.label, model.module.get_outputs())
[(_, m_val)] = metric_loss.get_name_value()
batch_state = BatchState(metric_val=m_val)
optimizer.pre_update_batch(batch_state)
########
# UPDATE
########
if self.update_interval == 1 or self.state.batches % self.update_interval == 0:
# Gradient rescaling
gradient_norm = None
if self.state.updates > 0 and (self.state.updates + 1) % checkpoint_interval == 0:
# compute values for logging to metrics (before rescaling...)
gradient_norm = self.state.gradient_norm = model.get_global_gradient_norm()
self.state.gradients = model.get_gradients()
# note: C.GRADIENT_CLIPPING_TYPE_ABS is handled by the mxnet optimizer directly
if self.optimizer_config.gradient_clipping_type == C.GRADIENT_CLIPPING_TYPE_NORM:
if gradient_norm is None:
gradient_norm = model.get_global_gradient_norm()
# clip gradients
if gradient_norm > self.optimizer_config.gradient_clipping_threshold:
ratio = self.optimizer_config.gradient_clipping_threshold / gradient_norm
model.rescale_gradients(ratio)
model.update()
if self.update_interval > 1:
model.zero_gradients()
self.state.updates += 1
if model.monitor is not None:
results = model.monitor.toc()
if results:
for _, k, v in results:
logger.info('Monitor: Batch [{:d}] {:s} {:s}'.format(self.state.updates, k, v))
def _get_style_of_faulting_term(self, C, rup):
"""
Returns the style-of-faulting term.
Fault type (Strike-slip, Normal, Thrust/reverse) is
derived from rake angle.
    Rake angles within 30 degrees of horizontal are strike-slip,
    angles from 30 to 150 degrees are reverse, and angles from
    -30 to -150 degrees are normal.
Note that the 'Unspecified' case is not considered in this class
as rake is required as an input variable
"""
SS, NS, RS = 0.0, 0.0, 0.0
if np.abs(rup.rake) <= 30.0 or (180.0 - np.abs(rup.rake)) <= 30.0:
# strike-slip
SS = 1.0
elif rup.rake > 30.0 and rup.rake < 150.0:
# reverse
RS = 1.0
else:
# normal
NS = 1.0
return (C["sofN"] * NS) + (C["sofR"] * RS) + (C["sofS"] * SS)
def execute_python_script(self, script):
"""
    Execute a python script on the remote server
:param script: Inline script to convert to a file and execute remotely
:return: The output of the script execution
"""
# Create the local file to copy to remote
file_handle, filename = tempfile.mkstemp()
temp_file = os.fdopen(file_handle, "wt")
temp_file.write(script)
temp_file.close()
# Put the file into the remote user directory
self.put(filename, "python_execute.py")
command = ["python", "python_execute.py"]
# Execute the python script on the remote system, clean up, and return the output
output = self.execute(command, False)
self.remove("python_execute.py")
os.unlink(filename)
return output
def __Restore_Geometry_On_Layout_Change_checkBox_set_ui(self):
"""
Sets the **Restore_Geometry_On_Layout_Change_checkBox** Widget.
"""
    # Add the settings key if it doesn't exist.
self.__settings.get_key("Settings", "restore_geometry_on_layout_change").isNull() and \
self.__settings.set_key("Settings", "restore_geometry_on_layout_change", Qt.Unchecked)
restore_geometry_on_layout_change = foundations.common.get_first_item(
self.__settings.get_key("Settings", "restore_geometry_on_layout_change").toInt())
LOGGER.debug("> Setting '{0}' with value '{1}'.".format("Restore_Geometry_On_Layout_Change_checkBox",
restore_geometry_on_layout_change))
self.Restore_Geometry_On_Layout_Change_checkBox.setCheckState(restore_geometry_on_layout_change)
    self.__engine.layouts_manager.restore_geometry_on_layout_change = bool(restore_geometry_on_layout_change)
def nth(iterable, n, default=None):
"""Returns the nth item or a default value
Arguments
---------
iterable : iterable
n : int
default : default=None
The default value to return
"""
    if not isinstance(n, int):
raise TypeError("n is not an integer")
return next(islice(iterable, n, None), default)
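# Examples:
nth(range(10), 3)                               # 3
nth((x * x for x in range(4)), 10, default=-1)  # -1 (iterator exhausted)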
def read_asc_grid(filename, footer=0):
"""Reads ASCII grid file (*.asc).
Parameters
----------
filename : str
Name of *.asc file.
footer : int, optional
Number of lines at bottom of *.asc file to skip.
Returns
-------
grid_array : numpy array, shape (M, N)
(M, N) array of grid values, where M is number of Y-coordinates and
        N is number of X-coordinates. The array entry corresponding to
        the lower-left coordinates is at index [0, 0], so that
        the array is oriented as it would be in X-Y space.
x : numpy array, shape (N,)
1D array of N X-coordinates.
y : numpy array, shape (M,)
1D array of M Y-coordinates.
CELLSIZE : tuple or float
Either a two-tuple of (x-cell size, y-cell size),
or a float that specifies the uniform cell size.
NODATA : float
Value that specifies which entries are not actual data.
"""
ncols = None
nrows = None
xllcorner = None
xllcenter = None
yllcorner = None
yllcenter = None
cellsize = None
dx = None
dy = None
no_data = None
header_lines = 0
with io.open(filename, 'r') as f:
while True:
string, value = f.readline().split()
header_lines += 1
if string.lower() == 'ncols':
ncols = int(value)
elif string.lower() == 'nrows':
nrows = int(value)
elif string.lower() == 'xllcorner':
xllcorner = float(value)
elif string.lower() == 'xllcenter':
xllcenter = float(value)
elif string.lower() == 'yllcorner':
yllcorner = float(value)
elif string.lower() == 'yllcenter':
yllcenter = float(value)
elif string.lower() == 'cellsize':
cellsize = float(value)
elif string.lower() == 'cell_size':
cellsize = float(value)
elif string.lower() == 'dx':
dx = float(value)
elif string.lower() == 'dy':
dy = float(value)
elif string.lower() == 'nodata_value':
no_data = float(value)
elif string.lower() == 'nodatavalue':
no_data = float(value)
else:
raise IOError("could not read *.asc file. Error in header.")
if (ncols is not None) and \
(nrows is not None) and \
(((xllcorner is not None) and (yllcorner is not None)) or
((xllcenter is not None) and (yllcenter is not None))) and \
((cellsize is not None) or ((dx is not None) and (dy is not None))) and \
(no_data is not None):
break
raw_grid_array = np.genfromtxt(filename, skip_header=header_lines,
skip_footer=footer)
grid_array = np.flipud(raw_grid_array)
if nrows != grid_array.shape[0] or ncols != grid_array.shape[1]:
raise IOError("Error reading *.asc file. Encountered problem "
"with header: NCOLS and/or NROWS does not match "
"number of columns/rows in data file body.")
if xllcorner is not None and yllcorner is not None:
if dx is not None and dy is not None:
xllcenter = xllcorner + dx/2.0
yllcenter = yllcorner + dy/2.0
else:
xllcenter = xllcorner + cellsize/2.0
yllcenter = yllcorner + cellsize/2.0
if dx is not None and dy is not None:
x = np.arange(xllcenter, xllcenter + ncols*dx, dx)
y = np.arange(yllcenter, yllcenter + nrows*dy, dy)
else:
x = np.arange(xllcenter, xllcenter + ncols*cellsize, cellsize)
y = np.arange(yllcenter, yllcenter + nrows*cellsize, cellsize)
# Sometimes x and y and can be an entry too long due to imprecision
# in calculating the upper cutoff for np.arange(); this bit takes care of
# that potential problem.
if x.size == ncols + 1:
x = x[:-1]
if y.size == nrows + 1:
y = y[:-1]
if cellsize is None:
cellsize = (dx, dy)
return grid_array, x, y, cellsize, no_data
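# A minimal round-trip sketch: write a tiny 2x3 grid and read it back
# ('tiny.asc' is a throwaway file created for the example).
with open('tiny.asc', 'w') as f:
    f.write("ncols 3\n"
            "nrows 2\n"
            "xllcorner 0.0\n"
            "yllcorner 0.0\n"
            "cellsize 1.0\n"
            "NODATA_value -9999\n"
            "1 2 3\n"
            "4 5 6\n")
grid, x, y, cellsize, nodata = read_asc_grid('tiny.asc')
# grid[0] == [4., 5., 6.] (the southernmost row), x == [0.5, 1.5, 2.5]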
def FormatAsHexString(num, width=None, prefix="0x"):
"""Takes an int and returns the number formatted as a hex string."""
# Strip "0x".
hex_str = hex(num)[2:]
# Strip "L" for long values.
hex_str = hex_str.replace("L", "")
if width:
hex_str = hex_str.rjust(width, "0")
return "%s%s" % (prefix, hex_str)
def marketShortInterestDF(date=None, token='', version=''):
'''The consolidated market short interest positions in all IEX-listed securities are included in the IEX Short Interest Report.
The report data will be published daily at 4:00pm ET.
https://iexcloud.io/docs/api/#listed-short-interest-list-in-dev
Args:
        date (datetime): Effective Datetime
        token (string): Access token
        version (string): API version
Returns:
DataFrame: result
'''
df = pd.DataFrame(marketShortInterest(date, token, version))
_toDatetime(df)
return df
def import_command(dest, src, name, api=None, filter_symbol=None):
"""Import Command `name` and its dependencies from Registry `src`
to Registry `dest`
:param Registry dest: Destination Registry
:param Registry src: Source Registry
:param str name: Name of Command to import
:param str api: Prefer to import Types with api name `api`, or None to
import Types with no api name
:param filter_symbol: Optional filter callable
:type filter_symbol: Callable with signature
``(symbol_type:str, symbol_name:str) -> bool``
"""
if not filter_symbol:
filter_symbol = _default_filter_symbol
cmd = src.commands[name]
for x in cmd.required_types:
if not filter_symbol('type', x):
continue
import_type(dest, src, x, api, filter_symbol)
dest.commands[name] = cmd
def _login(login_func, *args):
"""A helper function for logging in. It's purpose is to avoid duplicate
code in the login functions.
"""
response = login_func(*args)
_fail_if_contains_errors(response)
user_json = response.json()
return User(user_json)
def get_energy_management_properties(self):
"""
Return the energy management properties of the CPC.
The returned energy management properties are a subset of the
properties of the CPC resource, and are also available as normal
    properties of the CPC resource. In that sense, there is no new data
    provided by this method. However, because only a subset of the
properties is returned, this method is faster than retrieving the
complete set of CPC properties (e.g. via
:meth:`~zhmcclient.BaseResource.pull_full_properties`).
This method performs the HMC operation "Get CPC Energy Management
Data", and returns only the energy management properties for this CPC
from the operation result. Note that in non-ensemble mode of a CPC, the
HMC operation result will only contain data for the CPC alone.
It requires that the feature "Automate/advanced management suite"
(FC 0020) is installed and enabled, and returns empty values for most
properties, otherwise.
Authorization requirements:
* Object-access permission to this CPC.
Returns:
dict: A dictionary of properties of the CPC that are related to
energy management. For details, see section "Energy management
related additional properties" in the data model for the CPC
resource in the :term:`HMC API` book.
Raises:
:exc:`~zhmcclient.HTTPError`: See the HTTP status and reason codes of
operation "Get CPC Energy Management Data" in the :term:`HMC API`
book.
:exc:`~zhmcclient.ParseError`: Also raised by this method when the
JSON response could be parsed but contains inconsistent data.
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
result = self.manager.session.get(self.uri + '/energy-management-data')
em_list = result['objects']
if len(em_list) != 1:
uris = [em_obj['object-uri'] for em_obj in em_list]
raise ParseError("Energy management data returned for no resource "
"or for more than one resource: %r" % uris)
em_cpc_obj = em_list[0]
if em_cpc_obj['object-uri'] != self.uri:
raise ParseError("Energy management data returned for an "
"unexpected resource: %r" %
em_cpc_obj['object-uri'])
if em_cpc_obj['error-occurred']:
raise ParseError("Errors occurred when retrieving energy "
"management data for CPC. Operation result: %r" %
result)
cpc_props = em_cpc_obj['properties']
return cpc_props
def uninstall_wic(self, wic_slot_number):
"""
Uninstalls a WIC adapter from this router.
:param wic_slot_number: WIC slot number
"""
# WICs are always installed on adapters in slot 0
slot_number = 0
# Do not check if slot has an adapter because adapters with WICs interfaces
# must be inserted by default in the router and cannot be removed.
adapter = self._slots[slot_number]
if wic_slot_number > len(adapter.wics) - 1:
raise DynamipsError("WIC slot {wic_slot_number} doesn't exist".format(wic_slot_number=wic_slot_number))
if adapter.wic_slot_available(wic_slot_number):
raise DynamipsError("No WIC is installed in WIC slot {wic_slot_number}".format(wic_slot_number=wic_slot_number))
# Dynamips WICs slot IDs start on a multiple of 16
# WIC1 = 16, WIC2 = 32 and WIC3 = 48
internal_wic_slot_number = 16 * (wic_slot_number + 1)
yield from self._hypervisor.send('vm slot_remove_binding "{name}" {slot_number} {wic_slot_number}'.format(name=self._name,
slot_number=slot_number,
wic_slot_number=internal_wic_slot_number))
log.info('Router "{name}" [{id}]: {wic} removed from WIC slot {wic_slot_number}'.format(name=self._name,
id=self._id,
wic=adapter.wics[wic_slot_number],
wic_slot_number=wic_slot_number))
adapter.uninstall_wic(wic_slot_number)
def compute_summary_statistic(iscs, summary_statistic='mean', axis=None):
"""Computes summary statistics for ISCs
Computes either the 'mean' or 'median' across a set of ISCs. In the
case of the mean, ISC values are first Fisher Z transformed (arctanh),
averaged, then inverse Fisher Z transformed (tanh).
The implementation is based on the work in [SilverDunlap1987]_.
    .. [SilverDunlap1987] "Averaging correlation coefficients: should
Fisher's z transformation be used?", N. C. Silver, W. P. Dunlap, 1987,
Journal of Applied Psychology, 72, 146-148.
https://doi.org/10.1037/0021-9010.72.1.146
Parameters
----------
iscs : list or ndarray
ISC values
summary_statistic : str, default: 'mean'
Summary statistic, 'mean' or 'median'
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to
compute the mean of the flattened array.
Returns
-------
statistic : float or ndarray
Summary statistic of ISC values
"""
if summary_statistic not in ('mean', 'median'):
raise ValueError("Summary statistic must be 'mean' or 'median'")
# Compute summary statistic
if summary_statistic == 'mean':
statistic = np.tanh(np.nanmean(np.arctanh(iscs), axis=axis))
elif summary_statistic == 'median':
statistic = np.nanmedian(iscs, axis=axis)
return statistic
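# Worked example (hypothetical values): the Fisher-z mean of two correlations
# is pulled toward the larger value relative to the plain arithmetic mean.
import numpy as np
z_mean = np.tanh(np.nanmean(np.arctanh([0.5, 0.9])))  # ~0.766, vs. plain mean 0.7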
def list_product_releases(page_size=200, page_index=0, sort="", q=""):
"""
List all ProductReleases
"""
data = list_product_releases_raw(page_size, page_index, sort, q)
if data:
return utils.format_json_list(data)
def path_helper(self, path=None, operations=None, **kwargs):
"""
    Works like an apispec plugin
May return a path as string and mutate operations dict.
:param str path: Path to the resource
:param dict operations: A `dict` mapping HTTP methods to operation object. See
https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#operationObject
:param kwargs:
:return: Return value should be a string or None. If a string is returned, it
is set as the path.
"""
RE_URL = re.compile(r"<(?:[^:<>]+:)?([^<>]+)>")
path = RE_URL.sub(r"{\1}", path)
return "/{}{}".format(self.resource_name, path)
def set_xylims(self, lims, axes=None, panel=None, **kws):
    """Set the x/y limits of the given panel."""
    if panel is None:
        panel = self.current_panel
    self.panels[panel].set_xylims(lims, axes=axes, **kws)
def blockSignals( self, state ):
"""
Blocks the signals for this widget and its sub-parts.
:param state | <bool>
"""
super(XLocationWidget, self).blockSignals(state)
self._locationEdit.blockSignals(state)
self._locationButton.blockSignals(state)
def add_alias(self, alias, source, clean=True):
"""Add an alias, optionally 'cleaning' the alias string.
Calls the parent `catalog` method `clean_entry_name` - to apply the
same name-cleaning as is applied to entry names themselves.
Returns
-------
alias : str
The stored version of the alias (cleaned or not).
"""
if clean:
alias = self.catalog.clean_entry_name(alias)
self.add_quantity(self._KEYS.ALIAS, alias, source)
return alias
def get_unique_together_validators(self):
"""
    Determine a default set of validators for any unique_together constraints.
"""
model_class_inheritance_tree = (
[self.Meta.model] +
list(self.Meta.model._meta.parents.keys())
)
# The field names we're passing though here only include fields
# which may map onto a model field. Any dotted field name lookups
# cannot map to a field, and must be a traversal, so we're not
# including those.
field_names = {
field.source for field in self.fields.values()
if (field.source != '*') and ('.' not in field.source)
}
# Note that we make sure to check `unique_together` both on the
# base model class, but also on any parent classes.
validators = []
for parent_class in model_class_inheritance_tree:
for unique_together in parent_class._meta.unique_together:
if field_names.issuperset(set(unique_together)):
validator = UniqueTogetherValidator(
queryset=parent_class._default_manager,
fields=unique_together
)
validators.append(validator)
return validators
def add_mandates(self, representative, rep_json):
'''
Create mandates from rep data based on variant configuration
'''
# Mandate in country group for party constituency
if rep_json.get('parti_ratt_financier'):
constituency, _ = Constituency.objects.get_or_create(
name=rep_json.get('parti_ratt_financier'), country=self.france)
group, _ = self.touch_model(model=Group,
abbreviation=self.france.code,
kind='country',
name=self.france.name)
_create_mandate(representative, group, constituency, 'membre')
# Configurable mandates
for mdef in self.variant['mandates']:
if mdef.get('chamber', False):
chamber = self.chamber
else:
chamber = None
if 'from' in mdef:
elems = mdef['from'](rep_json)
else:
elems = [rep_json]
for elem in elems:
name = _get_mdef_item(mdef, 'name', elem, '')
abbr = _get_mdef_item(mdef, 'abbr', elem, '')
group, _ = self.touch_model(model=Group,
abbreviation=abbr,
kind=mdef['kind'],
chamber=chamber,
name=name)
role = _get_mdef_item(mdef, 'role', elem, 'membre')
start = _get_mdef_item(mdef, 'start', elem, None)
if start is not None:
start = _parse_date(start)
end = _get_mdef_item(mdef, 'end', elem, None)
if end is not None:
end = _parse_date(end)
_create_mandate(representative, group, self.ch_constituency,
role, start, end)
logger.debug(
'%s => %s: %s of "%s" (%s) %s-%s' % (rep_json['slug'],
mdef['kind'], role, name, abbr, start, end))
def reduce_after(method):
'''reduce() the result of this method call (unless you already reduced it).'''
def new_method(self, *args, **kwargs):
result = method(self, *args, **kwargs)
if result == self:
return result
return result.reduce()
return new_method
def get_first_and_last(year, month):
"""Returns two datetimes: first day and last day of given year&month"""
ym_first = make_aware(
datetime.datetime(year, month, 1),
get_default_timezone()
)
ym_last = make_aware(
datetime.datetime(year, month, monthrange(year, month)[1], 23, 59, 59, 1000000-1),
get_default_timezone()
)
return ym_first, ym_last
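# e.g. February 2024 (a leap year):
first, last = get_first_and_last(2024, 2)
# first -> 2024-02-01 00:00:00, last -> 2024-02-29 23:59:59.999999 (both tz-aware)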
def with_json_path(self, path, field=None):
"""Annotate Storage objects with a specific JSON path.
:param path: Path to get inside the stored object, which can be
either a list of path components or a comma-separated
string
:param field: Optional output field name
"""
if field is None:
field = '_'.join(['json'] + json_path_components(path))
kwargs = {field: JsonGetPath('json', path)}
return self.defer('json').annotate(**kwargs)
def resize(self, new_size):
"""Create a new larger array, and copy data over"""
assert new_size > self.size
new_data = self._allocate(new_size)
# copy
new_data[0:self.size * self.chunk_size] = self.data
self.size = new_size
self.data = new_data
def members(self, is_manager=None):
"""
Retrieve members of the scope.
:param is_manager: (optional) set to True to return only Scope members that are also managers.
:type is_manager: bool
:return: List of members (usernames)
Examples
--------
>>> members = project.members()
>>> managers = project.members(is_manager=True)
"""
if not is_manager:
return [member for member in self._json_data['members'] if member['is_active']]
else:
return [member for member in self._json_data['members'] if
member.get('is_active', False) and member.get('is_manager', False)]
def add_notification_listener(self, notification_type, notification_callback):
""" Add a notification callback to the notification center.
Args:
notification_type: A string representing the notification type from .helpers.enums.NotificationTypes
notification_callback: closure of function to call when event is triggered.
Returns:
Integer notification id used to remove the notification or -1 if the notification has already been added.
"""
if notification_type not in self.notifications:
self.notifications[notification_type] = [(self.notification_id, notification_callback)]
else:
        if any(callback == notification_callback
               for _, callback in self.notifications[notification_type]):
            return -1
self.notifications[notification_type].append((self.notification_id, notification_callback))
ret_val = self.notification_id
self.notification_id += 1
return ret_val
def remove_user_from_group(self, username, groupname, raise_on_error=False):
"""Remove a user from a group
Attempts to remove a user from a group
Args
username: The username to remove from the group.
groupname: The group name to be removed from the user.
Returns:
True: Succeeded
False: If unsuccessful
"""
response = self._delete(self.rest_url + "/group/user/direct",params={"username": username, "groupname": groupname})
if response.status_code == 204:
return True
if raise_on_error:
raise RuntimeError(response.json()['message'])
return False
def expand(self, m):
"""Using the template, expand the string."""
if m is None:
raise ValueError("Match is None!")
sep = m.string[:0]
if isinstance(sep, bytes) != self._bytes:
raise TypeError('Match string type does not match expander string type!')
text = []
# Expand string
for x in range(0, len(self.literals)):
index = x
l = self.literals[x]
if l is None:
g_index = self._get_group_index(index)
span_case, single_case, capture = self._get_group_attributes(index)
if not self.use_format:
# Non format replace
try:
l = m.group(g_index)
except IndexError: # pragma: no cover
raise IndexError("'%d' is out of range!" % capture)
else:
# String format replace
try:
obj = m.captures(g_index)
except IndexError: # pragma: no cover
raise IndexError("'%d' is out of range!" % g_index)
l = _util.format_string(m, obj, capture, self._bytes)
if span_case is not None:
if span_case == _LOWER:
l = l.lower()
else:
l = l.upper()
if single_case is not None:
if single_case == _LOWER:
l = l[0:1].lower() + l[1:]
else:
l = l[0:1].upper() + l[1:]
text.append(l)
return sep.join(text)
def os_walk_pre_35(top, topdown=True, onerror=None, followlinks=False):
"""Pre Python 3.5 implementation of os.walk() that doesn't use scandir."""
islink, join, isdir = os.path.islink, os.path.join, os.path.isdir
try:
names = os.listdir(top)
except OSError as err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
new_path = join(top, name)
if followlinks or not islink(new_path):
for x in os_walk_pre_35(new_path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
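# Usage sketch: prune hidden directories in place, which works because
# topdown=True yields each directory before descending into it.
for dirpath, dirnames, filenames in os_walk_pre_35('.'):
    dirnames[:] = [d for d in dirnames if not d.startswith('.')]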
def select(cls, dataset, selection_mask=None, **selection):
"""
Apply a selection to the data.
"""
import iris
constraint = cls.select_to_constraint(dataset, selection)
pre_dim_coords = [c.name() for c in dataset.data.dim_coords]
indexed = cls.indexed(dataset, selection)
extracted = dataset.data.extract(constraint)
if indexed and not extracted.dim_coords:
return extracted.data.item()
post_dim_coords = [c.name() for c in extracted.dim_coords]
dropped = [c for c in pre_dim_coords if c not in post_dim_coords]
for d in dropped:
extracted = iris.util.new_axis(extracted, d)
return extracted
def match_rows(rows1, rows2, key, sort_keys=True):
"""
Yield triples of `(value, left_rows, right_rows)` where
`left_rows` and `right_rows` are lists of rows that share the same
column value for *key*. This means that both *rows1* and *rows2*
must have a column with the same name *key*.
.. warning::
Both *rows1* and *rows2* will exist in memory for this
operation, so it is not recommended for very large tables on
low-memory systems.
Args:
rows1: a :class:`Table` or list of :class:`Record` objects
rows2: a :class:`Table` or list of :class:`Record` objects
key (str): the column name on which to match
sort_keys (bool): if `True`, yield matching rows sorted by the
matched key instead of the original order
"""
matched = OrderedDict()
for i, rows in enumerate([rows1, rows2]):
for row in rows:
val = row[key]
try:
data = matched[val]
except KeyError:
matched[val] = ([], [])
data = matched[val]
data[i].append(row)
vals = matched.keys()
if sort_keys:
vals = sorted(vals, key=safe_int)
for val in vals:
left, right = matched[val]
yield (val, left, right)
|
Yield triples of `(value, left_rows, right_rows)` where
`left_rows` and `right_rows` are lists of rows that share the same
column value for *key*. This means that both *rows1* and *rows2*
must have a column with the same name *key*.
.. warning::
Both *rows1* and *rows2* will exist in memory for this
operation, so it is not recommended for very large tables on
low-memory systems.
Args:
rows1: a :class:`Table` or list of :class:`Record` objects
rows2: a :class:`Table` or list of :class:`Record` objects
key (str): the column name on which to match
sort_keys (bool): if `True`, yield matching rows sorted by the
matched key instead of the original order
|
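A small sketch of the matching behaviour, using plain dicts in place of Record objects and assuming `safe_int` sorts numeric-looking keys numerically:
left = [{'id': '2', 'name': 'b'}, {'id': '1', 'name': 'a'}]
right = [{'id': '1', 'name': 'c'}]

for val, lrows, rrows in match_rows(left, right, 'id'):
    print(val, [r['name'] for r in lrows], [r['name'] for r in rrows])
# With sort_keys=True the expected output is:
# 1 ['a'] ['c']
# 2 ['b'] []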
def housecode_to_index(housecode):
"""Convert a X10 housecode to a zero-based index"""
match = re.search(r'^([A-P])(\d{1,2})$', housecode.upper())
if match:
house_index = int(match.group(2))
if 1 <= house_index <= 16:
return (ord(match.group(1)) - ord('A')) * 16 + house_index - 1
raise ValueError("Invalid X10 housecode: %s" % housecode)
|
Convert a X10 housecode to a zero-based index
|
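Worked examples of the mapping, which the regex and range check above make total for housecodes A1 through P16:
assert housecode_to_index('a1') == 0     # (0 * 16) + 1 - 1
assert housecode_to_index('B16') == 31   # (1 * 16) + 16 - 1
assert housecode_to_index('P16') == 255  # (15 * 16) + 16 - 1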
def list_vrf(self):
""" List VRFs and return JSON encoded result.
"""
try:
vrfs = VRF.list()
except NipapError as e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
return json.dumps(vrfs, cls=NipapJSONEncoder)
|
List VRFs and return JSON encoded result.
|
def connect_mysql(host, port, user, password, database, retries=3):
"""Connect to MySQL with retries."""
import time
for attempt in range(retries):
try:
return pymysql.connect(
host=host, port=port,
user=user, passwd=password,
db=database
)
except pymysql.err.OperationalError:
if attempt == retries - 1:
raise
time.sleep(1)
|
Connect to MySQL with retries.
|
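A usage sketch with hypothetical credentials; with the retry loop above, transient OperationalErrors are retried before the last one propagates:
conn = connect_mysql('localhost', 3306, 'app_user', 'secret', 'app_db')
try:
    with conn.cursor() as cur:
        cur.execute('SELECT 1')
        print(cur.fetchone())  # (1,)
finally:
    conn.close()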
def letras(song):
"""
Returns the lyrics found in letras.com for the specified mp3 file or an
empty string if not found.
"""
translate = {
'&': 'a',
URLESCAPE: '',
' ': '-'
}
artist = song.artist.lower()
artist = normalize(artist, translate)
title = song.title.lower()
title = normalize(title, translate)
url = 'https://www.letras.com/{}/{}/'.format(artist, title)
soup = get_url(url)
if not soup:
return ''
found_title = soup.select_one('div.cnt-head_title h1')
if not found_title:
# The site didn't find lyrics and took us to the homepage
return ''
found_title = found_title.get_text()
found_title = re.sub(r'[\W_]+', '', found_title.lower())
if found_title != re.sub(r'[\W_]+', '', song.title.lower()):
# The site took us to the wrong song page
return ''
content = soup.find('article')
if not content:
return ''
text = ''
for br in content.find_all('br'):
br.replace_with('\n')
for p in content.find_all('p'):
text += p.get_text() + '\n\n'
return text.strip()
|
Returns the lyrics found in letras.com for the specified mp3 file or an
empty string if not found.
|
def foldl1(f: Callable[[T, T], T], xs: Iterable[T]) -> T:
""" Returns the accumulated result of a binary function applied to elements
of an iterable.
.. math::
foldl1(f, [x_0, x_1, x_2, x_3]) = f(f(f(x_0, x_1), x_2), x_3)
Examples
--------
>>> from delphi.utils.fp import foldl1
>>> foldl1(lambda x, y: x + y, range(5))
10
"""
return reduce(f, xs)
|
Returns the accumulated result of a binary function applied to elements
of an iterable.
.. math::
foldl1(f, [x_0, x_1, x_2, x_3]) = f(f(f(x_0, x_1), x_2), x_3)
Examples
--------
>>> from delphi.utils.fp import foldl1
>>> foldl1(lambda x, y: x + y, range(5))
10
|
def append(args):
"""
%prog append bamfile
Append /1 or /2 to read names. Useful for using the Tophat2 bam file for
training AUGUSTUS gene models.
"""
p = OptionParser(append.__doc__)
p.add_option("--prepend", help="Prepend string to read names")
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
bamfile, = args
prepend = opts.prepend
icmd = "samtools view -h {0}".format(bamfile)
bamfile = bamfile.rsplit(".", 1)[0] + ".append.bam"
ocmd = "samtools view -b -@ 64 - -o {0}".format(bamfile)
p = Popen(ocmd, stdin=PIPE, shell=True, universal_newlines=True)
for row in popen(icmd):
if row[0] == '@':
print(row.strip(), file=p.stdin)
else:
s = SamLine(row)
if prepend:
s.qname = prepend + "_" + s.qname
else:
s.update_readname()
print(s, file=p.stdin)
|
%prog append bamfile
Append /1 or /2 to read names. Useful for using the Tophat2 bam file for
training AUGUSTUS gene models.
|
def _is_numeric_data(self, data_type):
"""Private method for testing text data types."""
dt = DATA_TYPES[data_type]
if dt['min'] and dt['max']:
if type(self.data) is dt['type'] and dt['min'] < self.data < dt['max']:
self.type = data_type.upper()
self.len = len(str(self.data))
return True
|
Private method for testing numeric data types.
|
def missing_categories(context):
''' Adds the categories that the user does not currently have. '''
user = user_for_context(context)
categories_available = set(CategoryController.available_categories(user))
items = ItemController(user).items_pending_or_purchased()
categories_held = set()
for product, quantity in items:
categories_held.add(product.category)
return categories_available - categories_held
|
Adds the categories that the user does not currently have.
|
def load_from_rdf_file(self, rdf_file):
"""Initialize given an RDF input file representing the hierarchy."
Parameters
----------
rdf_file : str
Path to an RDF file.
"""
self.graph = rdflib.Graph()
self.graph.parse(os.path.abspath(rdf_file), format='nt')
self.initialize()
|
Initialize given an RDF input file representing the hierarchy.
Parameters
----------
rdf_file : str
Path to an RDF file.
|
def rank(self, n, mu, sigma, crit=.5, upper=10000, xtol=1):
"""%(super)s
Additional Parameters
----------------------
{0}
"""
return _make_rank(self, n, mu, sigma, crit=crit, upper=upper,
xtol=xtol)
|
%(super)s
Additional Parameters
----------------------
{0}
|
def save(self, session_file, verbose=False):
"""
Saves the current session to an existing file, which will be replaced.
If this is a new session that has not been saved yet, use 'save as'
instead.
:param session_file: The path to the file where the current session
must be saved to.
:param verbose: print more
"""
PARAMS={"file":session_file}
response=api(url=self.__url+"/save", PARAMS=PARAMS, verbose=verbose)
return response
|
Saves the current session to an existing file, which will be replaced.
If this is a new session that has not been saved yet, use 'save as'
instead.
:param session_file: The path to the file where the current session
must be saved to.
:param verbose: print more
|
def authentication(self):
"""Generate authentication string."""
if self.session.digest:
authentication = self.session.generate_digest()
elif self.session.basic:
authentication = self.session.generate_basic()
else:
return ''
return "Authorization: " + authentication + '\r\n'
|
Generate authentication string.
|
def make_cache_keys(self, endpoint, kwargs):
""" This function is built to provide cache keys for templates
:param endpoint: Current endpoint
:param kwargs: Keyword Arguments
:return: tuple of the i18n-dependent cache key and the i18n-ignoring cache key
:rtype: tuple(str)
"""
keys = sorted(kwargs.keys())
i18n_cache_key = endpoint+"|"+"|".join([kwargs[k] for k in keys])
if "lang" in keys:
cache_key = endpoint+"|" + "|".join([kwargs[k] for k in keys if k != "lang"])
else:
cache_key = i18n_cache_key
return i18n_cache_key, cache_key
|
This function is built to provide cache keys for templates
:param endpoint: Current endpoint
:param kwargs: Keyword Arguments
:return: tuple of the i18n-dependent cache key and the i18n-ignoring cache key
:rtype: tuple(str)
|
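A worked example of the key construction above, re-computed inline under the same sorting rule (the endpoint and kwargs are hypothetical):
endpoint, kwargs = 'page', {'lang': 'en', 'id': '42'}
keys = sorted(kwargs)  # ['id', 'lang']
i18n_key = endpoint + '|' + '|'.join(kwargs[k] for k in keys)                  # 'page|42|en'
plain_key = endpoint + '|' + '|'.join(kwargs[k] for k in keys if k != 'lang')  # 'page|42'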
def get_future_days(self):
"""Return only future Day objects."""
today = timezone.now().date()
return Day.objects.filter(date__gte=today)
|
Return only future Day objects.
|
def create_training_instances(x):
"""Create `TrainingInstance`s from raw text."""
(input_files, out, tokenizer, max_seq_length, dupe_factor,
short_seq_prob, masked_lm_prob, max_predictions_per_seq, rng) = x
time_start = time.time()
logging.info('Processing %s', input_files)
all_documents = [[]]
# Input file format:
# (1) One sentence per line. These should ideally be actual sentences, not
# entire paragraphs or arbitrary spans of text. (Because we use the
# sentence boundaries for the "next sentence prediction" task).
# (2) Blank lines between documents. Document boundaries are needed so
# that the "next sentence prediction" task doesn't span between documents.
for input_file in input_files:
with io.open(input_file, 'r', encoding='UTF-8') as reader:
while True:
line = reader.readline()
if not line:
break
line = line.strip()
# Empty lines are used as document delimiters
if not line:
all_documents.append([])
continue
tokens = tokenizer(line) if not args.tokenized else line.split(' ')
if tokens:
all_documents[-1].append(tokens)
# Remove empty documents
all_documents = [x for x in all_documents if x]
rng.shuffle(all_documents)
vocab_words = tokenizer.vocab.idx_to_token
instances = []
for _ in range(dupe_factor):
for document_index in range(len(all_documents)):
instances.extend(
create_instances_from_document(
all_documents, document_index, max_seq_length,
short_seq_prob, masked_lm_prob, max_predictions_per_seq,
vocab_words, rng))
rng.shuffle(instances)
input_ids = []
segment_ids = []
masked_lm_positions = []
masked_lm_ids = []
masked_lm_weights = []
next_sentence_labels = []
valid_lengths = []
for inst_index, instance in enumerate(instances):
feature = transform(instance, tokenizer, max_seq_length, max_predictions_per_seq, False)
input_ids.append(
np.ascontiguousarray(feature['input_ids'], dtype='int32'))
segment_ids.append(
np.ascontiguousarray(feature['segment_ids'], dtype='int32'))
masked_lm_positions.append(
np.ascontiguousarray(feature['masked_lm_positions'], dtype='int32'))
masked_lm_ids.append(np.ascontiguousarray(feature['masked_lm_ids'], dtype='int32'))
masked_lm_weights.append(
np.ascontiguousarray(feature['masked_lm_weights'], dtype='float32'))
next_sentence_labels.append(feature['next_sentence_labels'][0])
valid_lengths.append(feature['valid_lengths'][0])
if inst_index < 20:
print_example(instance, feature)
features = (input_ids, segment_ids, masked_lm_positions, masked_lm_ids, \
masked_lm_weights, next_sentence_labels, valid_lengths)
logging.info('*** Writing to output file %s ***', out)
if args.format == 'numpy':
write_to_files_np(features, tokenizer, args.max_seq_length,
args.max_predictions_per_seq, [out])
elif args.format == 'recordio':
write_to_files_rec(instances, tokenizer, args.max_seq_length,
args.max_predictions_per_seq, [out])
else:
raise ValueError('unsupported format: %s'%args.format)
time_end = time.time()
logging.info('Process %d files took %.1f s', len(input_files), time_end - time_start)
|
Create `TrainingInstance`s from raw text.
|
def select_qadapter(self, pconfs):
"""
Given a list of parallel configurations, pconfs, this method select an `optimal` configuration
according to some criterion as well as the :class:`QueueAdapter` to use.
Args:
pconfs: :class:`ParalHints` object with the list of parallel configurations
Returns:
:class:`ParallelConf` object with the `optimal` configuration.
"""
# Order the list of configurations according to policy.
policy, max_ncpus = self.policy, self.max_cores
pconfs = pconfs.get_ordered_with_policy(policy, max_ncpus)
if policy.precedence == "qadapter":
# Try to run on the qadapter with the highest priority.
for qadpos, qad in enumerate(self.qads):
possible_pconfs = [pc for pc in pconfs if qad.can_run_pconf(pc)]
if qad.allocation == "nodes":
#if qad.allocation in ["nodes", "force_nodes"]:
# Select the configuration divisible by nodes if possible.
for pconf in possible_pconfs:
if pconf.num_cores % qad.hw.cores_per_node == 0:
return self._use_qadpos_pconf(qadpos, pconf)
# Here we select the first one.
if possible_pconfs:
return self._use_qadpos_pconf(qadpos, possible_pconfs[0])
elif policy.precedence == "autoparal_conf":
# Try to run on the first pconf irrespectively of the priority of the qadapter.
for pconf in pconfs:
for qadpos, qad in enumerate(self.qads):
if qad.allocation == "nodes" and not pconf.num_cores % qad.hw.cores_per_node == 0:
continue # Ignore it. not very clean
if qad.can_run_pconf(pconf):
return self._use_qadpos_pconf(qadpos, pconf)
else:
raise ValueError("Wrong value of policy.precedence = %s" % policy.precedence)
# No qadapter could be found
raise RuntimeError("Cannot find qadapter for this run!")
|
Given a list of parallel configurations, pconfs, this method select an `optimal` configuration
according to some criterion as well as the :class:`QueueAdapter` to use.
Args:
pconfs: :class:`ParalHints` object with the list of parallel configurations
Returns:
:class:`ParallelConf` object with the `optimal` configuration.
|
def upload(self, project_id, processor_name, **fields):
"""Upload files and data objects.
:param project_id: ObjectId of Genesis project
:type project_id: string
:param processor_name: Processor object name
:type processor_name: string
:param fields: Processor field-value pairs
:type fields: args
:rtype: HTTP Response object
"""
p = self.processors(processor_name=processor_name)
if len(p) == 1:
p = p[0]
else:
raise Exception('Invalid processor name {}'.format(processor_name))
for field_name, field_val in fields.items():
if field_name not in p['input_schema']:
Exception("Field {} not in processor {} inputs".format(field_name, p['name']))
if find_field(p['input_schema'], field_name)['type'].startswith('basic:file:'):
if not os.path.isfile(field_val):
Exception("File {} not found".format(field_val))
inputs = {}
for field_name, field_val in fields.items():
if find_field(p['input_schema'], field_name)['type'].startswith('basic:file:'):
file_temp = self._upload_file(field_val)
if not file_temp:
Exception("Upload failed for {}".format(field_val))
inputs[field_name] = {
'file': field_val,
'file_temp': file_temp
}
else:
inputs[field_name] = field_val
d = {
'status': 'uploading',
'case_ids': [project_id],
'processor_name': processor_name,
'input': inputs,
}
return self.create(d)
|
Upload files and data objects.
:param project_id: ObjectId of Genesis project
:type project_id: string
:param processor_name: Processor object name
:type processor_name: string
:param fields: Processor field-value pairs
:type fields: args
:rtype: HTTP Response object
|
def _executable_memory_regions(self, objects=None, force_segment=False):
"""
Get all executable memory regions from the binaries
:param objects: A collection of binary objects to collect regions from. If None, regions from all project
binary objects are used.
:param bool force_segment: Rely on binary segments instead of sections.
:return: A sorted list of tuples (beginning_address, end_address)
"""
if objects is None:
binaries = self.project.loader.all_objects
else:
binaries = objects
memory_regions = [ ]
for b in binaries:
if isinstance(b, ELF):
# If we have sections, we get result from sections
if not force_segment and b.sections:
# Get all executable sections
for section in b.sections:
if section.is_executable:
tpl = (section.min_addr, section.max_addr)
memory_regions.append(tpl)
else:
# Get all executable segments
for segment in b.segments:
if segment.is_executable:
tpl = (segment.min_addr, segment.max_addr)
memory_regions.append(tpl)
elif isinstance(b, PE):
for section in b.sections:
if section.is_executable:
tpl = (section.min_addr, section.max_addr)
memory_regions.append(tpl)
elif isinstance(b, MachO):
if b.segments:
# Get all executable segments
for seg in b.segments:
if seg.is_executable:
# Take all sections from this segment (MachO style)
for section in seg.sections:
tpl = (section.min_addr, section.max_addr)
memory_regions.append(tpl)
elif isinstance(b, Blob):
# a blob is entirely executable
tpl = (b.min_addr, b.max_addr)
memory_regions.append(tpl)
elif isinstance(b, self._cle_pseudo_objects):
pass
else:
l.warning('Unsupported object format "%s". Treat it as an executable.', b.__class__.__name__)
tpl = (b.min_addr, b.max_addr)
memory_regions.append(tpl)
if not memory_regions:
memory_regions = [(start, start + len(backer)) for start, backer in self.project.loader.memory.backers()]
memory_regions = sorted(memory_regions, key=lambda x: x[0])
return memory_regions
|
Get all executable memory regions from the binaries
:param objects: A collection of binary objects to collect regions from. If None, regions from all project
binary objects are used.
:param bool force_segment: Rely on binary segments instead of sections.
:return: A sorted list of tuples (beginning_address, end_address)
|
def _parse_persons(self, datafield, subfield, roles=["aut"]):
"""
Parse persons from given datafield.
Args:
datafield (str): code of datafield ("010", "730", etc..)
subfield (char): code of subfield ("a", "z", "4", etc..)
role (list of str): set to ["any"] for any role, ["aut"] for
authors, etc.. For details see
http://www.loc.gov/marc/relators/relaterm.html
Main records for persons are: "100", "600" and "700", subrecords "c".
Returns:
list: Person objects.
"""
# parse authors
parsed_persons = []
raw_persons = self.get_subfields(datafield, subfield)
for person in raw_persons:
# check if person have at least one of the roles specified in
# 'roles' parameter of function
other_subfields = person.other_subfields
if "4" in other_subfields and roles != ["any"]:
person_roles = other_subfields["4"] # list of role parameters
relevant = any(map(lambda role: role in roles, person_roles))
# skip non-relevant persons
if not relevant:
continue
# result of .strip() is string, so ind1/2 in MARCSubrecord are lost
ind1 = person.i1
ind2 = person.i2
person = person.strip()
name = ""
second_name = ""
surname = ""
title = ""
# here it gets nasty - there is lot of options in ind1/ind2
# parameters
if ind1 == "1" and ind2 == " ":
if "," in person:
surname, name = person.split(",", 1)
elif " " in person:
surname, name = person.split(" ", 1)
else:
surname = person
if "c" in other_subfields:
title = ",".join(other_subfields["c"])
elif ind1 == "0" and ind2 == " ":
name = person.strip()
if "b" in other_subfields:
second_name = ",".join(other_subfields["b"])
if "c" in other_subfields:
surname = ",".join(other_subfields["c"])
elif ind1 == "1" and ind2 == "0" or ind1 == "0" and ind2 == "0":
name = person.strip()
if "c" in other_subfields:
title = ",".join(other_subfields["c"])
parsed_persons.append(
Person(
name.strip(),
second_name.strip(),
surname.strip(),
title.strip()
)
)
return parsed_persons
|
Parse persons from given datafield.
Args:
datafield (str): code of datafield ("010", "730", etc..)
subfield (char): code of subfield ("a", "z", "4", etc..)
role (list of str): set to ["any"] for any role, ["aut"] for
authors, etc.. For details see
http://www.loc.gov/marc/relators/relaterm.html
Main records for persons are: "100", "600" and "700", subrecords "c".
Returns:
list: Person objects.
|
def getRaw(self, context, aslist=False, **kwargs):
"""Grab the stored value, and return it directly as UIDs.
:param context: context is the object whose schema contains this field.
:type context: BaseContent
:param aslist: Forces a single-valued field to return a list type.
:type aslist: bool
:param kwargs: kwargs are passed directly to the underlying get.
:type kwargs: dict
:return: UID or list of UIDs for multiValued fields.
:rtype: string | list[string]
"""
value = StringField.get(self, context, **kwargs)
if not value:
return [] if self.multiValued else None
if self.multiValued:
ret = value
else:
ret = self.get_uid(context, value)
if aslist:
ret = [ret]
return ret
|
Grab the stored value, and return it directly as UIDs.
:param context: context is the object whose schema contains this field.
:type context: BaseContent
:param aslist: Forces a single-valued field to return a list type.
:type aslist: bool
:param kwargs: kwargs are passed directly to the underlying get.
:type kwargs: dict
:return: UID or list of UIDs for multiValued fields.
:rtype: string | list[string]
|
def deserialize(self, to_deserialize: PrimitiveJsonType) \
-> Optional[Union[SerializableType, List[SerializableType]]]:
"""
Deserializes the given representation of the serialized object.
:param to_deserialize: the serialized object as a dictionary
:return: the deserialized object or collection of deserialized objects
"""
if to_deserialize is None:
# Implements #17
return None
elif isinstance(to_deserialize, List):
deserialized = []
for item in to_deserialize:
item_deserialized = self.deserialize(item)
deserialized.append(item_deserialized)
return deserialized
else:
mappings_not_set_in_constructor = [] # type: List[PropertyMapping]
init_kwargs = dict() # type: Dict[str, Any]
for mapping in self._property_mappings:
if mapping.object_constructor_parameter_name is not None:
value = mapping.serialized_property_getter(to_deserialize)
if not (mapping.optional and value is None):
decoded_value = self._deserialize_property_value(value, mapping.deserializer_cls)
if isinstance(decoded_value, list):
collection = mapping.collection_factory(decoded_value)
decoded_value = collection
argument = mapping.object_constructor_argument_modifier(decoded_value)
init_kwargs[mapping.object_constructor_parameter_name] = argument
else:
mappings_not_set_in_constructor.append(mapping)
decoded = self._deserializable_cls(**init_kwargs)
assert type(decoded) == self._deserializable_cls
for mapping in mappings_not_set_in_constructor:
assert mapping.object_constructor_parameter_name is None
if mapping.serialized_property_getter is not None and mapping.object_property_setter is not None:
value = mapping.serialized_property_getter(to_deserialize)
if not (mapping.optional and value is None):
decoded_value = self._deserialize_property_value(value, mapping.deserializer_cls)
if isinstance(decoded_value, list):
collection = mapping.collection_factory(decoded_value)
decoded_value = collection
mapping.object_property_setter(decoded, decoded_value)
return decoded
|
Deserializes the given representation of the serialized object.
:param to_deserialize: the serialized object as a dictionary
:return: the deserialized object or collection of deserialized objects
|
def unregister_directory(self, directory_node, raise_exception=False):
"""
Unregisters given :class:`umbra.components.factory.script_editor.nodes.DirectoryNode` class Node from the Model.
:param directory_node: DirectoryNode to unregister.
:type directory_node: DirectoryNode
:param raise_exception: Raise the exception.
:type raise_exception: bool
:return: DirectoryNode.
:rtype: DirectoryNode
"""
if raise_exception:
if directory_node not in self.list_directory_nodes():
raise foundations.exceptions.ProgrammingError(
"{0} | '{1}' directory 'DirectoryNode' isn't registered!".format(
self.__class__.__name__, directory_node))
LOGGER.debug("> Unregistering '{0}' directory 'DirectoryNode'.".format(directory_node))
parent = directory_node.parent
row = directory_node.row()
self.beginRemoveRows(self.get_node_index(parent), row, row)
parent.remove_child(row)
self.endRemoveRows()
self.directory_unregistered.emit(directory_node)
return directory_node
|
Unregisters given :class:`umbra.components.factory.script_editor.nodes.DirectoryNode` class Node from the Model.
:param directory_node: DirectoryNode to unregister.
:type directory_node: DirectoryNode
:param raise_exception: Raise the exception.
:type raise_exception: bool
:return: DirectoryNode.
:rtype: DirectoryNode
|
def import_(module_name, name):
"""Imports an object by a relative module path::
Profiler = import_('profiling.profiler', 'Profiler')
"""
module = importlib.import_module(module_name, __package__)
return getattr(module, name)
|
Imports an object by a relative module path::
Profiler = import_('profiling.profiler', 'Profiler')
|
def score(self, X, eval_metric='acc', num_batch=None, batch_end_callback=None, reset=True):
"""Run the model given an input and calculate the score
as assessed by an evaluation metric.
Parameters
----------
X : mxnet.DataIter
eval_metric : metric.metric
The metric for calculating score.
num_batch : int or None
The number of batches to run. Go through all batches if ``None``.
Returns
-------
s : float
The final score.
"""
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
for i, batch in enumerate(X):
if num_batch is not None and i == num_batch:
break
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
eval_metric.update(batch.label, self._pred_exec.outputs)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=0,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
return eval_metric.get()[1]
|
Run the model given an input and calculate the score
as assessed by an evaluation metric.
Parameters
----------
X : mxnet.DataIter
eval_metric : metric.metric
The metric for calculating score.
num_batch : int or None
The number of batches to run. Go through all batches if ``None``.
Returns
-------
s : float
The final score.
|
def lyric(id):
"""通过歌曲 ID 获取歌曲歌词地址
:param id: 歌曲ID
"""
if id is None:
raise ParamsError()
r = NCloudBot()
r.method = 'LYRIC'
r.params = {'id': id}
r.send()
return r.response
|
Get the lyrics address for a song by its song ID.
:param id: song ID
|
def get_sample_value(self, name, labels=None):
"""Returns the sample value, or None if not found.
This is inefficient, and intended only for use in unittests.
"""
if labels is None:
labels = {}
for metric in self.collect():
for s in metric.samples:
if s.name == name and s.labels == labels:
return s.value
return None
|
Returns the sample value, or None if not found.
This is inefficient, and intended only for use in unittests.
|
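This mirrors the test helper on prometheus_client's registry; a small sketch against the default registry, assuming the library is installed (counter samples carry the '_total' suffix):
from prometheus_client import Counter, REGISTRY

c = Counter('demo_requests_total', 'Hypothetical request counter')
c.inc(3)
print(REGISTRY.get_sample_value('demo_requests_total'))  # 3.0
print(REGISTRY.get_sample_value('no_such_metric'))       # None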
def post_predictions(self, document_id: str, model_name: str) -> dict:
"""Run inference and create a prediction, calls the POST /predictions endpoint.
>>> from las import Client
>>> client = Client(endpoint='<api endpoint>')
>>> client.post_predictions(document_id='<document id>', model_name='invoice')
:param document_id: The document id to run inference and create a prediction on
:type document_id: str
:param model_name: The name of the model to use for inference
:type model_name: str
:return: Prediction on document
:rtype: dict
:raises InvalidCredentialsException: If the credentials are invalid
:raises TooManyRequestsException: If limit of requests per second is reached
:raises LimitExceededException: If limit of total requests per month is reached
:raises requests.exception.RequestException: If error was raised by requests
"""
body = json.dumps({'documentId': document_id, 'modelName': model_name}).encode()
uri, headers = self._create_signing_headers('POST', '/predictions', body)
post_predictions_response = requests.post(
url=uri.geturl(),
headers=headers,
data=body
)
response = _json_decode(post_predictions_response)
return response
|
Run inference and create a prediction, calls the POST /predictions endpoint.
>>> from las import Client
>>> client = Client(endpoint='<api endpoint>')
>>> client.post_predictions(document_id='<document id>', model_name='invoice')
:param document_id: The document id to run inference and create a prediction on
:type document_id: str
:param model_name: The name of the model to use for inference
:type model_name: str
:return: Prediction on document
:rtype: dict
:raises InvalidCredentialsException: If the credentials are invalid
:raises TooManyRequestsException: If limit of requests per second is reached
:raises LimitExceededException: If limit of total requests per month is reached
:raises requests.exception.RequestException: If error was raised by requests
|
def get_consumed_read_units_percent(
table_name, lookback_window_start=15, lookback_period=5):
""" Returns the number of consumed read units in percent
:type table_name: str
:param table_name: Name of the DynamoDB table
:type lookback_window_start: int
:param lookback_window_start: Relative start time for the CloudWatch metric
:type lookback_period: int
:param lookback_period: Number of minutes to look at
:returns: float -- Number of consumed reads as a
percentage of provisioned reads
"""
metrics = __get_aws_metric(
table_name,
lookback_window_start,
lookback_period,
'ConsumedReadCapacityUnits')
if metrics:
lookback_seconds = lookback_period * 60
consumed_read_units = (
float(metrics[0]['Sum']) / float(lookback_seconds))
else:
consumed_read_units = 0
table_read_units = dynamodb.get_provisioned_table_read_units(
table_name)
consumed_read_units_percent = (
float(consumed_read_units) /
float(table_read_units) * 100)
logger.info('{0} - Consumed read units: {1:.2f}%'.format(
table_name, consumed_read_units_percent))
return consumed_read_units_percent
|
Returns the number of consumed read units in percent
:type table_name: str
:param table_name: Name of the DynamoDB table
:type lookback_window_start: int
:param lookback_window_start: Relative start time for the CloudWatch metric
:type lookback_period: int
:param lookback_period: Number of minutes to look at
:returns: float -- Number of consumed reads as a
percentage of provisioned reads
|
def document_agents(p):
"""
Document agents in AIKIF (purpose and intent)
"""
p.comment('agent.py', 'base agent class')
p.comment('run_agents.py', 'Top level function to run the agents')
p.comment('agent_image_metadata.py', 'agent to collect file picture metadata')
p.comment('agent_learn_aixi.py', '')
p.comment('dummy_learn_1.py', 'sample (but stub only) learning algorithm to be called as test below')
p.comment('agent_explore_grid.py', 'working prototype of agent to move through a grid world, using very simple path finding.')
p.comment('agent_email.py', 'Agent that reads emails (currently only gmail)')
p.comment('agent_filelist.py', 'TOK - correctly scans and logs filelists from an agent')
p.comment('collect_Win_processes.py', 'script to collect windows processes. Currently not part of agent process, more an exercise on what can be logged')
p.comment('log_PC_usage.py', 'script to read current window title to be used as part of context to see what user is doing')
p.comment('log_browser_history.py', 'script to dump chrome browser history to CSV - not used')
p.comment('agg_context.py', 'detects context of user and computer')
|
Document agents in AIKIF (purpose and intent)
|
def insert_cylinder(im, xyz0, xyz1, r):
r"""
Inserts a cylinder of given radius onto a given image
Parameters
----------
im : array_like
Original voxelated image
xyz0, xyz1 : 3-by-1 array_like
Voxel coordinates of the two end points of the cylinder
r : int
Radius of the cylinder
Returns
-------
im : ND-array
Original voxelated image overlayed with the cylinder
Notes
-----
This function is only implemented for 3D images
"""
if im.ndim != 3:
raise Exception('This function is only implemented for 3D images')
# Converting coordinates to numpy array
xyz0, xyz1 = [sp.array(xyz).astype(int) for xyz in (xyz0, xyz1)]
r = int(r)
L = sp.absolute(xyz0 - xyz1).max() + 1
xyz_line = [sp.linspace(xyz0[i], xyz1[i], L).astype(int) for i in range(3)]
xyz_min = sp.amin(xyz_line, axis=1) - r
xyz_max = sp.amax(xyz_line, axis=1) + r
shape_template = xyz_max - xyz_min + 1
template = sp.zeros(shape=shape_template)
# Shortcut for orthogonal cylinders
if (xyz0 == xyz1).sum() == 2:
unique_dim = [xyz0[i] != xyz1[i] for i in range(3)].index(True)
shape_template[unique_dim] = 1
template_2D = disk(radius=r).reshape(shape_template)
template = sp.repeat(template_2D, repeats=L, axis=unique_dim)
xyz_min[unique_dim] += r
xyz_max[unique_dim] -= r
else:
xyz_line_in_template_coords = [xyz_line[i] - xyz_min[i] for i in range(3)]
template[tuple(xyz_line_in_template_coords)] = 1
template = spim.distance_transform_edt(template == 0) <= r
im[xyz_min[0]:xyz_max[0]+1,
xyz_min[1]:xyz_max[1]+1,
xyz_min[2]:xyz_max[2]+1] += template
return im
|
r"""
Inserts a cylinder of given radius onto a given image
Parameters
----------
im : array_like
Original voxelated image
xyz0, xyz1 : 3-by-1 array_like
Voxel coordinates of the two end points of the cylinder
r : int
Radius of the cylinder
Returns
-------
im : ND-array
Original voxelated image overlayed with the cylinder
Notes
-----
This function is only implemented for 3D images
|
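A minimal sketch, assuming insert_cylinder and its module-level dependencies (scipy as sp, skimage.morphology's disk, scipy.ndimage as spim) are in scope; the cylinder here is axis-aligned, so the fast orthogonal branch is taken:
import numpy as np

im = np.zeros((40, 40, 40), dtype=int)
im = insert_cylinder(im, xyz0=[5, 20, 20], xyz1=[35, 20, 20], r=4)
print(im.sum())  # number of voxels set by the cylinder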
def update_pos(self, pos_id, name, pos_type, location=None):
"""Update POS resource. Returns the raw response object.
Arguments:
pos_id:
POS id as chosen on registration
name:
Human-readable name of the POS, used for displaying payment
request origin to end user
pos_type:
POS type
location:
Merchant location
"""
arguments = {'name': name,
'type': pos_type,
'location': location}
return self.do_req('PUT',
self.merchant_api_base_url + '/pos/' +
pos_id + '/', arguments)
|
Update POS resource. Returns the raw response object.
Arguments:
pos_id:
POS id as chosen on registration
name:
Human-readable name of the POS, used for displaying payment
request origin to end user
pos_type:
POS type
location:
Merchant location
|
def _subtract_timedelta(self, delta):
"""
Remove timedelta duration from the instance.
:param delta: The timedelta instance
:type delta: pendulum.Duration or datetime.timedelta
:rtype: DateTime
"""
if isinstance(delta, pendulum.Duration):
return self.subtract(
years=delta.years,
months=delta.months,
weeks=delta.weeks,
days=delta.remaining_days,
hours=delta.hours,
minutes=delta.minutes,
seconds=delta.remaining_seconds,
microseconds=delta.microseconds,
)
return self.subtract(
days=delta.days, seconds=delta.seconds, microseconds=delta.microseconds
)
|
Remove timedelta duration from the instance.
:param delta: The timedelta instance
:type delta: pendulum.Duration or datetime.timedelta
:rtype: DateTime
|
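This private helper backs DateTime subtraction; a sketch via pendulum's public API showing the two branches (outputs noted as assumptions):
import datetime
import pendulum

dt = pendulum.datetime(2024, 3, 31, 12, 0, 0)
# Plain timedelta: handled by the days/seconds/microseconds branch.
print(dt - datetime.timedelta(days=1))   # 2024-03-30T12:00:00+00:00
# pendulum.Duration: calendar-aware, so a month is not a fixed length.
print(dt - pendulum.duration(months=1))  # 2024-02-29T12:00:00+00:00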
def LMLgrad(self,params=None):
"""
evaluates the gradient of the log marginal likelihood for the given hyperparameters
"""
if params is not None:
self.setParams(params)
KV = self._update_cache()
W = KV['W']
LMLgrad = SP.zeros(self.covar.n_params)
for i in range(self.covar.n_params):
Kd = self.covar.Kgrad_param(i)
LMLgrad[i] = 0.5 * (W*Kd).sum()
return {'covar':LMLgrad}
|
evaluates the gradient of the log marginal likelihood for the given hyperparameters
|
def grants(self):
"""
Returns grants for the current user
"""
from linode_api4.objects.account import UserGrants
resp = self._client.get('/profile/grants') # use special endpoint for restricted users
grants = None
if resp is not None:
# if resp is None, we're unrestricted and do not have grants
grants = UserGrants(self._client, self.username, resp)
return grants
|
Returns grants for the current user
|
def get_function_doc(function, config=default_config):
"""Return doc for a function."""
if config.exclude_function:
for ex in config.exclude_function:
if ex.match(function.__name__):
return None
return _doc_object(function, 'function', config=config)
|
Return doc for a function.
|
def has_valid_dispatch_view_docs(endpoint):
"""
Return True if dispatch_request is swaggable
"""
klass = endpoint.__dict__.get('view_class', None)
return klass and hasattr(klass, 'dispatch_request') \
and hasattr(endpoint, 'methods') \
and getattr(klass, 'dispatch_request').__doc__
|
Return True if dispatch_request is swaggable
|
def getRow(leftU, rightV, jVec):
r'''
Compute X_{\geq \mu}^T \otimes X_{\leq \mu}
X_{\geq \mu} = V_{\mu+1}(j_{\mu}) \ldots V_{d} (j_{d}) [left interface matrix]
X_{\leq \mu} = U_{1} (j_{1}) \ldots U_{\mu-1}(j_{\mu-1}) [right interface matrix]
Parameters:
:list of numpy.arrays: leftU
left-orthogonal cores from 1 to \mu-1
:list of numpy.arrays: rightV
right-orthogonal cores from \mu+1 to d
:list, tuple, np.array: jVec
indices for each dimension n[k]
Returns:
:numpy.array: result
Kronecker product between left and right interface
matrices. Left matrix is transposed.
'''
jLeft = None
jRight = None
if len(leftU) > 0:
jLeft = jVec[:len(leftU)]
if len(rightV) > 0:
jRight = jVec[-len(rightV):]
multU = np.ones([1,1])
for k in range(len(leftU)):
multU = np.dot(multU, leftU[k][:, jLeft[k], :])
multV = np.ones([1, 1])
for k in range(len(rightV)-1, -1, -1):
multV = np.dot(rightV[k][:, jRight[k], :], multV)
result = np.kron(multV.T, multU)
return result
|
Compute X_{\geq \mu}^T \otimes X_{\leq \mu}
X_{\geq \mu} = V_{\mu+1}(j_{\mu}) \ldots V_{d} (j_{d}) [left interface matrix]
X_{\leq \mu} = U_{1} (j_{1}) \ldots U_{\mu-1}(j_{\mu-1}) [right interface matrix]
Parameters:
:list of numpy.arrays: leftU
left-orthogonal cores from 1 to \mu-1
:list of numpy.arrays: rightV
right-orthogonal cores from \mu+1 to d
:list, tuple, np.array: jVec
indices for each dimension n[k]
Returns:
:numpy.array: result
Kronecker product between left and right interface
matrices. Left matrix is transposed.
|
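A small sketch with random TT cores of shape (r_{k-1}, n_k, r_k); the core index mu and the sizes are arbitrary choices for illustration:
import numpy as np

d, n, r = 4, 3, 2  # number of cores, mode size, internal TT-rank
cores = [np.random.rand(1 if k == 0 else r, n, 1 if k == d - 1 else r)
         for k in range(d)]
mu = 2  # 0-based index of the core being optimised
row = getRow(cores[:mu], cores[mu + 1:], jVec=[0, 1, 2, 1])
print(row.shape)  # (1, 4): kron of a (1, r) left part and a (1, r) right part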
def export(self, input_stats=None):
"""Export all the stats.
Each export module is run in a dedicated thread.
"""
# threads = []
input_stats = input_stats or {}
for e in self._exports:
logger.debug("Export stats using the %s module" % e)
thread = threading.Thread(target=self._exports[e].update,
args=(input_stats,))
# threads.append(thread)
thread.start()
|
Export all the stats.
Each export module is run in a dedicated thread.
|
def t_COMMA(self, t):
r','
t.endlexpos = t.lexpos + len(t.value)
return t
|
,
|
def client(self):
"""Returns client session object"""
if self._client is None:
self._client = get_session(self.user_agent)
return self._client
|
Returns client session object
|
def from_xyz_string(xyz_string):
"""
Args:
xyz_string: string of the form 'x, y, z', '-x, -y, z',
'-2y+1/2, 3x+1/2, z-y+1/2', etc.
Returns:
SymmOp
"""
rot_matrix = np.zeros((3, 3))
trans = np.zeros(3)
toks = xyz_string.strip().replace(" ", "").lower().split(",")
re_rot = re.compile(r"([+-]?)([\d\.]*)/?([\d\.]*)([x-z])")
re_trans = re.compile(r"([+-]?)([\d\.]+)/?([\d\.]*)(?![x-z])")
for i, tok in enumerate(toks):
# build the rotation matrix
for m in re_rot.finditer(tok):
factor = -1 if m.group(1) == "-" else 1
if m.group(2) != "":
factor *= float(m.group(2)) / float(m.group(3)) \
if m.group(3) != "" else float(m.group(2))
j = ord(m.group(4)) - 120
rot_matrix[i, j] = factor
# build the translation vector
for m in re_trans.finditer(tok):
factor = -1 if m.group(1) == "-" else 1
num = float(m.group(2)) / float(m.group(3)) \
if m.group(3) != "" else float(m.group(2))
trans[i] = num * factor
return SymmOp.from_rotation_and_translation(rot_matrix, trans)
|
Args:
xyz_string: string of the form 'x, y, z', '-x, -y, z',
'-2y+1/2, 3x+1/2, z-y+1/2', etc.
Returns:
SymmOp
|
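A sketch using pymatgen's SymmOp (the usual import path, assuming a recent pymatgen); the printed values are what the parser above should produce:
from pymatgen.core.operations import SymmOp

op = SymmOp.from_xyz_string('-y, x-y, z+1/2')
print(op.rotation_matrix)
# [[ 0. -1.  0.]
#  [ 1. -1.  0.]
#  [ 0.  0.  1.]]
print(op.translation_vector)  # [0.  0.  0.5]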
def _validate_namespace(self, namespace):
"""
Validate whether a CIM namespace exists in the mock repository.
Parameters:
namespace (:term:`string`):
The name of the CIM namespace in the mock repository. Must not be
`None`.
Raises:
:exc:`~pywbem.CIMError`: CIM_ERR_INVALID_NAMESPACE: Namespace does
not exist.
"""
if namespace not in self.namespaces:
raise CIMError(
CIM_ERR_INVALID_NAMESPACE,
_format("Namespace does not exist in mock repository: {0!A}",
namespace))
|
Validate whether a CIM namespace exists in the mock repository.
Parameters:
namespace (:term:`string`):
The name of the CIM namespace in the mock repository. Must not be
`None`.
Raises:
:exc:`~pywbem.CIMError`: CIM_ERR_INVALID_NAMESPACE: Namespace does
not exist.
|
def main():
"""Command-Mode: Retrieve and display data then process commands."""
(cred, providers) = config_read()
cmd_mode = True
conn_objs = cld.get_conns(cred, providers)
while cmd_mode:
nodes = cld.get_data(conn_objs, providers)
node_dict = make_node_dict(nodes, "name")
idx_tbl = table.indx_table(node_dict, True)
cmd_mode = ui.ui_main(idx_tbl, node_dict)
print("\033[?25h")
|
Command-Mode: Retrieve and display data then process commands.
|
def result(self, value):
"""The result of the command."""
if self._process_result:
self._result = self._process_result(value)
else:
self._result = value
self._raw_result = value
|
The result of the command.
|
def get_parameter(self, parameter):
"Return a dict for given parameter"
parameter = self._get_parameter_name(parameter)
return self._parameters[parameter]
|
Return a dict for given parameter
|
def __expect(self, exp='> ', timeout=None):
"""will wait for exp to be returned from nodemcu or timeout"""
timeout_before = self._port.timeout
timeout = timeout or self._timeout
#do NOT set timeout on Windows
if SYSTEM != 'Windows':
# Checking for new data every 100us is fast enough
if self._port.timeout != MINIMAL_TIMEOUT:
self._port.timeout = MINIMAL_TIMEOUT
end = time.time() + timeout
# Finish as soon as either exp matches or we run out of time (work like dump, but faster on success)
data = ''
while not data.endswith(exp) and time.time() <= end:
data += self._port.read()
log.debug('expect returned: `{0}`'.format(data))
if time.time() > end:
raise CommunicationTimeout('Timeout waiting for data', data)
if not data.endswith(exp) and len(exp) > 0:
raise BadResponseException('Bad response.', exp, data)
if SYSTEM != 'Windows':
self._port.timeout = timeout_before
return data
|
will wait for exp to be returned from nodemcu or timeout
|
def set_fig_y_label(self, ylabel, **kwargs):
"""Set overall figure y.
Set label for y axis on overall figure. This is not for a specific plot.
It will place the label on the figure at the left with a call to ``fig.text``.
Args:
ylabel (str): ylabel for entire figure.
Keyword Arguments:
x/y (float, optional): The x/y location of the text in figure coordinates.
Defaults are 0.45 for x and 0.02 for y.
horizontalalignment/ha (str, optional): The horizontal alignment of
the text relative to (x, y). Options are 'center', 'left', or 'right'.
Default is 'center'.
verticalalignment/va (str, optional): The vertical alignment of the text
relative to (x, y). Options are 'top', 'center', 'bottom',
or 'baseline'. Default is 'top'.
fontsize/size (int): The font size of the text. Default is 20.
rotation (float or str): Rotation of label. Options are angle in degrees,
`horizontal`, or `vertical`. Default is `horizontal`.
Note: Other kwargs are available.
See https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.figtext
"""
prop_default = {
'x': 0.45,
'y': 0.02,
'fontsize': 20,
'rotation': 'horizontal',
'ha': 'center',
}
for prop, default in prop_default.items():
kwargs[prop] = kwargs.get(prop, default)
self._set_fig_label('y', ylabel, **kwargs)
return
|
Set overall figure y.
Set label for y axis on overall figure. This is not for a specific plot.
It will place the label on the figure at the left with a call to ``fig.text``.
Args:
ylabel (str): ylabel for entire figure.
Keyword Arguments:
x/y (float, optional): The x/y location of the text in figure coordinates.
Defaults are 0.45 for x and 0.02 for y.
horizontalalignment/ha (str, optional): The horizontal alignment of
the text relative to (x, y). Options are 'center', 'left', or 'right'.
Default is 'center'.
verticalalignment/va (str, optional): The vertical alignment of the text
relative to (x, y). Options are 'top', 'center', 'bottom',
or 'baseline'. Default is 'top'.
fontsize/size (int): The font size of the text. Default is 20.
rotation (float or str): Rotation of label. Options are angle in degrees,
`horizontal`, or `vertical`. Default is `horizontal`.
Note: Other kwargs are available.
See https://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.figtext
|
def remove(self, document_id, namespace, timestamp):
"""Removes document from Mongo
The input is a python dictionary that represents a mongo document.
The document has ns and _ts fields.
"""
database, coll = self._db_and_collection(namespace)
meta_collection = self._get_meta_collection(namespace)
doc2 = self.meta_database[meta_collection].find_one_and_delete(
{self.id_field: document_id}
)
if doc2 and doc2.get("gridfs_id"):
GridFS(self.mongo[database], coll).delete(doc2["gridfs_id"])
else:
self.mongo[database][coll].delete_one({"_id": document_id})
|
Removes document from Mongo
The input is a python dictionary that represents a mongo document.
The document has ns and _ts fields.
|