code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def threshold_otsu(image, multiplier=1.0):
    """Binarize *image* with a scaled Otsu threshold.

    The cutoff is Otsu's automatic threshold multiplied by *multiplier*;
    pixels strictly above it map to True.
    """
    cutoff = skimage.filters.threshold_otsu(image) * multiplier
    return image > cutoff
|
Return image thresholded using Otsu's method.
|
def has_changed_since_last_deploy(file_path, bucket):
    """
    Checks if a file has changed since the last time it was deployed.

    :param file_path: Path to file which should be checked. Should be relative
        from root of bucket.
    :param bucket: Boto S3 Bucket object to check against (the original
        docstring documented a nonexistent ``bucket_name`` parameter).
    :returns: True if the file has changed or does not exist in the bucket,
        else False.
    """
    msg = "Checking if {0} has changed since last deploy.".format(file_path)
    logger.debug(msg)
    with open(file_path) as f:
        data = f.read()
    file_md5 = hashlib.md5(data.encode('utf-8')).hexdigest()
    logger.debug("file_md5 is {0}".format(file_md5))
    key = bucket.get_key(file_path)
    # HACK: Boto's md5 property does not work when the file hasn't been
    # downloaded. The etag works but will break for multi-part uploaded files.
    # http://stackoverflow.com/questions/16872679/how-to-programmatically-
    # get-the-md5-checksum-of-amazon-s3-file-using-boto/17607096#17607096
    # Also the double quotes around it must be stripped. Sketchy...boto's fault
    if key:
        key_md5 = key.etag.replace('"', '').strip()
        logger.debug("key_md5 is {0}".format(key_md5))
    else:
        # Missing key counts as "changed" so the caller re-uploads it.
        logger.debug("File does not exist in bucket")
        return True
    if file_md5 == key_md5:
        logger.debug("File has not changed.")
        return False
    logger.debug("File has changed.")
    return True
|
Checks if a file has changed since the last time it was deployed.
:param file_path: Path to file which should be checked. Should be relative
from root of bucket.
:param bucket_name: Name of S3 bucket to check against.
:returns: True if the file has changed, else False.
|
def mmGetMetricSequencesPredictedActiveCellsShared(self):
    """
    Metric for number of sequences each predicted => active cell appears in

    Note: This metric is flawed when it comes to high-order sequences.

    @return (Metric) metric
    """
    self._mmComputeTransitionTraces()

    # Count, per cell, how many sequences it showed up in.
    sequenceCounts = defaultdict(int)
    for cells in self._mmData["predictedActiveCellsForSequence"].values():
        for cell in cells:
            sequenceCounts[cell] += 1

    return Metric(self,
                  "# sequences each predicted => active cells appears in",
                  sequenceCounts.values())
|
Metric for number of sequences each predicted => active cell appears in
Note: This metric is flawed when it comes to high-order sequences.
@return (Metric) metric
|
def latex(self):
    """Return LaTeX representation of the abstract.

    Formats authors, title, journal, volume/issue, pages, date, DOI and
    Scopus URL into a single citation string.
    """
    s = ('{authors}, \\textit{{{title}}}, {journal}, {volissue}, '
         '{pages}, ({date}). {doi}, {scopus_url}.')
    if len(self.authors) > 1:
        authors = ', '.join([str(a.given_name) +
                             ' ' + str(a.surname)
                             for a in self.authors[0:-1]])
        authors += (' and ' +
                    str(self.authors[-1].given_name) +
                    ' ' + str(self.authors[-1].surname))
    else:
        a = self.authors[0]
        authors = str(a.given_name) + ' ' + str(a.surname)
    title = self.title
    journal = self.publicationName
    volume = self.volume
    issue = self.issueIdentifier
    if volume and issue:
        volissue = '\\textbf{{{0}({1})}}'.format(volume, issue)
    elif volume:
        # BUG FIX: the original '\\textbf{{0}}' rendered the literal text
        # "{0}"; triple braces are needed to substitute the volume.
        volissue = '\\textbf{{{0}}}'.format(volume)
    else:
        volissue = 'no volume'
    date = self.coverDate
    if self.pageRange:
        pages = 'p. {0}'.format(self.pageRange)
    elif self.startingPage:
        # BUG FIX: '{self.startingPage}'.format(self) raised KeyError
        # ('self' is a named field but was passed positionally).
        pages = 'p. {0}'.format(self.startingPage)
    elif self.article_number:
        # BUG FIX: same positional/named mismatch as above.
        pages = 'Art. No. {0}, '.format(self.article_number)
    else:
        pages = '(no pages found)'
    doi = '\\href{{https://doi.org/{0}}}{{doi:{0}}}'.format(self.doi)
    scopus_url = '\\href{{{0}}}{{scopus:{1}}}'.format(self.scopus_url,
                                                      self.eid)
    # All the citation fields above are locals by design.
    return s.format(**locals())
|
Return LaTeX representation of the abstract.
|
def get_default_config(self):
    """
    Return the default config for the handler
    """
    config = super(LibratoHandler, self).get_default_config()
    librato_defaults = {
        'user': '',
        'apikey': '',
        'apply_metric_prefix': False,
        'queue_max_size': 300,
        'queue_max_interval': 60,
        'include_filters': ['^.*'],
    }
    config.update(librato_defaults)
    return config
|
Return the default config for the handler
|
def QCapsulate(self, widget, name, blocking = False, nude = False):
    """Helper function that encapsulates QWidget into a QMainWindow

    :param widget: QWidget to wrap; becomes the window's central widget.
    :param name: window title.
    :param blocking: if True, window is made application-modal.
    :param nude: if True, window is created with Qt.Dialog flags
        (splash-screen-like; see TODO below).
    :returns: the new QuickWindow (QMainWindow subclass). Its
        ``signals.close`` / ``signals.show`` Qt signals are re-emitted from
        the corresponding events while ``propagate`` is True.
    """
    class QuickWindow(QtWidgets.QMainWindow):
        class Signals(QtCore.QObject):
            # Re-emitted from closeEvent/showEvent so callers can react
            # without subclassing the window.
            close = QtCore.Signal()
            show = QtCore.Signal()
        def __init__(self, blocking = False, parent = None, nude = False):
            super().__init__(parent)
            self.propagate = True # send signals or not
            self.setStyleSheet(style.main_gui)
            if (blocking):
                self.setWindowModality(QtCore.Qt.ApplicationModal)
            if (nude):
                # http://doc.qt.io/qt-5/qt.html#WindowType-enum
                # TODO: create a widget for a proper splashscreen (omitting X11 and centering manually)
                # self.setWindowFlags(QtCore.Qt.Popup) # Qt 5.9+ : setFlags()
                # self.setWindowFlags(QtCore.Qt.SplashScreen | QtCore.Qt.WindowStaysOnTopHint)
                self.setWindowFlags(QtCore.Qt.Dialog)
            self.signals = self.Signals()
        def closeEvent(self, e):
            if (self.propagate):
                self.signals.close.emit()
            e.accept()
        def showEvent(self, e):
            if (self.propagate):
                self.signals.show.emit()
            e.accept()
        def setPropagate(self):
            self.propagate = True
        def unSetPropagate(self):
            self.propagate = False
    win = QuickWindow(blocking = blocking, nude = nude)
    win.setCentralWidget(widget)
    # NOTE(review): QMainWindow manages its own layout; setting an HBox
    # layout here likely has no effect (Qt warns at runtime) — confirm
    # whether this line is needed.
    win.setLayout(QtWidgets.QHBoxLayout())
    win.setWindowTitle(name)
    return win
|
Helper function that encapsulates QWidget into a QMainWindow
|
def rand_article(num_p=(4, 10), num_s=(2, 15), num_w=(5, 40)):
    """Random article text.

    Example::

        >>> rand_article()
        ...
    """
    paragraphs = []
    for _ in range(random.randint(*num_p)):
        sentences = []
        for _ in range(random.randint(*num_s)):
            words = [
                rand_str(random.randint(1, 15), string.ascii_lowercase)
                for _ in range(random.randint(*num_w))
            ]
            sentences.append(" ".join(words))
        paragraphs.append(". ".join(sentences))
    return "\n\n".join(paragraphs)
|
Random article text.
Example::
>>> rand_article()
...
|
def main():
    """
    NAME
        lowrie_magic.py
    DESCRIPTION
       plots intensity decay curves for Lowrie experiments
    SYNTAX
        lowrie_magic.py -h [command line options]
    INPUT
       takes measurements formatted input files
    OPTIONS
        -h prints help message and quits
        -f FILE: specify input file, default is measurements.txt
        -WD DIR: specify working directory, default is current directory
        -N do not normalize by maximum magnetization
        -fmt [svg, pdf, eps, png] specify fmt, default is svg
        -sav saves plots and quits
        -DM [2, 3] MagIC data model number
    """
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    if len(sys.argv) <= 1:
        print(main.__doc__)
        print('you must supply a file name')
        sys.exit()
    FIG = {}  # plot dictionary
    FIG['lowrie'] = 1  # demag is figure 1
    pmagplotlib.plot_init(FIG['lowrie'], 6, 6)
    norm = 1  # default is to normalize by maximum axis
    in_file = pmag.get_named_arg("-f", "measurements.txt")
    dir_path = pmag.get_named_arg("-WD", ".")
    in_file = pmag.resolve_file_name(in_file, dir_path)
    data_model = pmag.get_named_arg("-DM", 3)
    data_model = int(float(data_model))
    fmt = pmag.get_named_arg("-fmt", "svg")
    if '-N' in sys.argv:
        norm = 0  # don't normalize
    if '-sav' in sys.argv:
        plot = 1  # silently save and quit
    else:
        plot = 0  # generate plots
    print(in_file)
    # read in data
    PmagRecs, file_type = pmag.magic_read(in_file)
    if data_model == 2 and file_type != "magic_measurements":
        print('bad input file', file_type)
        sys.exit()
    if data_model == 3 and file_type != "measurements":
        print('bad input file', file_type)
        sys.exit()
    # column names differ between MagIC data model 2 and 3
    if data_model == 2:
        meth_code_col = 'magic_method_codes'
        spec_col = 'er_specimen_name'
        dec_col = "measurement_dec"
        inc_col = 'measurement_inc'
        moment_col = 'measurement_magn_moment'
        temp_col = 'treatment_temp'
    else:
        meth_code_col = 'method_codes'
        spec_col = 'specimen'
        dec_col = 'dir_dec'
        inc_col = 'dir_inc'
        moment_col = 'magn_moment'
        temp_col = "treat_temp"
    PmagRecs = pmag.get_dictitem(
        PmagRecs, meth_code_col, 'LP-IRM-3D', 'has')  # get all 3D IRM records
    if len(PmagRecs) == 0:
        print('no records found with the method code LP-IRM-3D')
        sys.exit()
    specs = pmag.get_dictkey(PmagRecs, spec_col, '')
    sids = []
    for spec in specs:
        if spec not in sids:
            sids.append(spec)  # get list of unique specimen names
    for spc in sids:  # step through the specimen names
        print(spc)
        specdata = pmag.get_dictitem(
            PmagRecs, spec_col, spc, 'T')  # get all this one's data
        DIMs, Temps = [], []
        for dat in specdata:  # step through the data
            DIMs.append([float(dat[dec_col]), float(
                dat[inc_col]), float(dat[moment_col])])
            # treatment temperature is stored in kelvin; convert to Celsius
            Temps.append(float(dat[temp_col])-273.)
        carts = pmag.dir2cart(DIMs).transpose()
        if norm == 1:  # want to normalize
            nrm = (DIMs[0][2])  # normalize by NRM
            ylab = "M/M_o"
        else:
            nrm = 1.  # don't normalize
            ylab = "Magnetic moment (Am^2)"
        xlab = "Temperature (C)"
        # plot each cartesian component: line + symbol per axis
        pmagplotlib.plot_xy(FIG['lowrie'], Temps, abs(carts[0]) / nrm, sym='r-')
        pmagplotlib.plot_xy(FIG['lowrie'], Temps, abs(carts[0]) / nrm, sym='ro')  # X direction
        pmagplotlib.plot_xy(FIG['lowrie'], Temps, abs(carts[1]) / nrm, sym='c-')
        pmagplotlib.plot_xy(FIG['lowrie'], Temps, abs(carts[1]) / nrm, sym='cs')  # Y direction
        pmagplotlib.plot_xy(FIG['lowrie'], Temps, abs(carts[2]) / nrm, sym='k-')
        pmagplotlib.plot_xy(FIG['lowrie'], Temps, abs(carts[2]) / nrm, sym='k^', title=spc, xlab=xlab, ylab=ylab)  # Z direction
        files = {'lowrie': 'lowrie:_'+spc+'_.'+fmt}
        if plot == 0:
            # interactive mode: show the figure and ask what to do next
            pmagplotlib.draw_figs(FIG)
            ans = input('S[a]ve figure? [q]uit, <return> to continue ')
            if ans == 'a':
                pmagplotlib.save_plots(FIG, files)
            elif ans == 'q':
                sys.exit()
        else:
            pmagplotlib.save_plots(FIG, files)
        pmagplotlib.clearFIG(FIG['lowrie'])
|
NAME
lowrie_magic.py
DESCRIPTION
plots intensity decay curves for Lowrie experiments
SYNTAX
lowrie_magic.py -h [command line options]
INPUT
takes measurements formatted input files
OPTIONS
-h prints help message and quits
-f FILE: specify input file, default is magic_measurements.txt
-N do not normalize by maximum magnetization
-fmt [svg, pdf, eps, png] specify fmt, default is svg
-sav saves plots and quits
-DM [2, 3] MagIC data model number
|
def path_join(*args):
    """
    Wrapper around `os.path.join`.

    Makes sure to join paths of the same type by converting every argument
    to text via `paramiko.py3compat.u` before joining (the original
    docstring said "bytes", but `u` produces unicode/str).
    """
    args = (paramiko.py3compat.u(arg) for arg in args)
    return os.path.join(*args)
|
Wrapper around `os.path.join`.
Makes sure to join paths of the same type (bytes).
|
def snr_from_loglr(loglr):
    """Returns SNR computed from the given log likelihood ratio(s). This is
    defined as `sqrt(2*loglr)`. If the log likelihood ratio is < 0, returns 0.

    Parameters
    ----------
    loglr : array or float
        The log likelihood ratio(s) to evaluate.

    Returns
    -------
    array or float
        The SNRs computed from the log likelihood ratios.
    """
    # Accept any real scalar (int, float, numpy scalar). The original
    # `isinstance(loglr, float)` check missed ints, so an int input produced
    # a 0-d result that crashed on the item assignment below.
    singleval = numpy.isscalar(loglr)
    if singleval:
        loglr = numpy.array([loglr], dtype=float)
    # temporarily quiet sqrt(-1) warnings
    with numpy.errstate(invalid='ignore'):
        snrs = numpy.sqrt(2 * loglr)
    # sqrt of a negative loglr yields nan; map those to 0
    snrs[numpy.isnan(snrs)] = 0.
    if singleval:
        snrs = snrs[0]
    return snrs
|
Returns SNR computed from the given log likelihood ratio(s). This is
defined as `sqrt(2*loglr)`.If the log likelihood ratio is < 0, returns 0.
Parameters
----------
loglr : array or float
The log likelihood ratio(s) to evaluate.
Returns
-------
array or float
The SNRs computed from the log likelihood ratios.
|
def _infer_sig_len(file_name, fmt, n_sig, dir_name, pb_dir=None):
    """
    Infer the length of a signal from a dat file.

    Parameters
    ----------
    file_name : str
        Name of the dat file
    fmt : str
        WFDB fmt of the dat file
    n_sig : int
        Number of signals contained in the dat file
    dir_name : str
        Local directory containing the dat file (used when `pb_dir` is None)
    pb_dir : str, optional
        PhysioBank directory to query for a remote file size; if None, the
        size is read from the local filesystem instead.

    Returns
    -------
    sig_len : int
        Inferred number of samples per signal.

    Notes
    -----
    sig_len * n_sig * bytes_per_sample == file_size
    """
    if pb_dir is None:
        file_size = os.path.getsize(os.path.join(dir_name, file_name))
    else:
        file_size = download._remote_file_size(file_name=file_name,
                                               pb_dir=pb_dir)
    # int() truncates: any trailing partial frame is deliberately dropped.
    sig_len = int(file_size / (BYTES_PER_SAMPLE[fmt] * n_sig))
    return sig_len
|
Infer the length of a signal from a dat file.
Parameters
----------
file_name : str
Name of the dat file
fmt : str
WFDB fmt of the dat file
n_sig : int
Number of signals contained in the dat file
Notes
-----
sig_len * n_sig * bytes_per_sample == file_size
|
def expect_bounded(__funcname=_qualified_name, **named):
    """
    Preprocessing decorator verifying that inputs fall INCLUSIVELY between
    bounds.
    Bounds should be passed as a pair of ``(min_value, max_value)``.
    ``None`` may be passed as ``min_value`` or ``max_value`` to signify that
    the input is only bounded above or below.
    Examples
    --------
    >>> @expect_bounded(x=(1, 5))
    ... def foo(x):
    ...    return x + 1
    ...
    >>> foo(1)
    2
    >>> foo(5)
    6
    >>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    Traceback (most recent call last):
    ...
    ValueError: ...foo() expected a value inclusively between 1 and 5 for
    argument 'x', but got 6 instead.
    >>> @expect_bounded(x=(2, None))
    ... def foo(x):
    ...    return x
    ...
    >>> foo(100000)
    100000
    >>> foo(1) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    Traceback (most recent call last):
    ...
    ValueError: ...foo() expected a value greater than or equal to 2 for
    argument 'x', but got 1 instead.
    >>> @expect_bounded(x=(None, 5))
    ... def foo(x):
    ...    return x
    ...
    >>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
    Traceback (most recent call last):
    ...
    ValueError: ...foo() expected a value less than or equal to 5 for
    argument 'x', but got 6 instead.
    """
    def _make_bounded_check(bounds):
        # Build a failure predicate + human-readable description for one
        # (lower, upper) bounds pair; either bound may be None (unbounded).
        (lower, upper) = bounds
        if lower is None:
            # Only bounded above.
            def should_fail(value):
                return value > upper
            predicate_descr = "less than or equal to " + str(upper)
        elif upper is None:
            # Only bounded below.
            def should_fail(value):
                return value < lower
            predicate_descr = "greater than or equal to " + str(lower)
        else:
            # Bounded on both sides, inclusively.
            def should_fail(value):
                return not (lower <= value <= upper)
            predicate_descr = "inclusively between %s and %s" % bounds

        # {predicate} is filled in now; the %(...)s placeholders are left
        # for make_check to fill in at call time.
        template = (
            "%(funcname)s() expected a value {predicate}"
            " for argument '%(argname)s', but got %(actual)s instead."
        ).format(predicate=predicate_descr)

        return make_check(
            exc_type=ValueError,
            template=template,
            pred=should_fail,
            actual=repr,
            funcname=__funcname,
        )

    return _expect_bounded(_make_bounded_check, __funcname=__funcname, **named)
|
Preprocessing decorator verifying that inputs fall INCLUSIVELY between
bounds.
Bounds should be passed as a pair of ``(min_value, max_value)``.
``None`` may be passed as ``min_value`` or ``max_value`` to signify that
the input is only bounded above or below.
Examples
--------
>>> @expect_bounded(x=(1, 5))
... def foo(x):
... return x + 1
...
>>> foo(1)
2
>>> foo(5)
6
>>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value inclusively between 1 and 5 for
argument 'x', but got 6 instead.
>>> @expect_bounded(x=(2, None))
... def foo(x):
... return x
...
>>> foo(100000)
100000
>>> foo(1) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value greater than or equal to 2 for
argument 'x', but got 1 instead.
>>> @expect_bounded(x=(None, 5))
... def foo(x):
... return x
...
>>> foo(6) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
Traceback (most recent call last):
...
ValueError: ...foo() expected a value less than or equal to 5 for
argument 'x', but got 6 instead.
|
def _parse_area(self, area_xml):
    """Parses an Area tag, which is effectively a room, depending on how the
    Lutron controller programming was done.

    :param area_xml: XML element for the Area tag.
    :returns: the populated Area object, with its outputs, keypads and
        motion sensors attached.
    """
    area = Area(self._lutron,
                name=area_xml.get('Name'),
                integration_id=int(area_xml.get('IntegrationID')),
                occupancy_group_id=area_xml.get('OccupancyGroupAssignedToID'))
    for output_xml in area_xml.find('Outputs'):
        output = self._parse_output(output_xml)
        area.add_output(output)
    # device group in our case means keypad
    # device_group.get('Name') is the location of the keypad
    for device_group in area_xml.find('DeviceGroups'):
        if device_group.tag == 'DeviceGroup':
            devs = device_group.find('Devices')
        elif device_group.tag == 'Device':
            devs = [device_group]
        else:
            # BUG FIX: the original logged `devs` here, which is unbound on
            # this branch and raised NameError; log the offending tag.
            _LOGGER.info("Unknown tag in DeviceGroups child %s" % device_group.tag)
            devs = []
        for device_xml in devs:
            if device_xml.tag != 'Device':
                continue
            if device_xml.get('DeviceType') in (
                    'SEETOUCH_KEYPAD',
                    'SEETOUCH_TABLETOP_KEYPAD',
                    'PICO_KEYPAD',
                    'HYBRID_SEETOUCH_KEYPAD',
                    'MAIN_REPEATER'):
                keypad = self._parse_keypad(device_xml)
                area.add_keypad(keypad)
            elif device_xml.get('DeviceType') == 'MOTION_SENSOR':
                motion_sensor = self._parse_motion_sensor(device_xml)
                area.add_sensor(motion_sensor)
            #elif device_xml.get('DeviceType') == 'VISOR_CONTROL_RECEIVER':
    return area
|
Parses an Area tag, which is effectively a room, depending on how the
Lutron controller programming was done.
|
def update_firmware(self, filename, component_type):
    """Updates the given firmware on the server for the given component.

    :param filename: location of the raw firmware file. Extraction of the
                     firmware file (if in compact format) is expected to
                     happen prior to this invocation.
    :param component_type: Type of component to be applied to.
    :raises: InvalidInputError, if the validation of the input fails
    :raises: IloError, on an error from iLO
    :raises: IloConnectionError, if not able to reach iLO.
    :raises: IloCommandNotSupportedError, if the command is
             not supported on the server
    """
    # Upload the image first; the returned cookie authenticates the
    # subsequent RIBCL flash request.
    fw_img_processor = firmware_controller.FirmwareImageUploader(filename)
    LOG.debug(self._('Uploading firmware file: %s ...'), filename)
    cookie = fw_img_processor.upload_file_to((self.host, self.port),
                                             self.timeout)
    LOG.debug(self._('Uploading firmware file: %s ... done'), filename)
    root = self._get_firmware_update_xml_for_file_and_component(
        filename, component_type)
    element = root.find('LOGIN/RIB_INFO')
    # NOTE(review): TPM_ENABLED is always 'Yes' here — presumably to permit
    # flashing on TPM-enabled servers; confirm it is intended unconditionally.
    etree.SubElement(element, 'TPM_ENABLED', VALUE='Yes')
    extra_headers = {'Cookie': cookie}
    LOG.debug(self._('Flashing firmware file: %s ...'), filename)
    d = self._request_ilo(root, extra_headers=extra_headers)
    # wait till the firmware update completes.
    common.wait_for_ribcl_firmware_update_to_complete(self)
    # Only parse the RIBCL response after completion; raises on iLO errors.
    self._parse_output(d)
    LOG.info(self._('Flashing firmware file: %s ... done'), filename)
|
Updates the given firmware on the server for the given component.
:param filename: location of the raw firmware file. Extraction of the
firmware file (if in compact format) is expected to
happen prior to this invocation.
:param component_type: Type of component to be applied to.
:raises: InvalidInputError, if the validation of the input fails
:raises: IloError, on an error from iLO
:raises: IloConnectionError, if not able to reach iLO.
:raises: IloCommandNotSupportedError, if the command is
not supported on the server
|
def read_file_snippets(file, snippet_store):
    """Parse a file and add all snippets to the snippet_store dictionary"""
    start_reg = re.compile("(.*%%SNIPPET_START%% )([a-zA-Z0-9]+)")
    end_reg = re.compile("(.*%%SNIPPET_END%% )([a-zA-Z0-9]+)")
    open_snippets = {}
    with open(file, encoding="utf-8") as w:
        lines = w.readlines()
    for line in lines:
        printd("Got Line: {}".format(line))
        # Check whether we're entering or leaving a snippet
        start_match = start_reg.match(line)
        if start_match:
            name = start_match.group(2)
            printd("Opened Snippet {}".format(name))
            if name in snippet_store:
                record_error("Repeat definition of Snippet {}".format(name))
            elif name in open_snippets:
                record_error("Snippet already opened {}".format(name))
            else:
                printd("Added {} to open snippets list".format(name))
                open_snippets[name] = []
            continue
        end_match = end_reg.match(line)
        if end_match:
            name = end_match.group(2)
            printd("Found end of Snippet {}".format(name))
            if name not in open_snippets:
                record_error("Reached Snippet End but no start")
            elif name in snippet_store:
                record_error("Repeat definition of Snippet {}".format(name))
            else:
                snippet_store[name] = open_snippets.pop(name)
            continue
        # A normal line: append it to every currently-open snippet
        for snippet in open_snippets.values():
            printd("Adding Line to snippet")
            snippet.append(line)
    # Warn about any snippets that were opened but never closed
    for opened in open_snippets:
        record_error("Snippet {} left open - ignoring".format(opened))
|
Parse a file and add all snippets to the snippet_store dictionary
|
def write_timestamp(self, t, pack=Struct('>Q').pack):
    """
    Write out a Python datetime.datetime object as a 64-bit integer
    representing seconds since the Unix UTC epoch.

    :param t: datetime.datetime; converted via timetuple() + timegm, so a
        naive datetime is treated as UTC.
    :param pack: pre-bound big-endian unsigned 64-bit packer (bound at
        definition time as a deliberate speed optimization).
    :returns: self, allowing chained writes.
    """
    # Double check timestamp, can't imagine why it would be signed
    # NOTE(review): `long` is Python 2 only; this module appears to target
    # py2 — confirm before running on Python 3.
    self._output_buffer.extend(pack(long(timegm(t.timetuple()))))
    return self
|
Write out a Python datetime.datetime object as a 64-bit integer
representing seconds since the Unix UTC epoch.
|
def read_hypergraph(string):
    """
    Read a graph from a XML document. Nodes and hyperedges specified in the
    input will be added to the current graph.

    @type  string: string
    @param string: Input string in XML format specifying a graph.

    @rtype:  hypergraph
    @return: Hypergraph
    """
    hgr = hypergraph()
    # Parse the document once; the original parsed the same string twice
    # for no benefit.
    dom = parseString(string)
    node_elements = dom.getElementsByTagName("node")
    # Register all nodes and hyperedges before linking, so links never
    # reference an unknown endpoint.
    for each_node in node_elements:
        hgr.add_node(each_node.getAttribute('id'))
    for each_edge in dom.getElementsByTagName("hyperedge"):
        hgr.add_hyperedge(each_edge.getAttribute('id'))
    for each_node in node_elements:
        for each_link in each_node.getElementsByTagName("link"):
            hgr.link(str(each_node.getAttribute('id')),
                     str(each_link.getAttribute('to')))
    return hgr
|
Read a graph from a XML document. Nodes and hyperedges specified in the input will be added
to the current graph.
@type string: string
@param string: Input string in XML format specifying a graph.
@rtype: hypergraph
@return: Hypergraph
|
def _to_list(obj):
    '''
    Convert snetinfo object to a dict of its available attributes.

    NOTE(review): despite the name, this returns a dict keyed by attribute
    name, not a list — callers appear to rely on the dict shape.
    '''
    ret = {}
    # __attrs is a module-level sequence of attribute names; copy only
    # those the object actually exposes.
    for attr in __attrs:
        if hasattr(obj, attr):
            ret[attr] = getattr(obj, attr)
    return ret
|
Convert snetinfo object to list
|
def determine_inside_container(self):
    """
    Set self.in_container if we're inside a container

    * Inside container
    * Current token starts a new container
    * Current token ends all containers
    """
    tokenum = self.current.tokenum
    value = self.current.value
    opened = False
    closed = False

    if tokenum == OP:
        # Track open brackets on a stack so whitespace handling can know
        # whether we are inside a tuple, list or dictionary.
        if value in ('(', '[', '{'):
            self.containers.append(value)
            opened = True
        elif value in (')', ']', '}'):
            # no correctness check needed; we just mirror the token stream
            self.containers.pop()
            closed = True

    depth = len(self.containers)
    self.just_ended_container = not depth and closed
    self.just_started_container = depth == 1 and opened
    self.in_container = depth or self.just_ended_container or self.just_started_container
|
Set self.in_container if we're inside a container
* Inside container
* Current token starts a new container
* Current token ends all containers
|
def show_bare_metal_state_output_bare_metal_state(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_bare_metal_state = ET.Element("show_bare_metal_state")
config = show_bare_metal_state
output = ET.SubElement(show_bare_metal_state, "output")
bare_metal_state = ET.SubElement(output, "bare-metal-state")
bare_metal_state.text = kwargs.pop('bare_metal_state')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
def get_all_responses(self, service_name, receive_timeout_in_seconds=None):
    """
    Receive all available responses from the service as a generator.

    :param service_name: The name of the service from which to receive responses
    :type service_name: union[str, unicode]
    :param receive_timeout_in_seconds: How long to block without receiving a message before raising
                                       `MessageReceiveTimeout` (defaults to five seconds unless the settings are
                                       otherwise).
    :type receive_timeout_in_seconds: int

    :return: A generator that yields (request ID, job response)
    :rtype: generator

    :raise: ConnectionError, MessageReceiveError, MessageReceiveTimeout, InvalidMessage, StopIteration
    """
    # Delegate straight to the per-service handler.
    return self._get_handler(service_name).get_all_responses(
        receive_timeout_in_seconds)
|
Receive all available responses from the service as a generator.
:param service_name: The name of the service from which to receive responses
:type service_name: union[str, unicode]
:param receive_timeout_in_seconds: How long to block without receiving a message before raising
`MessageReceiveTimeout` (defaults to five seconds unless the settings are
otherwise).
:type receive_timeout_in_seconds: int
:return: A generator that yields (request ID, job response)
:rtype: generator
:raise: ConnectionError, MessageReceiveError, MessageReceiveTimeout, InvalidMessage, StopIteration
|
async def create_new_sticker_set(self, user_id: base.Integer, name: base.String, title: base.String,
                                 png_sticker: typing.Union[base.InputFile, base.String], emojis: base.String,
                                 contains_masks: typing.Union[base.Boolean, None] = None,
                                 mask_position: typing.Union[types.MaskPosition, None] = None) -> base.Boolean:
    """
    Use this method to create new sticker set owned by a user. The bot will be able to edit the created sticker set.

    Source: https://core.telegram.org/bots/api#createnewstickerset

    :param user_id: User identifier of created sticker set owner
    :type user_id: :obj:`base.Integer`
    :param name: Short name of sticker set, to be used in t.me/addstickers/ URLs (e.g., animals)
    :type name: :obj:`base.String`
    :param title: Sticker set title, 1-64 characters
    :type title: :obj:`base.String`
    :param png_sticker: Png image with the sticker, must be up to 512 kilobytes in size,
        dimensions must not exceed 512px, and either width or height must be exactly 512px.
    :type png_sticker: :obj:`typing.Union[base.InputFile, base.String]`
    :param emojis: One or more emoji corresponding to the sticker
    :type emojis: :obj:`base.String`
    :param contains_masks: Pass True, if a set of mask stickers should be created
    :type contains_masks: :obj:`typing.Union[base.Boolean, None]`
    :param mask_position: A JSON-serialized object for position where the mask should be placed on faces
    :type mask_position: :obj:`typing.Union[types.MaskPosition, None]`
    :return: Returns True on success
    :rtype: :obj:`base.Boolean`
    """
    mask_position = prepare_arg(mask_position)
    # generate_payload collects the local variables by name, so no new
    # locals may be introduced above this line; png_sticker is excluded
    # because it is sent as a multipart file instead.
    payload = generate_payload(**locals(), exclude=['png_sticker'])
    files = {}
    prepare_file(payload, files, 'png_sticker', png_sticker)

    result = await self.request(api.Methods.CREATE_NEW_STICKER_SET, payload, files)
    return result
|
Use this method to create new sticker set owned by a user. The bot will be able to edit the created sticker set.
Source: https://core.telegram.org/bots/api#createnewstickerset
:param user_id: User identifier of created sticker set owner
:type user_id: :obj:`base.Integer`
:param name: Short name of sticker set, to be used in t.me/addstickers/ URLs (e.g., animals)
:type name: :obj:`base.String`
:param title: Sticker set title, 1-64 characters
:type title: :obj:`base.String`
:param png_sticker: Png image with the sticker, must be up to 512 kilobytes in size,
dimensions must not exceed 512px, and either width or height must be exactly 512px.
:type png_sticker: :obj:`typing.Union[base.InputFile, base.String]`
:param emojis: One or more emoji corresponding to the sticker
:type emojis: :obj:`base.String`
:param contains_masks: Pass True, if a set of mask stickers should be created
:type contains_masks: :obj:`typing.Union[base.Boolean, None]`
:param mask_position: A JSON-serialized object for position where the mask should be placed on faces
:type mask_position: :obj:`typing.Union[types.MaskPosition, None]`
:return: Returns True on success
:rtype: :obj:`base.Boolean`
|
def visit_repr(self, node, parent):
    """Rebuild a Backquote node as a fresh Repr instance."""
    repr_node = nodes.Repr(node.lineno, node.col_offset, parent)
    child = self.visit(node.value, repr_node)
    repr_node.postinit(child)
    return repr_node
|
visit a Backquote node by returning a fresh instance of it
|
def orderrun_detail(dk_api, kitchen, pd):
    """
    Build a human-readable order-run report and return it inside the
    DKReturnCode's message.

    :param dk_api: -- api object
    :param kitchen: string -- kitchen name
    :param pd: dict -- display options; recognized keys include SUMMARY,
        ORDER_RUN_ID, ORDER_ID, 'status', 'runstatus', 'disp_order_id'
        and 'disp_order_run_id'
    :rtype: DKReturnCode
    """
    # Remember whether the caller asked for the summary section...
    if DKCloudCommandRunner.SUMMARY in pd:
        display_summary = True
    else:
        display_summary = False
    # always get summary information
    pd[DKCloudCommandRunner.SUMMARY] = True
    rc = dk_api.orderrun_detail(kitchen, pd)
    s = ''
    if not rc.ok() or not isinstance(rc.get_payload(), list):
        s = 'Issue with getting order run details\nmessage: %s' % rc.get_message()
        rc.set_message(s)
        return rc
    # we have a list of servings, find the right dict
    serving_list = rc.get_payload()
    serving = None
    if DKCloudCommandRunner.ORDER_RUN_ID in pd:
        # Caller named a specific order run.
        order_run_id = pd[DKCloudCommandRunner.ORDER_RUN_ID]
        for serv in serving_list:
            if serv[DKCloudCommandRunner.ORDER_RUN_ID] == order_run_id:
                serving = serv
                break
    elif DKCloudCommandRunner.ORDER_ID in pd:
        # Caller named a specific order.
        order_id = pd[DKCloudCommandRunner.ORDER_ID]
        for serv in serving_list:
            if serv[DKCloudCommandRunner.ORDER_ID] == order_id:
                serving = serv
                break
    else:
        # find the newest serving
        # NOTE(review): comparing against the initial `latest = None` only
        # works on Python 2 (where None < everything); on Python 3 the
        # `> None` comparison raises TypeError.
        # NOTE(review): the loop deliberately reuses `serving` as its loop
        # variable; it is reassigned from serving_list[dex] just below.
        dex = -1
        latest = None
        for i, serving in enumerate(serving_list):
            if DKCloudCommandRunner.ORDER_ID in serving and serving[DKCloudCommandRunner.ORDER_ID] > latest:
                latest = serving[DKCloudCommandRunner.ORDER_ID]
                dex = i
        if dex != -1:
            serving = serving_list[dex]
    if serving is None:
        rc.set(rc.DK_FAIL,
               "No OrderRun information. Try using 'dk order-list -k %s' to see what is available." % kitchen)
        return rc
    # serving now contains the dictionary of the serving to display
    # pull out the information and put it in the message string of the rc
    if serving and display_summary:
        s += '\nORDER RUN SUMMARY\n\n'
        summary = None
        if DKCloudCommandRunner.SUMMARY in serving:
            summary = serving[DKCloudCommandRunner.SUMMARY]
            pass
        s += 'Order ID:\t%s\n' % serving[DKCloudCommandRunner.ORDER_ID]
        orid_from_serving = serving[DKCloudCommandRunner.ORDER_RUN_ID]
        s += 'Order Run ID:\t%s\n' % orid_from_serving
        s += 'Status:\t\t%s\n' % serving['status']
        s += 'Kitchen:\t%s\n' % kitchen
        if summary and 'name' in summary:
            s += 'Recipe:\t\t%s\n' % summary['name']
        else:
            s += 'Recipe:\t\t%s\n' % 'Not available'
        # variation name is inside the order id, pull it out
        s += 'Variation:\t%s\n' % orid_from_serving.split('#')[3]
        if summary and 'start-time' in summary:
            start_time = summary['start-time']
            if isinstance(start_time, basestring):
                s += 'Start time:\t%s\n' % summary['start-time'].split('.')[0]
            else:
                s += 'Start time:\t%s\n' % 'Not available 1'
        else:
            s += 'Start time:\t%s\n' % 'Not available 2'
        run_time = None
        if summary and 'total-recipe-time' in summary:
            run_time = summary['total-recipe-time']
        if isinstance(run_time, basestring):  # Active recipes don't have a run-duration
            s += 'Run duration:\t%s (H:M:S)\n' % run_time.split('.')[0]
        else:
            s += 'Run duration:\t%s\n' % 'Not available'
    # Optional sections, each gated on the serving actually carrying them.
    if serving and DKCloudCommandRunner.TESTRESULTS in serving and \
            isinstance(serving[DKCloudCommandRunner.TESTRESULTS], basestring):
        s += '\nTEST RESULTS'
        s += serving[DKCloudCommandRunner.TESTRESULTS]
    if serving and DKCloudCommandRunner.TIMINGRESULTS in serving and \
            isinstance(serving[DKCloudCommandRunner.TIMINGRESULTS], basestring):
        s += '\n\nTIMING RESULTS\n\n'
        s += serving[DKCloudCommandRunner.TIMINGRESULTS]
    if serving and DKCloudCommandRunner.LOGS in serving and \
            isinstance(serving[DKCloudCommandRunner.LOGS], basestring):
        s += '\n\nLOG\n\n'
        s += DKCloudCommandRunner._decompress(serving[DKCloudCommandRunner.LOGS])
    if 'status' in pd and serving and DKCloudCommandRunner.SUMMARY in serving and \
            isinstance(serving[DKCloudCommandRunner.SUMMARY], dict):
        s += '\nSTEP STATUS\n\n'
        summary = serving[DKCloudCommandRunner.SUMMARY]
        # loop through the sorted keys
        for key in sorted(summary):
            value = summary[key]
            if isinstance(value, dict):
                # node/step info is stored as a dictionary, print the node name (key) and status
                if 'status' in value:
                    status = value['status']
                else:
                    status = 'unknown'
                s += '%s\t%s\n' % (key, status)
    if serving and 'runstatus' in pd:
        s += serving['status']
    if serving and 'disp_order_id' in pd and DKCloudCommandRunner.ORDER_ID in serving:
        s += serving[DKCloudCommandRunner.ORDER_ID]
    if serving and 'disp_order_run_id' in pd and DKCloudCommandRunner.ORDER_RUN_ID in serving:
        s += serving[DKCloudCommandRunner.ORDER_RUN_ID]
    rc.set_message(s)
    return rc
|
returns a string.
:param dk_api: -- api object
:param kitchen: string
:param pd: dict
:rtype: DKReturnCode
|
def get_txn_outputs(raw_tx_hex, output_addr_list, coin_symbol):
    '''
    Used to verify a transaction hex does what's expected of it.
    Must supply a list of output addresses so that the library can try to
    convert from script to address using both pubkey and script.
    Returns a list of the following form:
        [{'value': 12345, 'address': '1abc...'}, ...]
    Uses @vbuterin's decoding methods.
    '''
    # Defensive checks before doing any decoding work:
    err_msg = 'Library not able to parse %s transactions' % coin_symbol
    assert lib_can_deserialize_cs(coin_symbol), err_msg
    assert isinstance(output_addr_list, (list, tuple))
    for output_addr in output_addr_list:
        assert is_valid_address(output_addr), output_addr
    expected_addrs = set(output_addr_list)  # O(1) membership tests
    # Hoist the version-byte lookups out of the loop.
    pubkey_vbyte = COIN_SYMBOL_MAPPINGS[coin_symbol]['vbyte_pubkey']
    script_vbyte = COIN_SYMBOL_MAPPINGS[coin_symbol]['vbyte_script']
    results = []
    for tx_out in deserialize(str(raw_tx_hex)).get('outs', []):
        entry = {'value': tx_out['value']}
        script = tx_out['script']
        # Classify the output: pubkey address, script address, or op_return data.
        pubkey_addr = script_to_address(script, vbyte=pubkey_vbyte)
        script_addr = script_to_address(script, vbyte=script_vbyte)
        is_nulldata = script[0:2] == '6a'
        if pubkey_addr in expected_addrs:
            entry['address'] = pubkey_addr
        elif script_addr in expected_addrs:
            entry['address'] = script_addr
        elif is_nulldata:
            entry['script'] = script
            entry['script_type'] = 'null-data'
        else:
            raise Exception('Script %s Does Not Contain a Valid Output Address: %s' % (
                script,
                expected_addrs,
            ))
        results.append(entry)
    return results
|
Used to verify a transaction hex does what's expected of it.
Must supply a list of output addresses so that the library can try to
convert from script to address using both pubkey and script.
Returns a list of the following form:
[{'value': 12345, 'address': '1abc...'}, ...]
Uses @vbuterin's decoding methods.
|
def bootstrapSampleFromData(data,weights=None,seed=0):
    '''
    Samples rows from the input array of data, generating a new data array with
    an equal number of rows (records). Rows are drawn with equal probability
    by default, but probabilities can be specified with weights (must sum to 1).

    Parameters
    ----------
    data : np.array
        An array of data, with each row representing a record.
    weights : np.array
        A weighting array with length equal to data.shape[0].
    seed : int
        A seed for the random number generator.

    Returns
    -------
    new_data : np.array
        A resampled version of input data.
    '''
    # Set up the random number generator
    RNG = np.random.RandomState(seed)
    N = data.shape[0]
    # cutoffs[i] is the UPPER edge of row i's probability interval in [0, 1].
    if weights is not None:
        cutoffs = np.cumsum(weights)
    else:
        # Equal probability 1/N per row.
        # BUGFIX: np.linspace(0, 1, N) put a zero-width first bin, giving row 0
        # probability 0 and every other row probability 1/(N-1).
        cutoffs = np.linspace(1.0 / N, 1.0, N)
    # Map each uniform draw onto the bin (row index) it falls into.
    indices = np.searchsorted(cutoffs,RNG.uniform(size=N))
    # Create a bootstrapped sample (deep copy so the result owns its memory)
    new_data = deepcopy(data[indices,])
    return new_data
|
Samples rows from the input array of data, generating a new data array with
an equal number of rows (records). Rows are drawn with equal probability
by default, but probabilities can be specified with weights (must sum to 1).
Parameters
----------
data : np.array
An array of data, with each row representing a record.
weights : np.array
A weighting array with length equal to data.shape[0].
seed : int
A seed for the random number generator.
Returns
-------
new_data : np.array
A resampled version of input data.
|
def __error_middleware(self, res, res_json):
    """
    Middleware that raises an exception when HTTP statuscode is an error code.

    Raises UpCloudAPIError built from the response's 'error' payload for
    client-error status codes; otherwise passes res_json through unchanged.
    """
    client_error_codes = {400, 401, 402, 403, 404, 405, 406, 409}
    if res.status_code in client_error_codes:
        err = res_json.get('error', {})
        raise UpCloudAPIError(error_code=err.get('error_code'),
                              error_message=err.get('error_message'))
    return res_json
|
Middleware that raises an exception when HTTP statuscode is an error code.
|
def mean(self, axis=None, keepdims=False):
    """
    Compute the mean of the array along the requested axis.

    Parameters
    ----------
    axis : tuple or int, optional, default=None
        Axis (or axes) to reduce over; None reduces over all axes.
    keepdims : boolean, optional, default=False
        If True, retain each reduced axis with size 1.
    """
    # Delegate to the shared statistic dispatcher.
    return self._stat(axis, name='mean', keepdims=keepdims)
|
Return the mean of the array over the given axis.
Parameters
----------
axis : tuple or int, optional, default=None
Axis to compute statistic over, if None
will compute over all axes
keepdims : boolean, optional, default=False
Keep axis remaining after operation with size 1.
|
def parse_vote_data(self, vote_data):
    """
    Parse data from parltrack votes db dumps (1 proposal).

    Returns the result of parse_proposal_data, or None when the vote
    record has no 'epref' key or its dossier cannot be resolved.
    """
    # Idiom fix: membership test directly on the dict instead of .keys().
    if 'epref' not in vote_data:
        logger.debug('Could not import data without epref %s',
                     vote_data['title'])
        return
    dossier_pk = self.get_dossier(vote_data['epref'])
    if not dossier_pk:
        logger.debug('Cannot find dossier with remote id %s',
                     vote_data['epref'])
        return
    return self.parse_proposal_data(
        proposal_data=vote_data,
        dossier_pk=dossier_pk
    )
|
Parse data from parltrack votes db dumps (1 proposal)
|
def parse_buffer(buffer, mode="exec", flags=None, version=None, engine=None):
    """
    Like :meth:`parse`, but accepts a :class:`source.Buffer` instead of
    source and filename, and returns comments as well.
    :see: :meth:`parse`
    :return: (:class:`ast.AST`, list of :class:`source.Comment`)
        Abstract syntax tree and comments
    :raise ValueError: if *mode* is not ``"exec"``, ``"single"`` or ``"eval"``
    """
    # BUGFIX: the previous default flags=[] was a shared mutable default
    # argument; None is used as the sentinel instead.
    if flags is None:
        flags = []
    if version is None:
        version = sys.version_info[0:2]
    if engine is None:
        engine = pythonparser_diagnostic.Engine()
    lexer = pythonparser_lexer.Lexer(buffer, version, engine)
    if mode in ("single", "eval"):
        # Interactive grammars terminate input on a single NEWLINE.
        lexer.interactive = True
    parser = pythonparser_parser.Parser(lexer, version, engine)
    parser.add_flags(flags)
    if mode == "exec":
        return parser.file_input(), lexer.comments
    elif mode == "single":
        return parser.single_input(), lexer.comments
    elif mode == "eval":
        return parser.eval_input(), lexer.comments
    else:
        # Previously an unknown mode fell through and silently returned None.
        raise ValueError("mode must be 'exec', 'single' or 'eval', got %r" % (mode,))
|
Like :meth:`parse`, but accepts a :class:`source.Buffer` instead of
source and filename, and returns comments as well.
:see: :meth:`parse`
:return: (:class:`ast.AST`, list of :class:`source.Comment`)
Abstract syntax tree and comments
|
def restore(file_name, jail=None, chroot=None, root=None):
    '''
    Reads archive created by pkg backup -d and recreates the database.
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.restore /tmp/pkg
    jail
        Restore database to the specified jail. Note that this will run the
        command within the jail, and so the path to the file from which the pkg
        database will be restored is relative to the root of the jail.
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.restore /tmp/pkg jail=<jail name or id>
    chroot
        Restore database to the specified chroot (ignored if ``jail`` is
        specified). Note that this will run the command within the chroot, and
        so the path to the file from which the pkg database will be restored is
        relative to the root of the chroot.
    root
        Restore database to the specified root (ignored if ``jail`` is
        specified). Note that this will run the command within the root, and
        so the path to the file from which the pkg database will be restored is
        relative to the root of the root.
    CLI Example:
    .. code-block:: bash
        salt '*' pkg.restore /tmp/pkg chroot=/path/to/chroot
    '''
    # Assemble the pkg invocation for the target environment, then restore.
    cmd = _pkg(jail, chroot, root) + ['backup', '-r', file_name]
    return __salt__['cmd.run'](
        cmd,
        output_loglevel='trace',
        python_shell=False
    )
|
Reads archive created by pkg backup -d and recreates the database.
CLI Example:
.. code-block:: bash
salt '*' pkg.restore /tmp/pkg
jail
Restore database to the specified jail. Note that this will run the
command within the jail, and so the path to the file from which the pkg
database will be restored is relative to the root of the jail.
CLI Example:
.. code-block:: bash
salt '*' pkg.restore /tmp/pkg jail=<jail name or id>
chroot
Restore database to the specified chroot (ignored if ``jail`` is
specified). Note that this will run the command within the chroot, and
so the path to the file from which the pkg database will be restored is
relative to the root of the chroot.
root
Restore database to the specified root (ignored if ``jail`` is
specified). Note that this will run the command within the root, and
so the path to the file from which the pkg database will be restored is
relative to the root of the root.
CLI Example:
.. code-block:: bash
salt '*' pkg.restore /tmp/pkg chroot=/path/to/chroot
|
def contour(self, win, ngr=20, layers=0, levels=20, layout=True, labels=True,
            decimals=0, color=None, newfig=True, figsize=None, legend=True):
    """Contour plot
    Parameters
    ----------
    win : list or tuple
        [x1, x2, y1, y2]
    ngr : scalar, tuple or list
        if scalar: number of grid points in x and y direction
        if tuple or list: nx, ny, number of grid points in x and y direction
    layers : integer, list or array
        layers for which grid is returned
    levels : integer or array (default 20)
        levels that are contoured
    layout : boolean (default True)
        plot layout of elements
    labels : boolean (default True)
        print labels along contours
    decimals : integer (default 0)
        number of decimals of labels along contours
    color : str or list of strings
        color of contour lines
    newfig : boolean (default True)
        create new figure
    figsize : tuple of 2 values (default is mpl default)
        size of figure
    legend : list or boolean (default True)
        add legend to figure
        if list of strings: use strings as names in legend

    Raises
    ------
    ValueError
        if color is not None, a string, or a list of strings
    """
    x1, x2, y1, y2 = win
    if np.isscalar(ngr):
        nx = ny = ngr
    else:
        nx, ny = ngr
    layers = np.atleast_1d(layers)
    xg = np.linspace(x1, x2, nx)
    yg = np.linspace(y1, y2, ny)
    h = self.headgrid(xg, yg, layers)
    if newfig:
        plt.figure(figsize=figsize)
    # color: one color per plotted layer
    if color is None:
        c = plt.rcParams['axes.prop_cycle'].by_key()['color']
    elif type(color) is str:
        c = len(layers) * [color]
    elif type(color) is list:
        c = color
        if len(c) < len(layers):
            # BUGFIX: np.ceil returns a float, and list-repetition requires an
            # int; the original `n * c` raised a TypeError.
            # NOTE(review): uses self.aq.naq (total aquifer layers) rather than
            # len(layers) for the repeat count — preserved as-is, confirm intent.
            n = int(np.ceil(self.aq.naq / len(c)))
            c = n * c
    else:
        # Previously an unsupported color type left `c` unbound (NameError).
        raise ValueError('color must be None, a string, or a list of strings')
    # contour each requested layer, collecting handles for the legend
    cscollectionlist = []
    for i in range(len(layers)):
        cs = plt.contour(xg, yg, h[i], levels, colors=c[i])
        cscollectionlist.append(cs.collections[0])
        if labels:
            fmt = '%1.' + str(decimals) + 'f'
            plt.clabel(cs, fmt=fmt)
    if type(legend) is list:
        plt.legend(cscollectionlist, legend)
    elif legend:
        legendlist = ['layer ' + str(i) for i in layers]
        plt.legend(cscollectionlist, legendlist)
    plt.axis('scaled')
    if layout:
        self.plot(win=[x1, x2, y1, y2], newfig=False)
|
Contour plot
Parameters
----------
win : list or tuple
[x1, x2, y1, y2]
ngr : scalar, tuple or list
if scalar: number of grid points in x and y direction
if tuple or list: nx, ny, number of grid points in x and y direction
layers : integer, list or array
layers for which grid is returned
levels : integer or array (default 20)
levels that are contoured
layout : boolean (default True)
plot layout of elements
labels : boolean (default True)
print labels along contours
decimals : integer (default 0)
number of decimals of labels along contours
color : str or list of strings
color of contour lines
newfig : boolean (default True)
create new figure
figsize : tuple of 2 values (default is mpl default)
size of figure
legend : list or boolean (default True)
add legend to figure
if list of strings: use strings as names in legend
|
def get_value(file, element):
    '''
    Returns the value of the matched xpath element
    CLI Example:
    .. code-block:: bash
        salt '*' xml.get_value /tmp/test.xml ".//element"
    '''
    try:
        root = ET.parse(file)
        # BUGFIX: do not shadow the `element` parameter with the match result;
        # the original rebound it, so the error log below printed None instead
        # of the xpath that failed to match.
        match = root.find(element)
        return match.text
    except AttributeError:
        # match is None when the xpath matched nothing -> None.text raises here
        log.error("Unable to find element matching %s", element)
        return False
|
Returns the value of the matched xpath element
CLI Example:
.. code-block:: bash
salt '*' xml.get_value /tmp/test.xml ".//element"
|
def prepare_attrib_mapping(self, primitive):
    """Pre-parse buffer mappings for each VBO to detect interleaved data for a primitive"""
    infos = []
    for attr_name, accessor in primitive.attributes.items():
        vbo_info = VBOInfo(*accessor.info())
        vbo_info.attributes.append((attr_name, vbo_info.components))
        # Merge into the previous entry when both reference the same buffer
        # view and their layouts interleave.
        if infos:
            previous = infos[-1]
            if previous.buffer_view == vbo_info.buffer_view and previous.interleaves(vbo_info):
                previous.merge(vbo_info)
                continue
        infos.append(vbo_info)
    return infos
|
Pre-parse buffer mappings for each VBO to detect interleaved data for a primitive
|
def optimize(self, optimizer=None, start=None, messages=False, max_iters=1000, ipython_notebook=True, clear_after_finish=False, **kwargs):
    """
    Optimize the model using self.log_likelihood and self.log_likelihood_gradient, as well as self.priors.
    kwargs are passed to the optimizer. They can be:
    :param max_iters: maximum number of function evaluations
    :type max_iters: int
    :param messages: True: display messages during optimisation; "ipython_notebook": show a notebook progress widget
    :type messages: bool or string
    :param optimizer: which optimizer to use (defaults to self.preferred optimizer)
    :type optimizer: string
    Valid optimizers are:
    - 'scg': scaled conjugate gradient method, recommended for stability.
    See also GPy.inference.optimization.scg
    - 'fmin_tnc': truncated Newton method (see scipy.optimize.fmin_tnc)
    - 'simplex': the Nelder-Mead simplex method (see scipy.optimize.fmin),
    - 'lbfgsb': the l-bfgs-b method (see scipy.optimize.fmin_l_bfgs_b),
    - 'lbfgs': the bfgs method (see scipy.optimize.fmin_bfgs),
    - 'sgd': stochastic gradient descent (see scipy.optimize.sgd). For experts only!
    """
    # Nothing to do when all parameters are fixed or the model has no parameters.
    if self.is_fixed or self.size == 0:
        print('nothing to optimize')
        return
    # Ensure parameter updates are enabled before optimizing.
    if not self.update_model():
        print("updates were off, setting updates on again")
        self.update_model(True)
    # Default starting point: the current (transformed) parameter vector.
    if start is None:
        start = self.optimizer_array
    if optimizer is None:
        optimizer = self.preferred_optimizer
    # Accept either a ready-made Optimizer instance or a string name.
    if isinstance(optimizer, optimization.Optimizer):
        opt = optimizer
        opt.model = self
    else:
        optimizer = optimization.get_optimizer(optimizer)
        opt = optimizer(max_iters=max_iters, **kwargs)
    # Run the optimizer with verbose progress reporting (console or notebook).
    with VerboseOptimization(self, opt, maxiters=max_iters, verbose=messages, ipython_notebook=ipython_notebook, clear_after_finish=clear_after_finish) as vo:
        opt.run(start, f_fp=self._objective_grads, f=self._objective, fp=self._grads)
    # Adopt the optimum found and record this run's history.
    self.optimizer_array = opt.x_opt
    self.optimization_runs.append(opt)
    return opt
|
Optimize the model using self.log_likelihood and self.log_likelihood_gradient, as well as self.priors.
kwargs are passed to the optimizer. They can be:
:param max_iters: maximum number of function evaluations
:type max_iters: int
:param messages: True: display messages during optimisation; "ipython_notebook":
:type messages: bool or string
:param optimizer: which optimizer to use (defaults to self.preferred optimizer)
:type optimizer: string
Valid optimizers are:
- 'scg': scaled conjugate gradient method, recommended for stability.
See also GPy.inference.optimization.scg
- 'fmin_tnc': truncated Newton method (see scipy.optimize.fmin_tnc)
- 'simplex': the Nelder-Mead simplex method (see scipy.optimize.fmin),
- 'lbfgsb': the l-bfgs-b method (see scipy.optimize.fmin_l_bfgs_b),
- 'lbfgs': the bfgs method (see scipy.optimize.fmin_bfgs),
- 'sgd': stochastic gradient descent (see scipy.optimize.sgd). For experts only!
|
def _varian(self, varian):
    """Return the string representation of this entry's variant list.

    Usable for both "Varian" and "Bentuk tidak baku" (non-standard form)
    lists.

    :param varian: List of non-standard forms or variants
    :type varian: list
    :returns: String representation of the variants or non-standard forms
    :rtype: str
    """
    if varian == self.bentuk_tidak_baku:
        label = "Bentuk tidak baku"
    elif varian == self.varian:
        label = "Varian"
    else:
        # Unrecognized list: nothing to render.
        return ''
    return label + ': ' + ', '.join(varian)
|
Mengembalikan representasi string untuk varian entri ini.
Dapat digunakan untuk "Varian" maupun "Bentuk tidak baku".
:param varian: List bentuk tidak baku atau varian
:type varian: list
:returns: String representasi varian atau bentuk tidak baku
:rtype: str
|
def as_encodable(self, index_name):
    """
    :param index_name: The name of the index for the query
    :return: A dict suitable for passing to `json.dumps()`
    """
    if self.facets:
        self._json_['facets'] = {
            name: facet.encodable for name, facet in self.facets.items()
        }
    if self._ms:
        # Request "at_plus" consistency built from the mutation state's
        # scan vectors for this index.
        self._json_.setdefault('ctl', {})['consistency'] = {
            'level': 'at_plus',
            'vectors': {
                index_name: self._ms._to_fts_encodable()
            }
        }
    if self.sort:
        sort = self.sort
        self._json_['sort'] = sort.as_encodable() if isinstance(sort, Sort) else sort
    return self._json_
|
:param index_name: The name of the index for the query
:return: A dict suitable for passing to `json.dumps()`
|
def draw_rect(grid, attr, dc, rect):
    """Draw ``rect`` on ``dc`` as a solid rectangle with a blue outline."""
    brush = wx.Brush(wx.Colour(15, 255, 127), wx.SOLID)
    pen = wx.Pen(wx.BLUE, 1, wx.SOLID)
    dc.SetBrush(brush)
    dc.SetPen(pen)
    dc.DrawRectangleRect(rect)
|
Draws a rect
|
def tica(data=None, lag=10, dim=-1, var_cutoff=0.95, kinetic_map=True, commute_map=False, weights='empirical',
         stride=1, remove_mean=True, skip=0, reversible=True, ncov_max=float('inf'), chunksize=None, **kwargs):
    r""" Time-lagged independent component analysis (TICA).
    TICA is a linear transformation method. In contrast to PCA, which finds
    coordinates of maximal variance, TICA finds coordinates of maximal
    autocorrelation at the given lag time. Therefore, TICA is useful in order
    to find the *slow* components in a dataset and thus an excellent choice to
    transform molecular dynamics data before clustering data for the
    construction of a Markov model. When the input data is the result of a
    Markov process (such as thermostatted molecular dynamics), TICA finds in
    fact an approximation to the eigenfunctions and eigenvalues of the
    underlying Markov operator [1]_.
    It estimates a TICA transformation from *data*. When input data is given as
    an argument, the estimation will be carried out straight away, and the
    resulting object can be used to obtain eigenvalues, eigenvectors or project
    input data onto the slowest TICA components. If no data is given, this
    object is an empty estimator and can be put into a :func:`pipeline` in
    order to use TICA in the streaming mode.
    Parameters
    ----------
    data : ndarray (T, d) or list of ndarray (T_i, d) or a reader created by
        source function array with the data, if available. When given, the TICA
        transformation is immediately computed and can be used to transform data.
    lag : int, optional, default = 10
        the lag time, in multiples of the input time step
    dim : int, optional, default -1
        the number of dimensions (independent components) to project onto. A
        call to the :func:`map <pyemma.coordinates.transform.TICA.map>` function
        reduces the d-dimensional input to only dim dimensions such that the
        data preserves the maximum possible autocorrelation amongst
        dim-dimensional linear projections. -1 means all numerically available
        dimensions will be used unless reduced by var_cutoff.
        Setting dim to a positive value is exclusive with var_cutoff.
    var_cutoff : float in the range [0,1], optional, default 0.95
        Determines the number of output dimensions by including dimensions
        until their cumulative kinetic variance exceeds the fraction
        subspace_variance. var_cutoff=1.0 means all numerically available
        dimensions (see epsilon) will be used, unless set by dim. Setting
        var_cutoff smaller than 1.0 is exclusive with dim
    kinetic_map : bool, optional, default True
        Eigenvectors will be scaled by eigenvalues. As a result, Euclidean
        distances in the transformed data approximate kinetic distances [4]_.
        This is a good choice when the data is further processed by clustering.
    commute_map : bool, optional, default False
        Eigenvector_i will be scaled by sqrt(timescale_i / 2). As a result, Euclidean distances in the transformed
        data will approximate commute distances [5]_.
    stride : int, optional, default = 1
        If set to 1, all input data will be used for estimation. Note that this
        could cause this calculation to be very slow for large data sets. Since
        molecular dynamics data is usually correlated at short timescales, it is
        often sufficient to estimate transformations at a longer stride. Note
        that the stride option in the get_output() function of the returned
        object is independent, so you can parametrize at a long stride, and
        still map all frames through the transformer.
    weights : optional, default="empirical"
        Re-weighting strategy to be used in order to compute equilibrium covariances from non-equilibrium data.
        * "empirical": no re-weighting
        * "koopman": use re-weighting procedure from [6]_
        * weights: An object that allows to compute re-weighting factors. It must possess a method
        weights(X) that accepts a trajectory X (np.ndarray(T, n)) and returns a vector of
        re-weighting factors (np.ndarray(T,)).
    remove_mean: bool, optional, default True
        remove mean during covariance estimation. Should not be turned off.
    skip : int, default=0
        skip the first initial n frames per trajectory.
    reversible: bool, default=True
        symmetrize correlation matrices C_0, C_{\tau}.
    ncov_max : int, default=infinity
        limit the memory usage of the algorithm from [7]_ to an amount that corresponds
        to ncov_max additional copies of each correlation matrix
    chunksize: int, default=None
        Number of data frames to process at once. Choose a higher value here,
        to optimize thread usage and gain processing speed. If None is passed,
        use the default value of the underlying reader/data source. Choose zero to
        disable chunking at all.
    Returns
    -------
    tica : a :class:`TICA <pyemma.coordinates.transform.TICA>` transformation object
        Object for time-lagged independent component (TICA) analysis.
        it contains TICA eigenvalues and eigenvectors, and the projection of
        input data to the dominant TICA
    Notes
    -----
    Given a sequence of multivariate data :math:`X_t`, it computes the
    mean-free covariance and time-lagged covariance matrix:
    .. math::
        C_0 &=      (X_t - \mu)^T \mathrm{diag}(w) (X_t - \mu) \\
        C_{\tau} &= (X_t - \mu)^T \mathrm{diag}(w) (X_t + \tau - \mu)
    where w is a vector of weights for each time step. By default, these weights
    are all equal to one, but different weights are possible, like the re-weighting
    to equilibrium described in [6]_. Subsequently, the eigenvalue problem
    .. math:: C_{\tau} r_i = C_0 \lambda_i r_i,
    is solved, where :math:`r_i` are the independent components and :math:`\lambda_i` are
    their respective normalized time-autocorrelations. The eigenvalues are
    related to the relaxation timescale by
    .. math::
        t_i = -\frac{\tau}{\ln |\lambda_i|}.
    When used as a dimension reduction method, the input data is projected
    onto the dominant independent components.
    TICA was originally introduced for signal processing in [2]_. It was
    introduced to molecular dynamics and as a method for the construction
    of Markov models in [1]_ and [3]_. It was shown in [1]_ that when applied
    to molecular dynamics data, TICA is an approximation to the eigenvalues
    and eigenvectors of the true underlying dynamics.
    Examples
    --------
    Invoke TICA transformation with a given lag time and output dimension:
    >>> import numpy as np
    >>> from pyemma.coordinates import tica
    >>> data = np.random.random((100,3))
    >>> projected_data = tica(data, lag=2, dim=1).get_output()[0]
    For a brief explanation why TICA outperforms PCA to extract a good reaction
    coordinate have a look `here
    <http://docs.markovmodel.org/lecture_tica.html#Example:-TICA-versus-PCA-in-a-stretched-double-well-potential>`_.
    See also
    --------
    :class:`TICA <pyemma.coordinates.transform.TICA>` : tica object
    :func:`pca <pyemma.coordinates.pca>` : for principal component analysis
    .. autoclass:: pyemma.coordinates.transform.tica.TICA
        :members:
        :undoc-members:
        .. rubric:: Methods
        .. autoautosummary:: pyemma.coordinates.transform.tica.TICA
            :methods:
        .. rubric:: Attributes
        .. autoautosummary:: pyemma.coordinates.transform.tica.TICA
            :attributes:
    References
    ----------
    .. [1] Perez-Hernandez G, F Paul, T Giorgino, G De Fabritiis and F Noe. 2013.
       Identification of slow molecular order parameters for Markov model construction
       J. Chem. Phys. 139, 015102. doi:10.1063/1.4811489
    .. [2] L. Molgedey and H. G. Schuster. 1994.
       Separation of a mixture of independent signals using time delayed correlations
       Phys. Rev. Lett. 72, 3634.
    .. [3] Schwantes C, V S Pande. 2013.
       Improvements in Markov State Model Construction Reveal Many Non-Native Interactions in the Folding of NTL9
       J. Chem. Theory. Comput. 9, 2000-2009. doi:10.1021/ct300878a
    .. [4] Noe, F. and Clementi, C. 2015. Kinetic distance and kinetic maps from molecular dynamics simulation.
        J. Chem. Theory. Comput. doi:10.1021/acs.jctc.5b00553
    .. [5] Noe, F., Banisch, R., Clementi, C. 2016. Commute maps: separating slowly-mixing molecular configurations
       for kinetic modeling. J. Chem. Theory. Comput. doi:10.1021/acs.jctc.6b00762
    .. [6] Wu, H., Nueske, F., Paul, F., Klus, S., Koltai, P., and Noe, F. 2016. Bias reduced variational
        approximation of molecular kinetics from short off-equilibrium simulations. J. Chem. Phys. (submitted),
        https://arxiv.org/abs/1610.06773.
    .. [7] Chan, T. F., Golub G. H., LeVeque R. J. 1979. Updating formulae and pairwiese algorithms for
        computing sample variances. Technical Report STAN-CS-79-773, Department of Computer Science, Stanford University.
    """
    from pyemma.coordinates.transform.tica import TICA
    from pyemma.coordinates.estimation.koopman import _KoopmanEstimator
    import types
    from pyemma.util.reflection import get_default_args
    # Resolve the effective chunksize, honouring a possibly deprecated kwarg.
    cs = _check_old_chunksize_arg(chunksize, get_default_args(tica)['chunksize'], **kwargs)
    # Normalize the `weights` argument: string name, weights-object, or list of arrays.
    if isinstance(weights, _string_types):
        if weights == "koopman":
            # Koopman re-weighting needs data up front and a reversible estimate.
            if data is None:
                raise ValueError("Data must be supplied for reweighting='koopman'")
            if not reversible:
                raise ValueError("Koopman re-weighting is designed for reversible processes, set reversible=True")
            koop = _KoopmanEstimator(lag=lag, stride=stride, skip=skip, ncov_max=ncov_max)
            koop.estimate(data, chunksize=cs)
            weights = koop.weights
        elif weights == "empirical":
            # Empirical weighting means no re-weighting at all.
            weights = None
        else:
            raise ValueError("reweighting must be either 'empirical', 'koopman' "
                             "or an object with a weights(data) method.")
    elif hasattr(weights, 'weights') and type(getattr(weights, 'weights')) == types.MethodType:
        # Custom weights object exposing a weights(X) method: pass through unchanged.
        weights = weights
    elif isinstance(weights, (list, tuple)) and all(isinstance(w, _np.ndarray) for w in weights):
        # Pre-computed per-trajectory weight arrays: must match the number of trajectories.
        if data is not None and len(data) != len(weights):
            raise ValueError("len of weights({}) must match len of data({}).".format(len(weights), len(data)))
    else:
        raise ValueError("reweighting must be either 'empirical', 'koopman' or an object with a weights(data) method.")
    if not remove_mean:
        import warnings
        user_msg = 'remove_mean option is deprecated. The mean is removed from the data by default, otherwise it' \
                   'cannot be guaranteed that all eigenvalues will be smaller than one. Some functionalities might' \
                   'become useless in this case (e.g. commute_maps). Also, not removing the mean will not result in' \
                   'a significant speed up of calculations.'
        warnings.warn(
            user_msg,
            category=_PyEMMA_DeprecationWarning)
    res = TICA(lag, dim=dim, var_cutoff=var_cutoff, kinetic_map=kinetic_map, commute_map=commute_map, skip=skip, stride=stride,
               weights=weights, reversible=reversible, ncov_max=ncov_max)
    # Estimate immediately when data was supplied; otherwise return an empty estimator.
    if data is not None:
        res.estimate(data, chunksize=cs)
    else:
        res.chunksize = cs
    return res
|
r""" Time-lagged independent component analysis (TICA).
TICA is a linear transformation method. In contrast to PCA, which finds
coordinates of maximal variance, TICA finds coordinates of maximal
autocorrelation at the given lag time. Therefore, TICA is useful in order
to find the *slow* components in a dataset and thus an excellent choice to
transform molecular dynamics data before clustering data for the
construction of a Markov model. When the input data is the result of a
Markov process (such as thermostatted molecular dynamics), TICA finds in
fact an approximation to the eigenfunctions and eigenvalues of the
underlying Markov operator [1]_.
It estimates a TICA transformation from *data*. When input data is given as
an argument, the estimation will be carried out straight away, and the
resulting object can be used to obtain eigenvalues, eigenvectors or project
input data onto the slowest TICA components. If no data is given, this
object is an empty estimator and can be put into a :func:`pipeline` in
order to use TICA in the streaming mode.
Parameters
----------
data : ndarray (T, d) or list of ndarray (T_i, d) or a reader created by
source function array with the data, if available. When given, the TICA
transformation is immediately computed and can be used to transform data.
lag : int, optional, default = 10
the lag time, in multiples of the input time step
dim : int, optional, default -1
the number of dimensions (independent components) to project onto. A
call to the :func:`map <pyemma.coordinates.transform.TICA.map>` function
reduces the d-dimensional input to only dim dimensions such that the
data preserves the maximum possible autocorrelation amongst
dim-dimensional linear projections. -1 means all numerically available
dimensions will be used unless reduced by var_cutoff.
Setting dim to a positive value is exclusive with var_cutoff.
var_cutoff : float in the range [0,1], optional, default 0.95
Determines the number of output dimensions by including dimensions
until their cumulative kinetic variance exceeds the fraction
subspace_variance. var_cutoff=1.0 means all numerically available
dimensions (see epsilon) will be used, unless set by dim. Setting
var_cutoff smaller than 1.0 is exclusive with dim
kinetic_map : bool, optional, default True
Eigenvectors will be scaled by eigenvalues. As a result, Euclidean
distances in the transformed data approximate kinetic distances [4]_.
This is a good choice when the data is further processed by clustering.
commute_map : bool, optional, default False
Eigenvector_i will be scaled by sqrt(timescale_i / 2). As a result, Euclidean distances in the transformed
data will approximate commute distances [5]_.
stride : int, optional, default = 1
If set to 1, all input data will be used for estimation. Note that this
could cause this calculation to be very slow for large data sets. Since
molecular dynamics data is usually correlated at short timescales, it is
often sufficient to estimate transformations at a longer stride. Note
that the stride option in the get_output() function of the returned
object is independent, so you can parametrize at a long stride, and
still map all frames through the transformer.
weights : optional, default="empirical"
Re-weighting strategy to be used in order to compute equilibrium covariances from non-equilibrium data.
* "empirical": no re-weighting
* "koopman": use re-weighting procedure from [6]_
* weights: An object that allows to compute re-weighting factors. It must possess a method
weights(X) that accepts a trajectory X (np.ndarray(T, n)) and returns a vector of
re-weighting factors (np.ndarray(T,)).
remove_mean: bool, optional, default True
remove mean during covariance estimation. Should not be turned off.
skip : int, default=0
skip the first initial n frames per trajectory.
reversible: bool, default=True
symmetrize correlation matrices C_0, C_{\tau}.
ncov_max : int, default=infinity
limit the memory usage of the algorithm from [7]_ to an amount that corresponds
to ncov_max additional copies of each correlation matrix
chunksize: int, default=None
Number of data frames to process at once. Choose a higher value here,
to optimize thread usage and gain processing speed. If None is passed,
use the default value of the underlying reader/data source. Choose zero to
disable chunking at all.
Returns
-------
tica : a :class:`TICA <pyemma.coordinates.transform.TICA>` transformation object
Object for time-lagged independent component (TICA) analysis.
it contains TICA eigenvalues and eigenvectors, and the projection of
input data to the dominant TICA
Notes
-----
Given a sequence of multivariate data :math:`X_t`, it computes the
mean-free covariance and time-lagged covariance matrix:
.. math::
C_0 &= (X_t - \mu)^T \mathrm{diag}(w) (X_t - \mu) \\
C_{\tau} &= (X_t - \mu)^T \mathrm{diag}(w) (X_t + \tau - \mu)
where w is a vector of weights for each time step. By default, these weights
are all equal to one, but different weights are possible, like the re-weighting
to equilibrium described in [6]_. Subsequently, the eigenvalue problem
.. math:: C_{\tau} r_i = C_0 \lambda_i r_i,
is solved,where :math:`r_i` are the independent components and :math:`\lambda_i` are
their respective normalized time-autocorrelations. The eigenvalues are
related to the relaxation timescale by
.. math::
t_i = -\frac{\tau}{\ln |\lambda_i|}.
When used as a dimension reduction method, the input data is projected
onto the dominant independent components.
TICA was originally introduced for signal processing in [2]_. It was
introduced to molecular dynamics and as a method for the construction
of Markov models in [1]_ and [3]_. It was shown in [1]_ that when applied
to molecular dynamics data, TICA is an approximation to the eigenvalues
and eigenvectors of the true underlying dynamics.
Examples
--------
Invoke TICA transformation with a given lag time and output dimension:
>>> import numpy as np
>>> from pyemma.coordinates import tica
>>> data = np.random.random((100,3))
>>> projected_data = tica(data, lag=2, dim=1).get_output()[0]
For a brief explanation of why TICA outperforms PCA in extracting a good reaction
coordinate, have a look `here
<http://docs.markovmodel.org/lecture_tica.html#Example:-TICA-versus-PCA-in-a-stretched-double-well-potential>`_.
See also
--------
:class:`TICA <pyemma.coordinates.transform.TICA>` : tica object
:func:`pca <pyemma.coordinates.pca>` : for principal component analysis
.. autoclass:: pyemma.coordinates.transform.tica.TICA
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.coordinates.transform.tica.TICA
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.coordinates.transform.tica.TICA
:attributes:
References
----------
.. [1] Perez-Hernandez G, F Paul, T Giorgino, G De Fabritiis and F Noe. 2013.
Identification of slow molecular order parameters for Markov model construction
J. Chem. Phys. 139, 015102. doi:10.1063/1.4811489
.. [2] L. Molgedey and H. G. Schuster. 1994.
Separation of a mixture of independent signals using time delayed correlations
Phys. Rev. Lett. 72, 3634.
.. [3] Schwantes C, V S Pande. 2013.
Improvements in Markov State Model Construction Reveal Many Non-Native Interactions in the Folding of NTL9
J. Chem. Theory. Comput. 9, 2000-2009. doi:10.1021/ct300878a
.. [4] Noe, F. and Clementi, C. 2015. Kinetic distance and kinetic maps from molecular dynamics simulation.
J. Chem. Theory. Comput. doi:10.1021/acs.jctc.5b00553
.. [5] Noe, F., Banisch, R., Clementi, C. 2016. Commute maps: separating slowly-mixing molecular configurations
for kinetic modeling. J. Chem. Theory. Comput. doi:10.1021/acs.jctc.6b00762
.. [6] Wu, H., Nueske, F., Paul, F., Klus, S., Koltai, P., and Noe, F. 2016. Bias reduced variational
approximation of molecular kinetics from short off-equilibrium simulations. J. Chem. Phys. (submitted),
https://arxiv.org/abs/1610.06773.
.. [7] Chan, T. F., Golub G. H., LeVeque R. J. 1979. Updating formulae and pairwise algorithms for
computing sample variances. Technical Report STAN-CS-79-773, Department of Computer Science, Stanford University.
|
def getOverlayTransformAbsolute(self, ulOverlayHandle):
    """Fetch an overlay's absolute transform.

    Returns a (result, tracking_origin, transform) tuple; the underlying
    call reports an error result if the overlay's transform is some other
    (non-absolute) type.
    """
    fn = self.function_table.getOverlayTransformAbsolute
    tracking_origin = ETrackingUniverseOrigin()
    origin_to_overlay = HmdMatrix34_t()
    error = fn(ulOverlayHandle, byref(tracking_origin), byref(origin_to_overlay))
    return error, tracking_origin, origin_to_overlay
|
Gets the transform if it is absolute. Returns an error if the transform is some other type.
|
def series_resistors(target,
                     pore_area='pore.area',
                     throat_area='throat.area',
                     pore_conductivity='pore.electrical_conductivity',
                     throat_conductivity='throat.electrical_conductivity',
                     conduit_lengths='throat.conduit_lengths',
                     conduit_shape_factors='throat.poisson_shape_factors'):
    r"""
    Calculate the electrical conductance of conduits in network, where a
    conduit is ( 1/2 pore - full throat - 1/2 pore ). See the notes section.

    Parameters
    ----------
    target : OpenPNM Object
        The object which this model is associated with. This controls the
        length of the calculated array, and also provides access to other
        necessary properties.
    pore_area : string
        Dictionary key of the pore area values
    throat_area : string
        Dictionary key of the throat area values
    pore_conductivity : string
        Dictionary key of the pore electrical conductivity values
    throat_conductivity : string
        Dictionary key of the throat electrical conductivity values
    conduit_lengths : string
        Dictionary key of the conduit length values
    conduit_shape_factors : string
        Dictionary key of the conduit DIFFUSION shape factor values

    Returns
    -------
    g : ndarray
        Array containing electrical conductance values for conduits in the
        geometry attached to the given physics object.

    Notes
    -----
    (1) This function requires that all the necessary phase properties already
    be calculated.
    (2) This function calculates the specified property for the *entire*
    network then extracts the values for the appropriate throats at the end.
    (3) This function assumes cylindrical throats with constant cross-section
    area. Corrections for different shapes and variable cross-section area can
    be imposed by passing the proper flow_shape_factor argument.
    """
    # Electrical conduction is mathematically identical to Fickian diffusion
    # (Ohm's law vs Fick's law), so the generic diffusion machinery is reused
    # with conductivity passed in place of diffusivity.
    return generic_conductance(target=target, transport_type='diffusion',
                               pore_area=pore_area,
                               throat_area=throat_area,
                               pore_diffusivity=pore_conductivity,
                               throat_diffusivity=throat_conductivity,
                               conduit_lengths=conduit_lengths,
                               conduit_shape_factors=conduit_shape_factors)
|
r"""
Calculate the electrical conductance of conduits in network, where a
conduit is ( 1/2 pore - full throat - 1/2 pore ). See the notes section.
Parameters
----------
target : OpenPNM Object
The object which this model is associated with. This controls the
length of the calculated array, and also provides access to other
necessary properties.
pore_conductivity : string
Dictionary key of the pore electrical conductivity values
throat_conductivity : string
Dictionary key of the throat electrical conductivity values
pore_area : string
Dictionary key of the pore area values
throat_area : string
Dictionary key of the throat area values
conduit_shape_factors : string
Dictionary key of the conduit DIFFUSION shape factor values
Returns
-------
g : ndarray
Array containing electrical conductance values for conduits in the
geometry attached to the given physics object.
Notes
-----
(1) This function requires that all the necessary phase properties already
be calculated.
(2) This function calculates the specified property for the *entire*
network then extracts the values for the appropriate throats at the end.
(3) This function assumes cylindrical throats with constant cross-section
area. Corrections for different shapes and variable cross-section area can
be imposed by passing the proper flow_shape_factor argument.
|
def convert_row(self, keyed_row, schema, fallbacks):
    """Convert a keyed row's values to SQL-ready values, in place.

    Keys without a corresponding schema field are dropped. Values whose key
    is listed in `fallbacks` are uncast back to a storable form; all other
    values are cast through their field's type.

    Returns the (mutated) keyed_row.
    """
    for key, value in list(keyed_row.items()):
        field = schema.get_field(key)
        if not field:
            del keyed_row[key]
            # Skip to the next key: without this, the loop went on to call
            # field.cast_value on None (AttributeError) and re-added the
            # key it had just deleted.
            continue
        if key in fallbacks:
            value = _uncast_value(value, field=field)
        else:
            value = field.cast_value(value)
        keyed_row[key] = value
    return keyed_row
|
Convert row to SQL
|
def create_class(self):
    """Render the estimator class as source code.

    Returns
    -------
    :return : string
        The generated class as a string.
    """
    # Java/Go targets carry a separate class header template; Java bodies
    # are nested one level deeper than Go's package-level code.
    if self.target_language in ('java', 'go'):
        indents = 1 if self.target_language == 'java' else 0
        head_tpl = self.temp('separated.{}.class'.format(self.prefix),
                             n_indents=indents, skipping=True)
        self.class_head = head_tpl.format(**self.__dict__)
    return self.temp('separated.class').format(**self.__dict__)
|
Build the estimator class.
Returns
-------
:return : string
The built class as string.
|
def part(self, target, reason=None):
    """Leave a channel, optionally announcing a parting reason."""
    suffix = ' :' + reason if reason else ''
    self.send_line('PART %s' % (target + suffix))
|
quit a channel
|
def capture(self, pattern=None, negate=False, workers=None, negate_workers=False,
            params=None, success=False, error=True, stats=False):
    """Stream selected Clearly events to the terminal in real-time.

    The Clearly Server observes every task and worker update sent to celery;
    each client picks its own filtered view of that stream, so several
    clients can watch different event sets simultaneously.

    Runs in the foreground — press CTRL+C at any time to stop.

    Args:
        Filter args:
            pattern (Optional[str]): a pattern to filter tasks to capture.
                ex.: '^dispatch|^email' to filter names starting with that
                  or 'dispatch.*123456' to filter that exact name and number
                  or even '123456' to filter that exact number anywhere.
            negate (bool): if True, finds tasks that do not match criteria.
            workers (Optional[str]): a pattern to filter workers to capture.
                ex.: 'service|priority' to filter names containing that
            negate_workers (bool): if True, finds workers that do not match criteria.
        Display args:
            params (Optional[bool]): if True shows args and kwargs in the first
                and last seen states, if False never shows, and if None follows
                the success and error arguments. default is None
            success (bool): if True shows successful tasks' results.
                default is False
            error (bool): if True shows failed and retried tasks' tracebacks.
                default is True, as you're monitoring to find errors, right?
            stats (bool): if True shows complete workers' stats.
                default is False
    """
    # '.' matches everything, so an empty filter captures all events.
    request = clearly_pb2.CaptureRequest(
        tasks_capture=clearly_pb2.PatternFilter(
            pattern=pattern or '.', negate=negate),
        workers_capture=clearly_pb2.PatternFilter(
            pattern=workers or '.', negate=negate_workers),
    )
    try:
        for event in self._stub.capture_realtime(request):
            if event.HasField('task'):
                ClearlyClient._display_task(event.task, params, success, error)
            elif event.HasField('worker'):
                ClearlyClient._display_worker(event.worker, stats)
            else:
                # Unknown payload type: report it and stop streaming.
                print('unknown event:', event)
                break
    except KeyboardInterrupt:
        # CTRL+C is the documented way to stop capturing.
        pass
|
Starts capturing selected events in real-time. You can filter exactly what
you want to see, as the Clearly Server handles all tasks and workers updates
being sent to celery. Several clients can see different sets of events at the
same time.
This runs in the foreground, so you can see in real-time exactly what your
clients and celery workers are doing.
Press CTRL+C at any time to stop it.
Args:
Filter args:
pattern (Optional[str]): a pattern to filter tasks to capture.
ex.: '^dispatch|^email' to filter names starting with that
or 'dispatch.*123456' to filter that exact name and number
or even '123456' to filter that exact number anywhere.
negate (bool): if True, finds tasks that do not match criteria.
workers (Optional[str]): a pattern to filter workers to capture.
ex.: 'service|priority' to filter names containing that
negate_workers (bool): if True, finds workers that do not match criteria.
Display args:
params (Optional[bool]): if True shows args and kwargs in the first and
last seen states, if False never shows, and if None follows the
success and error arguments.
default is None
success (bool): if True shows successful tasks' results.
default is False
error (bool): if True shows failed and retried tasks' tracebacks.
default is True, as you're monitoring to find errors, right?
stats (bool): if True shows complete workers' stats.
default is False
|
def gcmt_to_simple_array(self, centroid_location=True):
    """
    Convert the GCMT catalogue to a simple (N, 29) float array with columns:
    [0 ID, 1 year, 2 month, 3 day, 4 hour, 5 minute, 6 second,
     7 longitude, 8 latitude, 9 depth, 10 Mw, 11 moment, 12 f_clvd,
     13 e_rel, 14 strike1, 15 dip1, 16 rake1, 17 strike2, 18 dip2,
     19 rake2, 20 b-eigenvalue, 21 b-azimuth, 22 b-plunge,
     23 p-eigenvalue, 24 p-azimuth, 25 p-plunge, 26 t-eigenvalue,
     27 t-azimuth, 28 t-plunge]

    :param centroid_location: read date/time/location from the centroid
        solution if True, otherwise from the hypocentre.
    """
    catalogue = np.zeros([self.get_number_tensors(), 29], dtype=float)
    for iloc, tensor in enumerate(self.gcmts):
        catalogue[iloc, 0] = iloc
        # Select the requested solution once. The original duplicated both
        # branches and, in the hypocentre branch, read the seconds from the
        # *centroid* time (copy-paste error) - fixed here.
        location = tensor.centroid if centroid_location else tensor.hypocentre
        catalogue[iloc, 1] = float(location.date.year)
        catalogue[iloc, 2] = float(location.date.month)
        catalogue[iloc, 3] = float(location.date.day)
        catalogue[iloc, 4] = float(location.time.hour)
        catalogue[iloc, 5] = float(location.time.minute)
        # np.float was removed in NumPy 1.24; the builtin float is equivalent
        catalogue[iloc, 6] = np.round(
            float(location.time.second) +
            float(location.time.microsecond) / 1000000., 2)
        catalogue[iloc, 7] = location.longitude
        catalogue[iloc, 8] = location.latitude
        catalogue[iloc, 9] = location.depth
        catalogue[iloc, 10] = tensor.magnitude
        catalogue[iloc, 11] = tensor.moment
        catalogue[iloc, 12] = tensor.f_clvd
        catalogue[iloc, 13] = tensor.e_rel
        # Nodal planes
        catalogue[iloc, 14] = tensor.nodal_planes.nodal_plane_1['strike']
        catalogue[iloc, 15] = tensor.nodal_planes.nodal_plane_1['dip']
        catalogue[iloc, 16] = tensor.nodal_planes.nodal_plane_1['rake']
        catalogue[iloc, 17] = tensor.nodal_planes.nodal_plane_2['strike']
        catalogue[iloc, 18] = tensor.nodal_planes.nodal_plane_2['dip']
        catalogue[iloc, 19] = tensor.nodal_planes.nodal_plane_2['rake']
        # Principal axes (eigenvalue, azimuth, plunge per axis)
        catalogue[iloc, 20] = tensor.principal_axes.b_axis['eigenvalue']
        catalogue[iloc, 21] = tensor.principal_axes.b_axis['azimuth']
        catalogue[iloc, 22] = tensor.principal_axes.b_axis['plunge']
        catalogue[iloc, 23] = tensor.principal_axes.p_axis['eigenvalue']
        catalogue[iloc, 24] = tensor.principal_axes.p_axis['azimuth']
        catalogue[iloc, 25] = tensor.principal_axes.p_axis['plunge']
        catalogue[iloc, 26] = tensor.principal_axes.t_axis['eigenvalue']
        catalogue[iloc, 27] = tensor.principal_axes.t_axis['azimuth']
        catalogue[iloc, 28] = tensor.principal_axes.t_axis['plunge']
    return catalogue
|
Converts the GCMT catalogue to a simple array of
[ID, year, month, day, hour, minute, second, long., lat., depth, Mw,
moment, f_clvd, e_rel, strike1, dip1, rake1, strike2, dip2, rake2,
b-eigenvalue, b-azimuth, b-plunge, p-eigenvalue, p-azimuth, p-plunge,
t-eigenvalue, t-azimuth, t-plunge]
|
def get_one_file_in(dirname):
    """Return the path of the single file contained in `dirname`.

    Raises Failure when the directory is empty or holds more than one entry.
    """
    entries = os.listdir(dirname)
    if not entries:
        raise Failure('No files found in %s' % dirname)
    if len(entries) > 1:
        raise Failure('More than one file exists in %s:\n%s' %
                      (dirname, '\n'.join(sorted(entries))))
    return os.path.join(dirname, entries[0])
|
Return the pathname of the one file in a directory.
Raises if the directory has no files or more than one file.
|
def generate_covalent_bond_graph(covalent_bonds):
    """Build a graph of the covalent-bond network described by the interactions.

    Parameters
    ----------
    covalent_bonds: [CovalentBond]
        List of `CovalentBond`.

    Returns
    -------
    bond_graph: networkx.Graph
        A graph of the covalent bond network, one edge per bond.
    """
    bond_graph = networkx.Graph()
    bond_graph.add_edges_from((bond.a, bond.b) for bond in covalent_bonds)
    return bond_graph
|
Generates a graph of the covalent bond network described by the interactions.
Parameters
----------
covalent_bonds: [CovalentBond]
List of `CovalentBond`.
Returns
-------
bond_graph: networkx.Graph
A graph of the covalent bond network.
|
def describe(self, *cols):
    """Computes basic statistics (count, mean, stddev, min, max) for numeric
    and string columns. When no columns are given, statistics are computed
    for all numerical or string columns.

    .. note:: This function is meant for exploratory data analysis, as we make no
        guarantee about the backward compatibility of the schema of the resulting DataFrame.

    >>> df.describe(['age']).show()
    +-------+------------------+
    |summary|               age|
    +-------+------------------+
    |  count|                 2|
    |   mean|               3.5|
    | stddev|2.1213203435596424|
    |    min|                 2|
    |    max|                 5|
    +-------+------------------+
    >>> df.describe().show()
    +-------+------------------+-----+
    |summary|               age| name|
    +-------+------------------+-----+
    |  count|                 2|    2|
    |   mean|               3.5| null|
    | stddev|2.1213203435596424| null|
    |    min|                 2|Alice|
    |    max|                 5|  Bob|
    +-------+------------------+-----+

    Use summary for expanded statistics and control over which statistics to compute.
    """
    # Allow describe(['a', 'b']) as an alias for describe('a', 'b').
    if len(cols) == 1 and isinstance(cols[0], list):
        cols = cols[0]
    return DataFrame(self._jdf.describe(self._jseq(cols)), self.sql_ctx)
|
Computes basic statistics for numeric and string columns.
This include count, mean, stddev, min, and max. If no columns are
given, this function computes statistics for all numerical or string columns.
.. note:: This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting DataFrame.
>>> df.describe(['age']).show()
+-------+------------------+
|summary| age|
+-------+------------------+
| count| 2|
| mean| 3.5|
| stddev|2.1213203435596424|
| min| 2|
| max| 5|
+-------+------------------+
>>> df.describe().show()
+-------+------------------+-----+
|summary| age| name|
+-------+------------------+-----+
| count| 2| 2|
| mean| 3.5| null|
| stddev|2.1213203435596424| null|
| min| 2|Alice|
| max| 5| Bob|
+-------+------------------+-----+
Use summary for expanded statistics and control over which statistics to compute.
|
def get_reconciler(config, metrics, rrset_channel, changes_channel, **kw):
    """Create a GDNSReconciler client.

    A factory function that validates configuration, creates an auth
    and :class:`GDNSClient` instance, and returns a GDNSReconciler
    provider.

    Args:
        config (dict): Google Cloud Pub/Sub-related configuration.
        metrics (obj): :interface:`IMetricRelay` implementation.
        rrset_channel (asyncio.Queue): Queue from which to consume
            record set messages to validate.
        changes_channel (asyncio.Queue): Queue to publish message to
            make corrections to Cloud DNS.
        kw (dict): Additional keyword arguments to pass to the
            Reconciler.

    Returns:
        A :class:`GDNSReconciler` instance.
    """
    factory = reconciler.GDNSReconcilerBuilder(
        config, metrics, rrset_channel, changes_channel, **kw)
    return factory.build_reconciler()
|
Get a GDNSReconciler client.
A factory function that validates configuration, creates an auth
and :class:`GDNSClient` instance, and returns a GDNSReconciler
provider.
Args:
config (dict): Google Cloud Pub/Sub-related configuration.
metrics (obj): :interface:`IMetricRelay` implementation.
rrset_channel (asyncio.Queue): Queue from which to consume
record set messages to validate.
changes_channel (asyncio.Queue): Queue to publish message to
make corrections to Cloud DNS.
kw (dict): Additional keyword arguments to pass to the
Reconciler.
Returns:
A :class:`GDNSReconciler` instance.
|
def tensor_info_proto_maps_match(map_a, map_b):
    """Whether two signature input/output maps match in dtype, shape and sparsity.

    Args:
      map_a: A proto map<string,TensorInfo>.
      map_b: A proto map<string,TensorInfo>.

    Returns:
      True iff both maps hold the same keys and, key by key, tensors with
      equal dtype, shape and sparsity.
    """
    items_a = sorted(parse_tensor_info_map(map_a).items())
    items_b = sorted(parse_tensor_info_map(map_b).items())
    if len(items_a) != len(items_b):
        return False  # Mismatch count.

    def _pair_matches(pair_a, pair_b):
        # Compare one (key, TensorInfo) pair from each map.
        key_a, info_a = pair_a
        key_b, info_b = pair_b
        return (key_a == key_b and
                _is_sparse(info_a) == _is_sparse(info_b) and
                info_a.dtype == info_b.dtype and
                _shape_match(info_a.get_shape(), info_b.get_shape()))

    return all(_pair_matches(a, b) for a, b in zip(items_a, items_b))
|
Whether two signature inputs/outputs match in dtype, shape and sparsity.
Args:
map_a: A proto map<string,TensorInfo>.
map_b: A proto map<string,TensorInfo>.
Returns:
A boolean whether `map_a` and `map_b` tensors have the same dtype, shape and
sparsity.
|
def _get_headers(environ):
# type: (Dict[str, str]) -> Iterator[Tuple[str, str]]
"""
Returns only proper HTTP headers.
"""
for key, value in environ.items():
key = str(key)
if key.startswith("HTTP_") and key not in (
"HTTP_CONTENT_TYPE",
"HTTP_CONTENT_LENGTH",
):
yield key[5:].replace("_", "-").title(), value
elif key in ("CONTENT_TYPE", "CONTENT_LENGTH"):
yield key.replace("_", "-").title(), value
|
Returns only proper HTTP headers.
|
def get_cameras_properties(self):
    """Fetch camera properties and cache them on the instance."""
    event = self.publish_and_get_event("cameras")
    if event:
        # Record when the cache was last refreshed (epoch seconds).
        self._last_refresh = int(time.time())
        self._camera_properties = event.get('properties')
|
Return camera properties.
|
def datalog(self, parameter, run, maxrun=None, det_id='D_ARCA001'):
    """Retrieve datalogs for given parameter, run(s) and detector."""
    # A single run is expressed as the range [run, run].
    if maxrun is None:
        maxrun = run
    with Timer('Database lookup'):
        return self._datalog(parameter.lower(), run, maxrun, det_id)
|
Retrieve datalogs for given parameter, run(s) and detector
|
def gain(abf):
    """Plot a gain function: average instantaneous frequency vs command current."""
    freqs = np.nan_to_num(swhlab.ap.getAvgBySweep(abf, 'freq'))
    commands = abf.clampValues(abf.dataX[int(abf.protoSeqX[1] + .01)])
    swhlab.plot.new(abf, title="gain function", xlabel="command current (pA)",
                    ylabel="average inst. freq. (Hz)")
    pylab.plot(commands, freqs, '.-', ms=20, alpha=.5, color='b')
    # Dashed reference line at zero frequency
    pylab.axhline(0, alpha=.5, lw=2, color='r', ls="--")
    pylab.margins(.1, .1)
|
easy way to plot a gain function.
|
def host_inventory_get(hostids, **kwargs):
    '''
    Retrieve host inventory according to the given parameters.
    See: https://www.zabbix.com/documentation/2.4/manual/api/reference/host/object#host_inventory
    .. versionadded:: 2019.2.0
    :param hostids: Return only inventory for the given hosts.
    :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
    :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
    :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
    :return: Host inventory details, False if the host has no inventory,
        or an empty dict on login/lookup failure.
    CLI Example:
    .. code-block:: bash
        salt '*' zabbix.host_inventory_get 101054
    '''
    conn_args = _login(**kwargs)
    ret = {}
    try:
        if conn_args:
            method = 'host.get'
            # Ask Zabbix to embed the full inventory object with each host.
            params = {"selectInventory": "extend"}
            if hostids:
                params.setdefault('hostids', hostids)
            params = _params_extend(params, **kwargs)
            ret = _query(method, params, conn_args['url'], conn_args['auth'])
            # Empty inventory is reported as False, not as an empty mapping.
            return ret['result'][0]['inventory'] if ret['result'][0]['inventory'] else False
        else:
            # No valid connection args: jump to the failure return below.
            raise KeyError
    except KeyError:
        # Login failed ({}), or the query response lacked the expected keys
        # (in which case the raw query result is returned).
        return ret
|
Retrieve host inventory according to the given parameters.
See: https://www.zabbix.com/documentation/2.4/manual/api/reference/host/object#host_inventory
.. versionadded:: 2019.2.0
:param hostids: Return only host interfaces used by the given hosts.
:param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring)
:param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring)
:param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring)
:return: Array with host interfaces details, False if no convenient host interfaces found or on failure.
CLI Example:
.. code-block:: bash
salt '*' zabbix.host_inventory_get 101054
|
async def delete(self, _id=None):
    """Delete an entry from the database table by id.

    delete(id) => 1 (if exists)
    delete(id) => {"error":404, "reason":"Not found"} (if does not exist)
    delete() => {"error":400, "reason":"Missed required fields"}
    """
    if not _id:
        return {"error": 400,
                "reason": "Missed required fields"}
    document = await self.collection.find_one({"id": _id})
    if not document:
        return {"error": 404,
                "reason": "Not found"}
    # `await` binds after attribute access, so the original
    # `await self.collection.delete_one(...).deleted_count` read
    # `.deleted_count` off the un-awaited coroutine. Await the call first,
    # then read the count from the result object.
    result = await self.collection.delete_one({"id": _id})
    return result.deleted_count
|
Delete entry from database table.
Accepts id.
delete(id) => 1 (if exists)
delete(id) => {"error":404, "reason":"Not found"} (if does not exist)
delete() => {"error":400, "reason":"Missed required fields"}
|
def get_raw_mempool(self, id=None, endpoint=None):
    """
    Returns the tx that are in the memorypool of the endpoint

    Args:
        id: (int, optional) id to use for response tracking.
            (Note: shadows the `id` builtin; kept for API compatibility.)
        endpoint: (RPCEndpoint, optional) endpoint to specify to use

    Returns:
        json object of the result or the error encountered in the RPC call
    """
    return self._call_endpoint(GET_RAW_MEMPOOL, id=id, endpoint=endpoint)
|
Returns the tx that are in the memorypool of the endpoint
Args:
id: (int, optional) id to use for response tracking
endpoint: (RPCEndpoint, optional) endpoint to specify to use
Returns:
json object of the result or the error encountered in the RPC call
|
def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
    """Mount a filesystem at a particular mountpoint.

    Returns False if the mount command fails. Otherwise returns True, or —
    when persist is requested — the result of recording the mount in fstab.
    """
    cmd = ['mount']
    if options is not None:
        cmd += ['-o', options]
    cmd += [device, mountpoint]
    try:
        subprocess.check_output(cmd)
    except subprocess.CalledProcessError as e:
        log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
        return False
    if persist:
        return fstab_add(device, mountpoint, filesystem, options=options)
    return True
|
Mount a filesystem at a particular mountpoint
|
def _watch_file(self, filepath, trigger_event=True):
"""Adds the file's modified time into its internal watchlist."""
is_new = filepath not in self._watched_files
if trigger_event:
if is_new:
self.trigger_created(filepath)
else:
self.trigger_modified(filepath)
try:
self._watched_files[filepath] = self._get_modified_time(filepath)
except OSError:
return
|
Adds the file's modified time into its internal watchlist.
|
def get_or_none(cls, **filter_kwargs):
    """Return the matching video, or None when no video matches."""
    try:
        return cls.objects.get(**filter_kwargs)
    except cls.DoesNotExist:
        return None
|
Returns a video or None.
|
def PrintIndented(self, file, ident, code):
    """Write each entry of `code` to `file`, prefixed with `ident`.

    Uses file.write() rather than the Python 2-only `print >>file`
    statement, so the method runs on both Python 2 and Python 3.
    """
    for entry in code:
        file.write('%s%s\n' % (ident, entry))
|
Takes an array, add indentation to each entry and prints it.
|
def move(self, direction, absolute=False, pad_name=None, refresh=True):
    """ Scroll the current pad
        direction : (int) move by one in the given direction
                    -1 is up, 1 is down. If absolute is True,
                    go to position direction.
                    Behaviour is affected by cursor_line and scroll_only below
        absolute  : (bool) interpret `direction` as a target row (or extreme,
                    for scroll-only pads) instead of a relative step
        pad_name  : (str, optional) pad to move; defaults to the current pad
        refresh   : (bool) redraw the pad after moving
    """
    # pad in this lists have the current line highlighted
    cursor_line = [ 'streams' ]
    # pads in this list will be moved screen-wise as opposed to line-wise
    # if absolute is set, will go all the way top or all the way down depending
    # on direction
    scroll_only = [ 'help' ]
    if not pad_name:
        pad_name = self.current_pad
    pad = self.pads[pad_name]
    if pad_name == 'streams' and self.no_streams:
        # Nothing to highlight or scroll when the stream list is empty.
        return
    (row, col) = pad.getyx()
    new_row = row
    offset = self.offsets[pad_name]
    new_offset = offset
    if pad_name in scroll_only:
        if absolute:
            if direction > 0:
                # Jump straight to the last full page.
                new_offset = pad.getmaxyx()[0] - self.pad_h + 1
            else:
                new_offset = 0
        else:
            # Page up/down by one screenful, clamped to the pad's extent.
            if direction > 0:
                new_offset = min(pad.getmaxyx()[0] - self.pad_h + 1, offset + self.pad_h)
            elif offset > 0:
                new_offset = max(0, offset - self.pad_h)
    else:
        if absolute and direction >= 0 and direction < pad.getmaxyx()[0]:
            # Absolute move: scroll just enough to bring the target row
            # into the visible window.
            if direction < offset:
                new_offset = direction
            elif direction > offset + self.pad_h - 2:
                new_offset = direction - self.pad_h + 2
            new_row = direction
        else:
            # Relative move: shift the viewport only when the cursor would
            # leave the visible window's top or bottom edge.
            if direction == -1 and row > 0:
                if row == offset:
                    new_offset -= 1
                new_row = row-1
            elif direction == 1 and row < len(self.filtered_streams)-1:
                if row == offset + self.pad_h - 2:
                    new_offset += 1
                new_row = row+1
    if pad_name in cursor_line:
        # Un-highlight the previously selected line.
        pad.move(row, 0)
        pad.chgat(curses.A_NORMAL)
    self.offsets[pad_name] = new_offset
    pad.move(new_row, 0)
    if pad_name in cursor_line:
        # Highlight the newly selected line.
        pad.chgat(curses.A_REVERSE)
    if pad_name == 'streams':
        self.redraw_stream_footer()
    if refresh:
        self.refresh_current_pad()
|
Scroll the current pad
direction : (int) move by one in the given direction
-1 is up, 1 is down. If absolute is True,
go to position direction.
Behaviour is affected by cursor_line and scroll_only below
absolute : (bool)
|
def token_info(token, refresh=True, refresh_cb=None, session=None):
    """
    Fetch Google OAuth token information, transparently refreshing an
    expired token when allowed.

    :param OAuthToken token
    :param bool refresh:
        whether to attempt to refresh the OAuth token if it expired.
        default is `True`.
    :param refresh_cb:
        If specified, a callable object which is given the new token
        in parameter if it has been refreshed.
    :param requests.Session session:
        Optional `requests` session to use.
    :return:
        token information. see
        https://developers.google.com/identity/protocols/OAuth2UserAgent#tokeninfo-validation
        - `scope`: this field is not a space-delimited set of scopes
          but a real Python `set`.
        - `token`: additional field that provides the `OAuthToken`
        - `refreshed`: boolean that will tell if the token has been refreshed
    :rtype: nameddict
    """
    session = session or HTTP_SESSION
    params = dict(access_token=token.access_token)
    resp = session.get(TOKEN_INFO_URL, params=params)
    if resp.status_code != 200:
        # A non-200 response is treated as an invalid/expired token.
        if refresh:
            token = refresh_token(token, session=session)
            if refresh_cb is not None:
                try:
                    refresh_cb(token)
                except Exception:
                    # A failing callback must not abort the refresh itself.
                    LOGGER.exception('OAuth token refresh callback failed')
            # Recurse once with refresh disabled to validate the new token.
            info = token_info(token, refresh=False, session=session)
            info.update(refreshed=True)
            return info
        raise OAuthTokenRefreshRequiredError()
    info = __coerce_token_info(resp.json())
    info.update(token=token, refreshed=False)
    return nameddict(info)
|
:param OAuthToken token
:param bool refresh:
whether to attempt to refresh the OAuth token if it expired.
default is `True`.
:param refresh_cb:
If specified, a callable object which is given the new token
in parameter if it has been refreshed.
:param requests.Session session:
Optional `requests` session to use.
:return:
token information. see
https://developers.google.com/identity/protocols/OAuth2UserAgent#tokeninfo-validation
- `scope`: this field is not a space-delimited set of scopes
but a real Python `set`.
- `token`: additional field that provides the `OAuthToken`
- `refreshed`: boolean that will tell if the token has been refreshed
:rtype: nameddict
|
def p_unrelate_statement_2(self, p):
    # NOTE: the docstring below is the PLY grammar rule for this production;
    # yacc parses it, so it must not be edited as ordinary documentation.
    '''statement : UNRELATE instance_name FROM instance_name ACROSS rel_id DOT phrase'''
    # Build the AST node from the matched symbols:
    # p[2]=from instance, p[4]=to instance, p[6]=relationship id, p[8]=phrase.
    p[0] = UnrelateNode(from_variable_name=p[2],
                        to_variable_name=p[4],
                        rel_id=p[6],
                        phrase=p[8])
|
statement : UNRELATE instance_name FROM instance_name ACROSS rel_id DOT phrase
|
def get_config( config_path=CONFIG_PATH ):
    """
    Load the blockstack-file configuration from an INI file.

    :param config_path: path to the config file (default CONFIG_PATH)
    :return: dict with keys immutable_key (bool), key_id, blockchain_id,
        hostname and wallet; options missing from the file fall back to
        defaults (hostname defaults to the local host's name).
    """
    parser = SafeConfigParser()
    parser.read( config_path )
    immutable_key = False
    key_id = None
    blockchain_id = None
    hostname = socket.gethostname()
    wallet = None
    if parser.has_section('blockstack-file'):
        if parser.has_option('blockstack-file', 'immutable_key'):
            # Accept the common truthy spellings; anything else is False.
            immutable_key = parser.get('blockstack-file', 'immutable_key')
            immutable_key = immutable_key.lower() in ['1', 'yes', 'true']
        # BUG FIX: the original tested for a 'file_id' option but then read
        # 'key_id', so key_id was never loaded (and reading raised if only
        # file_id was present). Test and read the same option name.
        if parser.has_option('blockstack-file', 'key_id'):
            key_id = parser.get('blockstack-file', 'key_id')
        if parser.has_option('blockstack-file', 'blockchain_id'):
            blockchain_id = parser.get('blockstack-file', 'blockchain_id')
        if parser.has_option('blockstack-file', 'hostname'):
            hostname = parser.get('blockstack-file', 'hostname')
        if parser.has_option('blockstack-file', 'wallet'):
            wallet = parser.get('blockstack-file', 'wallet')
    config = {
        'immutable_key': immutable_key,
        'key_id': key_id,
        'blockchain_id': blockchain_id,
        'hostname': hostname,
        'wallet': wallet
    }
    return config
|
Get the config
|
def draw(self, **kwargs):
    """
    Draw the RFECV score curve on this visualizer's axes and return them.
    """
    subsets = self.n_feature_subsets_
    scores = self.cv_scores_
    mean_scores = scores.mean(axis=1)
    score_stds = scores.std(axis=1)

    # Shade the band one standard deviation above and below the mean.
    self.ax.fill_between(
        subsets, mean_scores - score_stds, mean_scores + score_stds, alpha=0.25
    )

    # Draw the mean cross-validated score curve.
    self.ax.plot(subsets, mean_scores, 'o-')

    # Mark the selected number of features with a labeled vertical line.
    label = "n_features = {}\nscore = {:0.3f}".format(
        self.n_features_, mean_scores.max()
    )
    self.ax.axvline(self.n_features_, c='k', ls='--', label=label)

    return self.ax
|
Renders the rfecv curve.
|
def reset(cwd,
          opts='',
          git_opts='',
          user=None,
          password=None,
          identity=None,
          ignore_retcode=False,
          output_encoding=None):
    '''
    Interface to `git-reset(1)`_, returns the stdout from the git command

    cwd
        The path to the git checkout

    opts
        Any additional options to add to the command line, in a single string

        .. note::
            On the Salt CLI, if the opts are preceded with a dash, it is
            necessary to precede them with ``opts=`` (as in the CLI examples
            below) to avoid causing errors with Salt's own argument parsing.

    git_opts
        Any additional options to add to git command itself (not the ``reset``
        subcommand), in a single string. This is useful for passing ``-c`` to
        run git with temporary changes to the git configuration.

        .. versionadded:: 2017.7.0

        .. note::
            This is only supported in git 1.7.2 and newer.

    user
        User under which to run the git command. By default, the command is run
        by the user under which the minion is running.

    password
        Windows only. Required when specifying ``user``. This parameter will be
        ignored on non-Windows platforms.

        .. versionadded:: 2016.3.4

    identity
        Path to a private key to use for ssh URLs. Salt will not attempt to use
        passphrase-protected keys unless invoked from the minion using
        ``salt-call``, to prevent blocking waiting for user input. Key can also
        be specified as a SaltStack file server URL, eg.
        ``salt://location/identity_file``.

        .. note::
            For greater security with passphraseless private keys, see the
            `sshd(8)`_ manpage for information on securing the keypair from the
            remote side in the ``authorized_keys`` file.

            .. _`sshd(8)`: http://www.man7.org/linux/man-pages/man8/sshd.8.html#AUTHORIZED_KEYS_FILE_FORMAT

        .. versionadded:: 2018.3.5,2019.2.1,Neon

    ignore_retcode : False
        If ``True``, do not log an error to the minion log if the git command
        returns a nonzero exit status.

        .. versionadded:: 2015.8.0

    output_encoding
        Use this option to specify which encoding to use to decode the output
        from any git commands which are run. This should not be needed in most
        cases.

        .. note::
            This should only be needed if the files in the repository were
            created with filenames using an encoding other than UTF-8 to handle
            Unicode characters.

        .. versionadded:: 2018.3.1

    .. _`git-reset(1)`: http://git-scm.com/docs/git-reset

    CLI Examples:

    .. code-block:: bash

        # Soft reset to a specific commit ID
        salt myminion git.reset /path/to/repo ac3ee5c

        # Hard reset
        salt myminion git.reset /path/to/repo opts='--hard origin/master'
    '''
    # Expand user-relative path components for the configured user
    # (see _expand_path).
    cwd = _expand_path(cwd, user)
    # Assemble the command line as: git <git_opts> reset <opts>
    command = ['git'] + _format_git_opts(git_opts)
    command.append('reset')
    command.extend(_format_opts(opts))
    # _git_run handles user impersonation, ssh identity and retcode logging.
    return _git_run(command,
                    cwd=cwd,
                    user=user,
                    password=password,
                    identity=identity,
                    ignore_retcode=ignore_retcode,
                    output_encoding=output_encoding)['stdout']
|
Interface to `git-reset(1)`_, returns the stdout from the git command
cwd
The path to the git checkout
opts
Any additional options to add to the command line, in a single string
.. note::
On the Salt CLI, if the opts are preceded with a dash, it is
necessary to precede them with ``opts=`` (as in the CLI examples
below) to avoid causing errors with Salt's own argument parsing.
git_opts
Any additional options to add to git command itself (not the ``reset``
subcommand), in a single string. This is useful for passing ``-c`` to
run git with temporary changes to the git configuration.
.. versionadded:: 2017.7.0
.. note::
This is only supported in git 1.7.2 and newer.
user
User under which to run the git command. By default, the command is run
by the user under which the minion is running.
password
Windows only. Required when specifying ``user``. This parameter will be
ignored on non-Windows platforms.
.. versionadded:: 2016.3.4
identity
Path to a private key to use for ssh URLs. Salt will not attempt to use
passphrase-protected keys unless invoked from the minion using
``salt-call``, to prevent blocking waiting for user input. Key can also
be specified as a SaltStack file server URL, eg.
``salt://location/identity_file``.
.. note::
For greater security with passphraseless private keys, see the
`sshd(8)`_ manpage for information on securing the keypair from the
remote side in the ``authorized_keys`` file.
.. _`sshd(8)`: http://www.man7.org/linux/man-pages/man8/sshd.8.html#AUTHORIZED_KEYS_FILE_FORMAT
.. versionadded:: 2018.3.5,2019.2.1,Neon
ignore_retcode : False
If ``True``, do not log an error to the minion log if the git command
returns a nonzero exit status.
.. versionadded:: 2015.8.0
output_encoding
Use this option to specify which encoding to use to decode the output
from any git commands which are run. This should not be needed in most
cases.
.. note::
This should only be needed if the files in the repository were
created with filenames using an encoding other than UTF-8 to handle
Unicode characters.
.. versionadded:: 2018.3.1
.. _`git-reset(1)`: http://git-scm.com/docs/git-reset
CLI Examples:
.. code-block:: bash
# Soft reset to a specific commit ID
salt myminion git.reset /path/to/repo ac3ee5c
# Hard reset
salt myminion git.reset /path/to/repo opts='--hard origin/master'
|
def _take_forced_measurement(self):
    """Trigger a single forced-mode measurement and wait for it to finish.

    In forced mode the BME sensor sleeps between measurements, so each
    reading must be kicked off explicitly by rewriting the ctrl_meas
    register (0xF4); the status register (0xF3) bit 3 stays set while
    the conversion is in progress.  In normal mode the sensor measures
    periodically on its own and this is unnecessary.
    """
    bus = self._bus
    address = self._i2c_add
    # Rewriting ctrl_meas re-arms forced mode: "take one measurement now".
    bus.write_byte_data(address, 0xF4, self.ctrl_meas_reg)
    # Poll the "measuring" status bit until the conversion completes.
    while bus.read_byte_data(address, 0xF3) & 0x08:
        sleep(0.005)
|
Take a forced measurement.
In forced mode, the BME sensor goes back to sleep after each
measurement and we need to set it to forced mode once at this point,
so it will take the next measurement and then return to sleep again.
In normal mode simply does new measurements periodically.
|
def _nan_minmax_object(func, fill_value, value, axis=None, **kwargs):
    """In-house nanmin/nanmax implementation for object-dtype arrays."""
    valid_count = count(value, axis=axis)
    filled = fillna(value, fill_value)
    data = getattr(np, func)(filled, axis=axis, **kwargs)
    if not hasattr(data, 'dtype'):
        # Scalar result: substitute the dtype's fill value when no element
        # was valid, then re-box as an array of the original dtype.
        if valid_count == 0:
            data = dtypes.fill_value(value.dtype)
        return np.array(data, dtype=value.dtype)
    return where_method(data, valid_count != 0)
|
In house nanmin and nanmax for object array
|
def BuildChecks(self, request):
    """Parses request and returns a list of filter callables.

    Each callable will be called with the StatEntry and returns True if the
    entry should be suppressed.

    Args:
        request: A FindSpec that describes the search.

    Returns:
        a list of callables which return True if the file is to be suppressed.
    """
    result = []
    if request.HasField("start_time") or request.HasField("end_time"):

        # NOTE: ``request`` is bound as a default argument in every closure
        # below so each filter captures the request it was built from.
        def FilterTimestamp(file_stat, request=request):
            return file_stat.HasField("st_mtime") and (
                file_stat.st_mtime < request.start_time or
                file_stat.st_mtime > request.end_time)

        result.append(FilterTimestamp)

    if request.HasField("min_file_size") or request.HasField("max_file_size"):

        def FilterSize(file_stat, request=request):
            return file_stat.HasField("st_size") and (
                file_stat.st_size < request.min_file_size or
                file_stat.st_size > request.max_file_size)

        result.append(FilterSize)

    if request.HasField("perm_mode"):

        def FilterPerms(file_stat, request=request):
            # Suppress entries whose masked mode differs from the wanted mode.
            return (file_stat.st_mode & request.perm_mask) != request.perm_mode

        result.append(FilterPerms)

    if request.HasField("uid"):

        def FilterUID(file_stat, request=request):
            return file_stat.st_uid != request.uid

        result.append(FilterUID)

    if request.HasField("gid"):

        def FilterGID(file_stat, request=request):
            return file_stat.st_gid != request.gid

        result.append(FilterGID)

    if request.HasField("path_regex"):
        regex = request.path_regex

        def FilterPath(file_stat, regex=regex):
            """Suppress any filename not matching the regular expression."""
            # NOTE(review): ``regex.Search`` (capital S) suggests a project
            # regex wrapper, not the stdlib ``re`` module — confirm.
            return not regex.Search(file_stat.pathspec.Basename())

        result.append(FilterPath)

    if request.HasField("data_regex"):

        def FilterData(file_stat, **_):
            """Suppress files that do not match the content."""
            return not self.TestFileContent(file_stat)

        result.append(FilterData)

    return result
|
Parses request and returns a list of filter callables.
Each callable will be called with the StatEntry and returns True if the
entry should be suppressed.
Args:
request: A FindSpec that describes the search.
Returns:
a list of callables which return True if the file is to be suppressed.
|
def show_script_error(self, parent):
    """
    Display the most recent script error in a dialog, or a placeholder
    message when no error has been recorded.
    """
    error_text = self.service.scriptRunner.error
    if error_text == '':
        message = _("No error information available")
    else:
        message = error_text
        # Consume the stored error and revert the tray icon/error state.
        self.service.scriptRunner.error = ''
        self.notifier.set_icon(cm.ConfigManager.SETTINGS[cm.NOTIFICATION_ICON])
        self.notifier.errorItem.hide()
        self.notifier.update_visible_status()

    dlg = Gtk.MessageDialog(type=Gtk.MessageType.INFO, buttons=Gtk.ButtonsType.OK,
                            message_format=message)
    dlg.set_title(_("View script error"))
    dlg.set_transient_for(parent)
    dlg.run()
    dlg.destroy()
|
Show the last script error (if any)
|
def print_tree(self) -> str:
    """Render this object's BEL AST as a printed tree.

    Returns:
        the tree view of the AST, or an empty string when no AST is set
    """
    if not self.ast:
        return ""
    return self.ast.print_tree(ast_obj=self.ast)
|
Convert AST object to tree view of BEL AST
Returns:
printed tree of BEL AST
|
def __arguments(self, ttype, tvalue):
    """Parse the arguments of the current command.

    The parser calls this for each token while a command's arguments
    (tests and test-lists) are being consumed.

    Syntax:
        *argument [ test / test-list ]

    :param ttype: current token type
    :param tvalue: current token value
    :return: False if an error is encountered, True otherwise
    """
    if ttype == "identifier":
        # A bare identifier here names a nested test command.
        test = get_command_instance(tvalue.decode("ascii"), self.__curcommand)
        self.__curcommand.check_next_arg("test", test)
        self.__expected = test.get_expected_first()
        self.__curcommand = test
        return self.__check_command_completion(testsemicolon=False)

    if ttype in ("left_parenthesis", "comma"):
        # Start of a test-list, or separator between tests: another
        # test identifier must follow.
        self.__set_expected("identifier")
        return True

    if ttype == "right_parenthesis":
        # End of a test-list: pop back to the parent command.
        self.__up()
        return True

    # Otherwise fall back to plain argument parsing.
    if self.__argument(ttype, tvalue):
        return self.__check_command_completion(testsemicolon=False)
    return False
|
Arguments parsing method
Entry point for command arguments parsing. The parser must
call this method for each parsed command (either a control,
action or test).
Syntax:
*argument [ test / test-list ]
:param ttype: current token type
:param tvalue: current token value
:return: False if an error is encountered, True otherwise
|
def exec_event_handler(self, event, transactional=False):
    """Execute the Async set to be run on the given event.

    Looks up ``event`` in this object's configured callbacks and starts it.
    """
    # QUESTION: Should we raise an exception if `event` is not in some
    # known event-type list?
    handler = self._options.get('callbacks', {}).get(event)
    if not handler:
        raise Exception('Handler not defined!!!')
    handler.start(transactional=transactional)
|
Execute the Async set to be run on event.
|
def _http_resp_rate_limited(response):
    """Handle a rate-limited response by scheduling a delayed retry.

    Reads the ``Retry-After`` header (defaulting to 3 seconds) and returns
    a future that completes after sleeping that long.

    :param tornado.httpclient.HTTPResponse response: The response
    :rtype: tornado.concurrent.Future
    """
    delay = int(response.headers.get('Retry-After', 3))
    host = parse.urlparse(response.request.url).netloc
    LOGGER.warning('Rate Limited by %s, retrying in %i seconds', host, delay)
    return asyncio.sleep(delay)
|
Extract the ``Retry-After`` header value if the request was rate
limited and return a future to sleep for the specified duration.
:param tornado.httpclient.HTTPResponse response: The response
:rtype: tornado.concurrent.Future
|
def exit(self, status=0, message=None):
    '''
    Terminal override of argparse's exit().

    argparse assumes exit() never returns; instead of terminating the
    process, record the message lines and raise BadSyntax, which is
    caught by Cmd.hasValidOpts.
    '''
    self.exited = True
    if message is not None:
        # Accumulate each message line for later display.
        for line in message.split('\n'):
            self.mesgs.append(line)
    raise s_exc.BadSyntax(mesg=message, prog=self.prog, status=status)
|
Argparse expects exit() to be a terminal function and not return.
As such, this function must raise an exception which will be caught
by Cmd.hasValidOpts.
|
def env_float(name, required=False, default=empty):
    """Fetch an environment variable and cast it to a float.

    A ``ValueError`` is raised when the variable is absent and no default
    was given, and also when the value cannot be cast to a float.

    :param name: The name of the environment variable to be pulled
    :type name: str
    :param required: Whether the environment variable is required. If ``True``
        and the variable is not present, a ``KeyError`` is raised.
    :type required: bool
    :param default: The value to return if the environment variable is not
        present. (Providing a default alongside setting ``required=True`` will
        raise a ``ValueError``)
    :type default: bool
    """
    raw = get_env_value(name, required=required, default=default)
    if raw is empty:
        raise ValueError(
            "`env_float` requires either a default value to be specified, or for "
            "the variable to be present in the environment"
        )
    return float(raw)
|
Pulls an environment variable out of the environment and casts it to an
float. If the name is not present in the environment and no default is
specified then a ``ValueError`` will be raised. Similarly, if the
environment value is not castable to a float, a ``ValueError`` will be
raised.
:param name: The name of the environment variable to be pulled
:type name: str
:param required: Whether the environment variable is required. If ``True``
and the variable is not present, a ``KeyError`` is raised.
:type required: bool
:param default: The value to return if the environment variable is not
present. (Providing a default alongside setting ``required=True`` will raise
a ``ValueError``)
:type default: bool
|
def flatten(self) -> bk.BKTensor:
    """Return the tensor reshaped so each of its ``rank`` axes spans the
    full 2**N qubit index space (qubit indices flattened per axis)."""
    dims = [2 ** self.qubit_nb] * self.rank
    return bk.reshape(self.tensor, dims)
|
Return tensor with qubit indices flattened
|
def cli(env, is_open):
    """List tickets."""
    ticket_mgr = SoftLayer.TicketManager(env.client)

    table = formatting.Table([
        'id', 'assigned_user', 'title', 'last_edited', 'status', 'updates', 'priority'
    ])

    tickets = ticket_mgr.list_tickets(open_status=is_open, closed_status=not is_open)
    for ticket in tickets:
        # Show the assignee's full name when present, a blank cell otherwise.
        assignee = formatting.blank()
        assigned = ticket.get('assignedUser')
        if assigned:
            assignee = "%s %s" % (assigned['firstName'], assigned['lastName'])

        table.add_row([
            ticket['id'],
            assignee,
            click.wrap_text(ticket['title']),
            ticket['lastEditDate'],
            ticket['status']['name'],
            ticket.get('updateCount', 0),
            ticket.get('priority', 0),
        ])

    env.fout(table)
|
List tickets.
|
def decode(self):
    """Decode this report into a list of readings.

    Parses the 20-byte header, verifies the embedded signature and, when
    the report is encrypted, decrypts the payload before parsing each
    16-byte reading record.

    Returns:
        tuple: (readings, events) — the parsed IOTileReading list and a
        (here always empty) second list.  Both lists are empty when the
        report cannot be verified or decrypted.
    """
    # Header layout (20 bytes, little-endian):
    #   fmt, len_low, len_high, device_id, report_id, sent_timestamp,
    #   signature_flags, origin_streamer, streamer_selector
    fmt, len_low, len_high, device_id, report_id, sent_timestamp, signature_flags, \
        origin_streamer, streamer_selector = unpack("<BBHLLLBBH", self.raw_report[:20])

    assert fmt == 1
    # Total report length is stored as a split 16-bit value.
    length = (len_high << 8) | len_low

    self.origin = device_id
    self.report_id = report_id
    self.sent_timestamp = sent_timestamp
    self.origin_streamer = origin_streamer
    self.streamer_selector = streamer_selector
    self.signature_flags = signature_flags

    assert len(self.raw_report) == length

    remaining = self.raw_report[20:]
    assert len(remaining) >= 24
    # The final 24 bytes form the footer: lowest/highest reading ids
    # (4 bytes each) followed by a 16-byte signature.
    readings = remaining[:-24]
    footer = remaining[-24:]

    lowest_id, highest_id, signature = unpack("<LL16s", footer)
    signature = bytearray(signature)

    self.lowest_id = lowest_id
    self.highest_id = highest_id
    self.signature = signature

    # Everything except the 16-byte signature itself is signed.
    signed_data = self.raw_report[:-16]

    signer = ChainedAuthProvider()

    if signature_flags == AuthProvider.NoKey:
        self.encrypted = False
    else:
        self.encrypted = True

    try:
        verification = signer.verify_report(device_id, signature_flags, signed_data, signature,
                                            report_id=report_id, sent_timestamp=sent_timestamp)
        self.verified = verification['verified']
    except NotFoundError:
        # No key material for this device: treat as unverified.
        self.verified = False

    # If we were not able to verify the report, do not try to parse or decrypt it since we
    # can't guarantee who it came from.
    if not self.verified:
        return [], []

    # If the report is encrypted, try to decrypt it before parsing the readings
    if self.encrypted:
        try:
            result = signer.decrypt_report(device_id, signature_flags, readings,
                                           report_id=report_id, sent_timestamp=sent_timestamp)
            readings = result['data']
        except NotFoundError:
            return [], []

    # Now parse all of the readings
    # Make sure this report has an integer number of readings
    assert (len(readings) % 16) == 0

    # Reconstruct absolute reading times from device-relative timestamps.
    time_base = self.received_time - datetime.timedelta(seconds=sent_timestamp)

    parsed_readings = []

    for i in range(0, len(readings), 16):
        # Each reading record is 16 bytes: stream, pad, id, timestamp, value.
        reading = readings[i:i+16]
        stream, _, reading_id, timestamp, value = unpack("<HHLLL", reading)

        parsed = IOTileReading(timestamp, stream, value, time_base=time_base, reading_id=reading_id)
        parsed_readings.append(parsed)

    return parsed_readings, []
|
Decode this report into a list of readings
|
def max(self):
    """
    Returns the maximum value of the domain.

    :rtype: `float` or `np.inf`
    """
    upper = self._max
    # Infinity cannot be converted to int; pass it through unchanged.
    if np.isinf(upper):
        return upper
    return int(upper)
|
Returns the maximum value of the domain.
:rtype: `float` or `np.inf`
|
def set_widgets(self):
    """Set widgets on the Threshold tab."""
    clear_layout(self.gridLayoutThreshold)

    # Set text in the label
    layer_purpose = self.parent.step_kw_purpose.selected_purpose()
    layer_subcategory = self.parent.step_kw_subcategory.\
        selected_subcategory()
    classification = self.parent.step_kw_classification. \
        selected_classification()

    if is_raster_layer(self.parent.layer):
        # Raster layer: derive the value range from band 1 statistics.
        statistics = self.parent.layer.dataProvider().bandStatistics(
            1, QgsRasterBandStats.All, self.parent.layer.extent(), 0)
        text = continuous_raster_question % (
            layer_purpose['name'],
            layer_subcategory['name'],
            classification['name'],
            statistics.minimumValue,
            statistics.maximumValue)
    else:
        # Vector layer: derive the value range from the selected field.
        field_name = self.parent.step_kw_field.selected_fields()
        field_index = self.parent.layer.fields().lookupField(field_name)
        min_value_layer = self.parent.layer.minimumValue(field_index)
        max_value_layer = self.parent.layer.maximumValue(field_index)
        text = continuous_vector_question % (
            layer_purpose['name'],
            layer_subcategory['name'],
            field_name,
            classification['name'],
            min_value_layer,
            max_value_layer)
    self.lblThreshold.setText(text)

    thresholds = self.parent.get_existing_keyword('thresholds')
    selected_unit = self.parent.step_kw_unit.selected_unit()['key']

    self.classes = OrderedDict()
    classes = classification.get('classes')
    # Sort by value, put the lowest first
    classes = sorted(classes, key=lambda k: k['value'])
    for i, the_class in enumerate(classes):
        class_layout = QHBoxLayout()

        # Class label
        class_label = QLabel(the_class['name'])

        # Min label
        min_label = QLabel(tr('Min >'))

        # Min value as double spin
        min_value_input = QDoubleSpinBox()
        # TODO(IS) We can set the min and max depends on the unit, later
        min_value_input.setMinimum(0)
        min_value_input.setMaximum(999999)
        if thresholds.get(the_class['key']):
            # Reuse a previously saved threshold when one exists.
            min_value_input.setValue(thresholds[the_class['key']][0])
        else:
            default_min = the_class['numeric_default_min']
            if isinstance(default_min, dict):
                # Per-unit defaults: pick the one for the selected unit.
                default_min = the_class[
                    'numeric_default_min'][selected_unit]
            min_value_input.setValue(default_min)
        min_value_input.setSingleStep(0.1)

        # Max label
        max_label = QLabel(tr('Max <='))

        # Max value as double spin
        max_value_input = QDoubleSpinBox()
        # TODO(IS) We can set the min and max depends on the unit, later
        max_value_input.setMinimum(0)
        max_value_input.setMaximum(999999)
        if thresholds.get(the_class['key']):
            max_value_input.setValue(thresholds[the_class['key']][1])
        else:
            default_max = the_class['numeric_default_max']
            if isinstance(default_max, dict):
                # Per-unit defaults: pick the one for the selected unit.
                default_max = the_class[
                    'numeric_default_max'][selected_unit]
            max_value_input.setValue(default_max)
        max_value_input.setSingleStep(0.1)

        # Add to class_layout
        class_layout.addWidget(min_label)
        class_layout.addWidget(min_value_input)
        # class_layout.addStretch(1)
        class_layout.addWidget(max_label)
        class_layout.addWidget(max_value_input)

        # Add to grid_layout
        self.gridLayoutThreshold.addWidget(class_label, i, 0)
        self.gridLayoutThreshold.addLayout(class_layout, i, 1)

        self.classes[the_class['key']] = [min_value_input, max_value_input]

    self.gridLayoutThreshold.setSpacing(0)

    def min_max_changed(index, the_string):
        """Slot when min or max value change.

        Keeps adjacent class boundaries consistent: a class's max value
        must equal the next class's min value.

        :param index: The index of the double spin.
        :type index: int

        :param the_string: The flag to indicate the min or max value.
        :type the_string: str
        """
        if the_string == 'Max value':
            # Propagate this class's max down to the next class's min.
            current_max_value = list(self.classes.values())[index][1]
            target_min_value = list(self.classes.values())[index + 1][0]
            if current_max_value.value() != target_min_value.value():
                target_min_value.setValue(current_max_value.value())
        elif the_string == 'Min value':
            # Propagate this class's min up to the previous class's max.
            current_min_value = list(self.classes.values())[index][0]
            target_max_value = list(self.classes.values())[index - 1][1]
            if current_min_value.value() != target_max_value.value():
                target_max_value.setValue(current_min_value.value())

    # Set behaviour
    for k, v in list(self.classes.items()):
        index = list(self.classes.keys()).index(k)
        if index < len(self.classes) - 1:
            # Max value changed
            v[1].valueChanged.connect(partial(
                min_max_changed, index=index, the_string='Max value'))
        if index > 0:
            # Min value
            v[0].valueChanged.connect(partial(
                min_max_changed, index=index, the_string='Min value'))
|
Set widgets on the Threshold tab.
|
def get_rows(self, sort=False):
    """
    Returns the rows of this Type2Helper.

    :param bool sort: If True the rows are sorted by the pseudo key.
    """
    items = sorted(self._rows.items()) if sort else self._rows.items()
    result = []
    for _, rows in items:
        # Convert integer date columns back to dates before returning.
        self._rows_int2date(rows)
        result.extend(rows)
    return result
|
Returns the rows of this Type2Helper.
:param bool sort: If True the rows are sorted by the pseudo key.
|
def get_cluster_graph(self, engine="fdp", graph_attr=None, node_attr=None, edge_attr=None):
    """
    Generate directory graph in the DOT language. Directories are shown as clusters.

    .. warning::

        This function scans the entire directory tree starting from top so the resulting
        graph can be really big.

    Args:
        engine: Layout command used. ['dot', 'neato', 'twopi', 'circo', 'fdp', 'sfdp', 'patchwork', 'osage']
        graph_attr: Mapping of (attribute, value) pairs for the graph.
        node_attr: Mapping of (attribute, value) pairs set for all nodes.
        edge_attr: Mapping of (attribute, value) pairs set for all edges.

    Returns: graphviz.Digraph <https://graphviz.readthedocs.io/en/stable/api.html#digraph>
    """
    # https://www.graphviz.org/doc/info/
    from graphviz import Digraph

    g = Digraph("directory", engine=engine)

    # Set graph attributes.
    g.attr(label=self.top)
    g.node_attr.update(color='lightblue2', style='filled')

    # Add input attributes.
    # BUG FIX: these three updates previously referenced an undefined name
    # ``fg``, raising NameError whenever any of the attribute mappings
    # was passed; they now update the graph ``g`` being built.
    if graph_attr is not None:
        g.graph_attr.update(**graph_attr)
    if node_attr is not None:
        g.node_attr.update(**node_attr)
    if edge_attr is not None:
        g.edge_attr.update(**edge_attr)

    def node_kwargs(path):
        # Common Digraph.node options; nodes are labelled with the basename.
        return dict(
            shape="record",
            fontsize="8.0",
            label=os.path.basename(path),
        )

    edge_kwargs = dict(arrowType="vee", style="solid", minlen="1")
    cluster_kwargs = dict(rankdir="LR", pagedir="BL", style="rounded", bgcolor="azure2")

    # TODO: Write other method without clusters if not walk.
    exclude_top_node = False
    for root, dirs, files in os.walk(self.top):
        if exclude_top_node and root == self.top:
            continue
        # Each directory becomes a cluster named after its full path.
        cluster_name = "cluster_%s" % root
        with g.subgraph(name=cluster_name) as d:
            d.attr(**cluster_kwargs)
            d.attr(rank="source" if (files or dirs) else "sink")
            d.attr(label=os.path.basename(root))
            for f in files:
                filepath = os.path.join(root, f)
                d.node(filepath, **node_kwargs(filepath))
                if os.path.islink(filepath):
                    # Follow the link and use the relpath wrt link as label.
                    realp = os.path.realpath(filepath)
                    realp = os.path.relpath(realp, filepath)
                    g.edge(filepath, realp, **edge_kwargs)

            for dirname in dirs:
                dirpath = os.path.join(root, dirname)
                # Link this directory's cluster to each child directory cluster.
                new_cluster_name = "cluster_%s" % dirpath
                d.edge(cluster_name, new_cluster_name, **edge_kwargs)

    return g
|
Generate directory graph in the DOT language. Directories are shown as clusters
.. warning::
This function scans the entire directory tree starting from top so the resulting
graph can be really big.
Args:
engine: Layout command used. ['dot', 'neato', 'twopi', 'circo', 'fdp', 'sfdp', 'patchwork', 'osage']
graph_attr: Mapping of (attribute, value) pairs for the graph.
node_attr: Mapping of (attribute, value) pairs set for all nodes.
edge_attr: Mapping of (attribute, value) pairs set for all edges.
Returns: graphviz.Digraph <https://graphviz.readthedocs.io/en/stable/api.html#digraph>
|
def _define_absl_flag(self, flag_instance, suppress):
    """Defines an argparse argument from the given absl flag_instance."""
    flag_name = flag_instance.name
    names = ['--' + flag_name]
    if flag_instance.short_name:
        # Short form goes first so it appears first in help output.
        names.insert(0, '-' + flag_instance.short_name)

    if suppress:
        helptext = argparse.SUPPRESS
    else:
        # argparse help string uses %-formatting. Escape the literal %'s.
        helptext = flag_instance.help.replace('%', '%%')

    if flag_instance.boolean:
        # Boolean flags additionally accept a --no<name> long form.
        names.append('--no' + flag_name)
        action = _BooleanFlagAction
    else:
        action = _FlagAction

    self.add_argument(
        *names, action=action, help=helptext,
        metavar=flag_name.upper(),
        flag_instance=flag_instance)
|
Defines a flag from the flag_instance.
|
def filesys_decode(path):
    """
    Ensure that the given path is decoded to text.

    Returns ``None`` when none of the expected encodings can decode it.
    """
    if isinstance(path, six.text_type):
        return path

    fs_enc = sys.getfilesystemencoding() or 'utf-8'
    # Try the filesystem encoding first, then fall back to UTF-8.
    for enc in (fs_enc, 'utf-8'):
        try:
            return path.decode(enc)
        except UnicodeDecodeError:
            continue
|
Ensure that the given path is decoded;
returns ``None`` when no expected encoding works
|
def default_output_format(content_type='application/json', apply_globally=False, api=None, cli=False, http=True):
    """A decorator that allows you to override the default output format for an API"""
    def decorator(formatter):
        wrapped = hug.output_format.content_type(content_type)(formatter)
        if apply_globally:
            # Install as the process-wide default for the selected interfaces.
            if http:
                hug.defaults.output_format = wrapped
            if cli:
                hug.defaults.cli_output_format = wrapped
        else:
            # Attach only to the specified (or inferred) API instance.
            target = hug.API(api) if api else hug.api.from_object(wrapped)
            if http:
                target.http.output_format = wrapped
            if cli:
                target.cli.output_format = wrapped
        return wrapped
    return decorator
|
A decorator that allows you to override the default output format for an API
|
def get_namespace(self, uri):
    """Return a :class:`.Namespace` corresponding to the given ``uri``.

    A relative ``uri`` (one without a leading slash ``/``) is adjusted to
    be relative to the ``uri`` of this namespace itself, which makes this
    method mostly useful off of the built-in ``local`` namespace
    (see :ref:`namespace_local`).

    Most templates should load namespaces with the ``<%namespace>`` tag
    instead; since those are all evaluated before the template body runs,
    this method exists for locating namespaces from expressions generated
    inside the body, or for conditionally selecting a namespace.
    """
    key = (self, uri)
    # Namespaces are cached per (namespace, uri) pair on the context.
    try:
        return self.context.namespaces[key]
    except KeyError:
        ns = TemplateNamespace(uri, self.context._copy(),
                               templateuri=uri,
                               calling_uri=self._templateuri)
        self.context.namespaces[key] = ns
        return ns
|
Return a :class:`.Namespace` corresponding to the given ``uri``.
If the given ``uri`` is a relative URI (i.e. it does not
contain a leading slash ``/``), the ``uri`` is adjusted to
be relative to the ``uri`` of the namespace itself. This
method is therefore mostly useful off of the built-in
``local`` namespace, described in :ref:`namespace_local`.
In
most cases, a template wouldn't need this function, and
should instead use the ``<%namespace>`` tag to load
namespaces. However, since all ``<%namespace>`` tags are
evaluated before the body of a template ever runs,
this method can be used to locate namespaces using
expressions that were generated within the body code of
the template, or to conditionally use a particular
namespace.
|
def fetch_artifact(self, trial_id, prefix):
    """Return the local path to a trial's artifacts under ``prefix``.

    Ensures that everything under the artifact prefix is available
    locally, syncing from the upload dir first when one is configured.
    The returned path is always ``{log_dir}/{trial_id}/{prefix}``.
    """
    # TODO: general windows concern: local prefix will be in
    # backslashes but remote dirs will be expecting /
    # TODO: having s3 logic split between project and sync.py
    # worries me
    local_path = os.path.join(self.log_dir, trial_id, prefix)
    if self.upload_dir:
        # Remote paths always use forward slashes.
        remote_path = '/'.join([self.upload_dir, trial_id, prefix])
        _remote_to_local_sync(remote_path, local_path)
    return local_path
|
Verifies that all children of the artifact prefix path are
available locally. Fetches them if not.
Returns the local path to the given trial's artifacts at the
specified prefix, which is always just
{log_dir}/{trial_id}/{prefix}
|
def _input_as_list(self, data):
    '''Takes the positional arguments as input in a list.

    The list input here should be [query_file_path, database_file_path,
    output_file_path]. All three paths must be absolute.

    Raises ApplicationError on relative paths or when any of the
    -t/-q/-mask/-qMask/-repeats/-out parameters has an invalid value.
    '''
    query, database, output = data
    if (not isabs(database)) \
            or (not isabs(query)) \
            or (not isabs(output)):
        raise ApplicationError("Only absolute paths allowed.\n%s" %
                               ', '.join(data))
    self._database = FilePath(database)
    self._query = FilePath(query)
    self._output = ResultPath(output, IsWritten=True)
    # check parameters that can only take a particular set of values
    # check combination of database and query type
    if self.Parameters['-t'].isOn() and self.Parameters['-q'].isOn() and \
        (self.Parameters['-t'].Value, self.Parameters['-q'].Value) not in \
            self._valid_combinations:
        # BUG FIX: was self.Paramters['-t'] (typo) -> AttributeError
        # whenever this error path was reached.
        error_message = "Invalid combination of database and query " + \
                        "types ('%s', '%s').\n" % \
                        (self.Parameters['-t'].Value,
                         self.Parameters['-q'].Value)
        error_message += "Must be one of: %s\n" % \
                         repr(self._valid_combinations)
        raise ApplicationError(error_message)
    # Single-parameter value checks; each raises ApplicationError on an
    # invalid value. (BUG FIXES folded in: '-mask' and '-out' messages
    # previously printed the Parameter object instead of its .Value, and
    # the '-repeats' message looked up the nonexistent key '-repeat'.)
    self._check_param_value('-t', 'database', self._database_types)
    self._check_param_value('-q', 'query', self._query_types)
    self._check_param_value('-mask', 'mask', self._mask_types)
    self._check_param_value('-qMask', 'qMask', self._mask_types)
    self._check_param_value('-repeats', 'repeat', self._mask_types)
    self._check_param_value('-out', 'output', self._out_types)
    return ''

def _check_param_value(self, flag, label, allowed):
    '''Raise ApplicationError if parameter *flag* is on with a value
    outside *allowed*; *label* names the parameter in the message.'''
    if self.Parameters[flag].isOn() and \
            self.Parameters[flag].Value not in allowed:
        error_message = "Invalid %s type %s\n" % \
            (label, self.Parameters[flag].Value)
        error_message += "Allowed values: %s\n" % \
            ', '.join(allowed)
        raise ApplicationError(error_message)
|
Takes the positional arguments as input in a list.
The list input here should be [query_file_path, database_file_path,
output_file_path]
|
def _set_zone(self, v, load=False):
    """
    Setter method for zone, mapped from YANG variable /zoning/defined_configuration/zone (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_zone is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_zone() directly.
    """
    # NOTE: auto-generated pyangbind setter — do not hand-edit the
    # YANGDynClass invocation; it must match the generated-type string
    # in the ValueError payload below.
    # Unwrap pyangbind "unified type" wrappers back to the base value.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value as the YANG 'zone' list type; raises on
        # any value not coercible to the list type.
        t = YANGDynClass(v,base=YANGListType("zone_name",zone.zone, yang_name="zone", rest_name="zone", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='zone-name', extensions={u'tailf-common': {u'info': u'List of defined Zones', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'zone_defined_zone'}}), is_container='list', yang_name="zone", rest_name="zone", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'List of defined Zones', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'zone_defined_zone'}}, namespace='urn:brocade.com:mgmt:brocade-zone', defining_module='brocade-zone', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """zone must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("zone_name",zone.zone, yang_name="zone", rest_name="zone", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='zone-name', extensions={u'tailf-common': {u'info': u'List of defined Zones', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'zone_defined_zone'}}), is_container='list', yang_name="zone", rest_name="zone", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'List of defined Zones', u'cli-no-key-completion': None, u'cli-full-no': None, u'cli-suppress-key-abbreviation': None, u'cli-full-command': None, u'callpoint': u'zone_defined_zone'}}, namespace='urn:brocade.com:mgmt:brocade-zone', defining_module='brocade-zone', yang_type='list', is_config=True)""",
        })
    self.__zone = t
    # Fire the parent container's change hook, if one is registered.
    if hasattr(self, '_set'):
        self._set()
|
Setter method for zone, mapped from YANG variable /zoning/defined_configuration/zone (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_zone is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_zone() directly.
|
def affine_shift_matrix(wrg=(-0.1, 0.1), hrg=(-0.1, 0.1), w=200, h=200):
    """Create an affine transform matrix for image shifting.
    NOTE: In OpenCV, x is width and y is height.

    Parameters
    -----------
    wrg : float or tuple of floats
        Range to shift on width axis, -1 ~ 1.
            - float, a fixed distance.
            - tuple of 2 floats, randomly sample a value as the distance between these 2 values.
    hrg : float or tuple of floats
        Range to shift on height axis, -1 ~ 1.
            - float, a fixed distance.
            - tuple of 2 floats, randomly sample a value as the distance between these 2 values.
    w, h : int
        The width and height of the image.

    Returns
    -------
    numpy.array
        An affine transform matrix.

    """

    def _distance(rng, size):
        # A tuple means "sample uniformly in [lo, hi]"; a scalar is fixed.
        if isinstance(rng, tuple):
            return np.random.uniform(rng[0], rng[1]) * size
        return rng * size

    tx = _distance(wrg, w)
    ty = _distance(hrg, h)
    return np.array([[1, 0, tx],
                     [0, 1, ty],
                     [0, 0, 1]])
|
Create an affine transform matrix for image shifting.
NOTE: In OpenCV, x is width and y is height.
Parameters
-----------
wrg : float or tuple of floats
Range to shift on width axis, -1 ~ 1.
- float, a fixed distance.
- tuple of 2 floats, randomly sample a value as the distance between these 2 values.
hrg : float or tuple of floats
Range to shift on height axis, -1 ~ 1.
- float, a fixed distance.
- tuple of 2 floats, randomly sample a value as the distance between these 2 values.
w, h : int
The width and height of the image.
Returns
-------
numpy.array
An affine transform matrix.
|
def add_node(self, node_id, name, labels):
    """Add the node with name and labels.

    Args:
        node_id: Id for the node.
        name: Name for the node.
        labels: Labels for the node.

    Raises:
        NotImplementedError: When adding labels is not supported.
    """
    properties = {'node_id': node_id, 'name': name}
    node = self.graph_db.get_or_create_indexed_node(
        'Node', 'node_id', node_id, properties)
    try:
        node.add_labels(*labels)
    except NotImplementedError:
        # Backend without label support: keep the node, skip labels.
        pass
|
Add the node with name and labels.
Args:
node_id: Id for the node.
name: Name for the node.
labels: Label for the node.
Raises:
NotImplementedError: When adding labels is not supported.
|
def create_function_f_i(self):
    """state reinitialization (reset) function"""
    arg_names = ['t', 'x', 'y', 'm', 'p', 'c', 'pre_c', 'ng', 'nu']
    arg_exprs = [self.t, self.x, self.y, self.m, self.p,
                 self.c, self.pre_c, self.ng, self.nu]
    return ca.Function('f_i', arg_exprs, [self.f_i],
                       arg_names, ['x_n'], self.func_opt)
|
state reinitialization (reset) function
|
def UV_B(Bg, gw):
    """
    returns the implications UV based on B
    Bg = B(g), g∈2^M
    gw = |M|, M is the set of all attributes
    """
    width = Bwidth(gw)
    implications = []
    # Walk the bits of Bg from the highest position down to 0.
    for bit in range(width - 1, -1, -1):
        if Bg & (1 << bit):
            implications.append(B012(bit, gw - 1))
    return implications
|
returns the implications UV based on B
Bg = B(g), g∈2^M
gw = |M|, M is the set of all attributes
|
def currentEvent(self):
    '''
    Return the first event that hasn't ended yet, or if there are no
    future events, the last one to end.
    '''
    # Capture one timestamp so the two filters cannot straddle an event
    # boundary: with separate timezone.now() calls, an event ending
    # between them would be missed by both queries.
    now = timezone.now()
    event = self.recentEvents.filter(
        endTime__gte=now
    ).order_by('startTime').first()
    if not event:
        event = self.recentEvents.filter(
            endTime__lte=now
        ).order_by('-endTime').first()
    return event
|
Return the first event that hasn't ended yet, or if there are no
future events, the last one to end.
|
def get_matching_service_template_file(service_name, template_files):
    """
    Return the template file that goes with the given service name, or return
    None if there's no match. Subservices return the parent service's file.
    """
    # Subservices ("web.worker") use the parent service's ("web") template.
    parent_name = service_name.split('.')[0]
    return template_files.get(parent_name)
|
Return the template file that goes with the given service name, or return
None if there's no match. Subservices return the parent service's file.
|
def flags(rule_or_module, variable_name, condition, values = []):
    """ Specifies the flags (variables) that must be set on targets under certain
        conditions, described by arguments.
        rule_or_module:   If contains dot, should be a rule name.
                          The flags will be applied when that rule is
                          used to set up build actions.
                          If does not contain dot, should be a module name.
                          The flags will be applied for all rules in that
                          module.
                          If module for rule is different from the calling
                          module, an error is issued.
        variable_name:    Variable that should be set on target
        condition         A condition when this flag should be applied.
                          Should be set of property sets. If one of
                          those property sets is contained in build
                          properties, the flag will be used.
                          Implied values are not allowed:
                          "<toolset>gcc" should be used, not just
                          "gcc". Subfeatures, like in "<toolset>gcc-3.2"
                          are allowed. If left empty, the flag will
                          always be used.
                          Property sets may use value-less properties
                          ('<a>'  vs. '<a>value') to match absent
                          properties. This allows to separately match
                             <architecture>/<address-model>64
                             <architecture>ia64/<address-model>
                          Where both features are optional. Without this
                          syntax we'd be forced to define "default" value.
        values:           The value to add to variable. If <feature>
                          is specified, then the value of 'feature'
                          will be added.
    """
    assert isinstance(rule_or_module, basestring)
    assert isinstance(variable_name, basestring)
    assert is_iterable_typed(condition, basestring)
    assert is_iterable(values) and all(isinstance(v, (basestring, type(None))) for v in values)
    caller = bjam.caller()
    if not '.' in rule_or_module and caller and caller[:-1].startswith("Jamfile"):
        # Unqualified rule name, used inside Jamfile. Most likely used with
        # 'make' or 'notfile' rules. This prevents setting flags on the entire
        # Jamfile module (this will be considered as rule), but who cares?
        # Probably, 'flags' rule should be split into 'flags' and
        # 'flags-on-module'.
        rule_or_module = qualify_jam_action(rule_or_module, caller)
    else:
        # FIXME: revive checking that we don't set flags for a different
        # module unintentionally
        pass
    if condition and not replace_grist (condition, ''):
        # We have condition in the form '<feature>', that is, without
        # value. That's a previous syntax:
        #
        #   flags gcc.link RPATH <dll-path> ;
        # for compatibility, convert it to
        #   flags gcc.link RPATH : <dll-path> ;
        values = [ condition ]
        condition = None
    if condition:
        # Parse each '/'-joined condition string into a property set,
        # expanding subfeatures (e.g. "<toolset>gcc-3.2").
        transformed = []
        for c in condition:
            # FIXME: 'split' might be a too raw tool here.
            pl = [property.create_from_string(s,False,True) for s in c.split('/')]
            pl = feature.expand_subfeatures(pl);
            transformed.append(property_set.create(pl))
        condition = transformed
        property.validate_property_sets(condition)
    # Record the (rule/module, variable, condition, values) association
    # in the module-level flag table.
    __add_flag (rule_or_module, variable_name, condition, values)
|
Specifies the flags (variables) that must be set on targets under certain
conditions, described by arguments.
rule_or_module: If contains dot, should be a rule name.
The flags will be applied when that rule is
used to set up build actions.
If does not contain dot, should be a module name.
The flags will be applied for all rules in that
module.
If module for rule is different from the calling
module, an error is issued.
variable_name: Variable that should be set on target
condition A condition when this flag should be applied.
Should be set of property sets. If one of
those property sets is contained in build
properties, the flag will be used.
Implied values are not allowed:
"<toolset>gcc" should be used, not just
"gcc". Subfeatures, like in "<toolset>gcc-3.2"
are allowed. If left empty, the flag will
always be used.
Property sets may use value-less properties
('<a>' vs. '<a>value') to match absent
properties. This allows to separately match
<architecture>/<address-model>64
<architecture>ia64/<address-model>
Where both features are optional. Without this
syntax we'd be forced to define "default" value.
values: The value to add to variable. If <feature>
is specified, then the value of 'feature'
will be added.
|
def NDLimitExceeded_NDLimit(self, **kwargs):
    """Auto Generated Code

    Builds the NDLimitExceeded/NDLimit notification config element and
    passes it to the callback (kwargs['callback'] or self._callback).
    """
    config = ET.Element("config")
    nd_limit_exceeded = ET.SubElement(
        config, "NDLimitExceeded",
        xmlns="http://brocade.com/ns/brocade-notification-stream")
    nd_limit = ET.SubElement(nd_limit_exceeded, "NDLimit")
    nd_limit.text = kwargs.pop('NDLimit')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
Auto Generated Code
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.