| code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k) |
|---|---|
def _updateModelDBResults(self):
""" Retrieves the current results and updates the model's record in
the Model database.
"""
# -----------------------------------------------------------------------
# Get metrics
metrics = self._getMetrics()
# -----------------------------------------------------------------------
# Extract report metrics that match the requested report REs
reportDict = dict([(k,metrics[k]) for k in self._reportMetricLabels])
# -----------------------------------------------------------------------
# Extract the report item that matches the optimize key RE
# TODO cache optimizedMetricLabel sooner
optimizeDict = dict()
if self._optimizeKeyPattern is not None:
optimizeDict[self._optimizedMetricLabel] = \
metrics[self._optimizedMetricLabel]
# -----------------------------------------------------------------------
# Update model results
results = json.dumps((metrics , optimizeDict))
self._jobsDAO.modelUpdateResults(self._modelID, results=results,
metricValue=optimizeDict.values()[0],
numRecords=(self._currentRecordIndex + 1))
self._logger.debug(
"Model Results: modelID=%s; numRecords=%s; results=%s" % \
(self._modelID, self._currentRecordIndex + 1, results))
return
|
Retrieves the current results and updates the model's record in
the Model database.
|
def sort_cyclic_graph_best_effort(graph, pick_first='head'):
"""Fallback for cases in which the graph has cycles."""
ordered = []
visited = set()
# Go first on the pick_first chain then go back again on the others
# that were not visited. Given the way the graph is built both chains
# will always contain all the elements.
if pick_first == 'head':
fst_attr, snd_attr = ('head_node', 'update_node')
else:
fst_attr, snd_attr = ('update_node', 'head_node')
current = FIRST
while current is not None:
visited.add(current)
current = getattr(graph[current], fst_attr)
if current not in visited and current is not None:
ordered.append(current)
current = FIRST
while current is not None:
visited.add(current)
current = getattr(graph[current], snd_attr)
if current not in visited and current is not None:
ordered.append(current)
return ordered
|
Fallback for cases in which the graph has cycles.
|
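A toy illustration of the traversal above, under two assumptions not stated in the snippet: `FIRST` names the key of the starting node, and the add/advance/append statements form the body of each `while` loop. The node type and graph below are invented.
from collections import namedtuple

# Hypothetical node type exposing the two link attributes the sorter reads.
Node = namedtuple('Node', ['head_node', 'update_node'])

FIRST = 'a'
graph = {
    'a': Node(head_node='b', update_node='c'),
    'b': Node(head_node='c', update_node=None),
    'c': Node(head_node=None, update_node='b'),  # cycle b -> c -> b across the two chains
}

# Walking the head chain visits a -> b -> c and appends 'b' and 'c';
# the update chain then only finds already-visited nodes.
print(sort_cyclic_graph_best_effort(graph))  # ['b', 'c'] under the assumptions above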
def download_manifest_v2(self, manifest, replica,
num_retries=10,
min_delay_seconds=0.25,
download_dir='.'):
"""
Process the given manifest file in TSV (tab-separated values) format and download the files referenced by it.
The files are downloaded in the version 2 format.
This download format will serve as the main storage format for downloaded files. If a user specifies a different
format for download (coming in the future) the files will first be downloaded in this format, then hard-linked
to the user's preferred format.
:param str manifest: path to a TSV (tab-separated values) file listing files to download
:param str replica: the replica to download from. The supported replicas are: `aws` for Amazon Web Services, and
`gcp` for Google Cloud Platform. [aws, gcp]
:param int num_retries: The initial quota of download failures to accept before exiting due to
failures. The number of retries increases and decreases as file chunks succeed and fail.
:param float min_delay_seconds: The minimum number of seconds to wait in between retries.
Process the given manifest file in TSV (tab-separated values) format and download the files
referenced by it.
Each row in the manifest represents one file in DSS. The manifest must have a header row. The header row
must declare the following columns:
* `file_uuid` - the UUID of the file in DSS.
* `file_version` - the version of the file in DSS.
The TSV may have additional columns. Those columns will be ignored. The ordering of the columns is
insignificant because the TSV is required to have a header row.
"""
fieldnames, rows = self._parse_manifest(manifest)
errors = 0
with concurrent.futures.ThreadPoolExecutor(max_workers=self.threads) as executor:
futures_to_dss_file = {}
for row in rows:
dss_file = DSSFile.from_manifest_row(row, replica)
future = executor.submit(self._download_to_filestore, download_dir, dss_file,
num_retries=num_retries, min_delay_seconds=min_delay_seconds)
futures_to_dss_file[future] = dss_file
for future in concurrent.futures.as_completed(futures_to_dss_file):
dss_file = futures_to_dss_file[future]
try:
future.result()
except Exception as e:
errors += 1
logger.warning('Failed to download file %s version %s from replica %s',
dss_file.uuid, dss_file.version, dss_file.replica, exc_info=e)
if errors:
raise RuntimeError('{} file(s) failed to download'.format(errors))
else:
self._write_output_manifest(manifest, download_dir)
|
Process the given manifest file in TSV (tab-separated values) format and download the files referenced by it.
The files are downloaded in the version 2 format.
This download format will serve as the main storage format for downloaded files. If a user specifies a different
format for download (coming in the future) the files will first be downloaded in this format, then hard-linked
to the user's preferred format.
:param str manifest: path to a TSV (tab-separated values) file listing files to download
:param str replica: the replica to download from. The supported replicas are: `aws` for Amazon Web Services, and
`gcp` for Google Cloud Platform. [aws, gcp]
:param int num_retries: The initial quota of download failures to accept before exiting due to
failures. The number of retries increases and decreases as file chunks succeed and fail.
:param float min_delay_seconds: The minimum number of seconds to wait in between retries.
Process the given manifest file in TSV (tab-separated values) format and download the files
referenced by it.
Each row in the manifest represents one file in DSS. The manifest must have a header row. The header row
must declare the following columns:
* `file_uuid` - the UUID of the file in DSS.
* `file_version` - the version of the file in DSS.
The TSV may have additional columns. Those columns will be ignored. The ordering of the columns is
insignificant because the TSV is required to have a header row.
|
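As a rough illustration of the manifest layout described above, a minimal TSV with the two required columns (plus one ignored extra column) could be written and parsed like this; the UUID and version values are made-up placeholders:
import csv
import io

manifest_text = (
    "file_uuid\tfile_version\tfile_name\n"
    "66660000-aaaa-bbbb-cccc-000000000001\t2019-01-01T000000.000000Z\tsample.fastq.gz\n"
)

# Only file_uuid and file_version are required; extra columns are ignored.
rows = list(csv.DictReader(io.StringIO(manifest_text), delimiter="\t"))
print(rows[0]["file_uuid"], rows[0]["file_version"])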
def edit(self, **args):
'''
Doesn't require manually fetching the gist ID of a gist;
passing the gist name is enough to edit the gist
'''
self.gist_name = ''
if 'description' in args:
self.description = args['description']
else:
self.description = ''
if 'name' in args and 'id' in args:
self.gist_name = args['name']
self.gist_id = args['id']
elif 'name' in args:
self.gist_name = args['name']
self.gist_id = self.getMyID(self.gist_name)
elif 'id' in args:
self.gist_id = args['id']
else:
raise Exception('Gist Name/ID must be provided')
if 'content' in args:
self.content = args['content']
else:
raise Exception('Gist content can\'t be empty')
if (self.gist_name == ''):
self.gist_name = self.getgist(id=self.gist_id)
data = {"description": self.description,
"files": {
self.gist_name: {
"content": self.content
}
}
}
else:
data = {"description": self.description,
"files": {
self.gist_name: {
"content": self.content
}
}
}
if self.gist_id:
r = requests.patch(
'%s/gists/%s'%(BASE_URL,self.gist_id),
headers=self.gist.header,
data=json.dumps(data),
)
if (r.status_code == 200):
r_text = json.loads(r.text)
response = {
'updated_content': self.content,
'created_at': r.json()['created_at'],
'comments':r.json()['comments']
}
return response
raise Exception('No such gist found')
|
Doesn't require manually fetching the gist ID of a gist;
passing the gist name is enough to edit the gist
|
def change_selected(self,new_fit):
"""
Updates the passed-in fit or index as the current fit for the editor (does not affect the parent);
if no parameter is passed in, the first fit is set as current.
@param: new_fit -> fit object or index to highlight as selected
"""
if len(self.fit_list)==0: return
if self.search_query and self.parent.current_fit not in [x[0] for x in self.fit_list]: return
if self.current_fit_index == None:
if not self.parent.current_fit: return
for i,(fit,specimen) in enumerate(self.fit_list):
if fit == self.parent.current_fit:
self.current_fit_index = i
break
i = 0
if isinstance(new_fit, Fit):
for i, (fit,speci) in enumerate(self.fit_list):
if fit == new_fit:
break
elif type(new_fit) is int:
i = new_fit
elif new_fit != None:
print(('cannot select fit of type: ' + str(type(new_fit))))
if self.current_fit_index != None and \
len(self.fit_list) > 0 and \
self.fit_list[self.current_fit_index][0] in self.parent.bad_fits:
self.logger.SetItemBackgroundColour(self.current_fit_index,"")
else:
self.logger.SetItemBackgroundColour(self.current_fit_index,"WHITE")
self.current_fit_index = i
if self.fit_list[self.current_fit_index][0] in self.parent.bad_fits:
self.logger.SetItemBackgroundColour(self.current_fit_index,"red")
else:
self.logger.SetItemBackgroundColour(self.current_fit_index,"LIGHT BLUE")
|
Updates the passed-in fit or index as the current fit for the editor (does not affect the parent);
if no parameter is passed in, the first fit is set as current.
@param: new_fit -> fit object or index to highlight as selected
|
def has_false(self, e, extra_constraints=(), solver=None, model_callback=None): #pylint:disable=unused-argument
"""
Should return True if `e` can possibly be False.
:param e: The AST.
:param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve.
:param solver: A solver, for backends that require it.
:param model_callback: a function that will be executed with recovered models (if any)
:return: A boolean.
"""
#if self._solver_required and solver is None:
# raise BackendError("%s requires a solver for evaluation" % self.__class__.__name__)
return self._has_false(self.convert(e), extra_constraints=extra_constraints, solver=solver, model_callback=model_callback)
|
Should return True if `e` can possibly be False.
:param e: The AST.
:param extra_constraints: Extra constraints (as ASTs) to add to the solver for this solve.
:param solver: A solver, for backends that require it.
:param model_callback: a function that will be executed with recovered models (if any)
:return: A boolean.
|
def _ansi_color(code, theme):
"""
Converts an ansi code to a QColor, taking the color scheme (theme) into account.
"""
red = 170 if code & 1 else 0
green = 170 if code & 2 else 0
blue = 170 if code & 4 else 0
color = QtGui.QColor(red, green, blue)
if theme is not None:
mappings = {
'#aa0000': theme.red,
'#00aa00': theme.green,
'#aaaa00': theme.yellow,
'#0000aa': theme.blue,
'#aa00aa': theme.magenta,
'#00aaaa': theme.cyan,
'#000000': theme.background,
"#ffffff": theme.foreground
}
try:
return mappings[color.name()]
except KeyError:
pass
return color
|
Converts an ansi code to a QColor, taking the color scheme (theme) into account.
|
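The helper above maps the low three bits of the ANSI code to the red, green and blue channels before applying the theme overrides. A Qt-free sketch of just that bit mapping:
def ansi_base_color(code):
    # Bit 0 -> red, bit 1 -> green, bit 2 -> blue; each set bit contributes
    # the dim intensity 170 (0xaa), mirroring the helper above.
    red = 170 if code & 1 else 0
    green = 170 if code & 2 else 0
    blue = 170 if code & 4 else 0
    return '#%02x%02x%02x' % (red, green, blue)

print(ansi_base_color(1))  # '#aa0000' (red)
print(ansi_base_color(3))  # '#aaaa00' (yellow)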
def many(parser):
"""Applies the parser to input zero or more times.
Returns a list of parser results.
"""
results = []
terminate = object()
while local_ps.value:
result = optional(parser, terminate)
if result == terminate:
break
results.append(result)
return results
|
Applies the parser to input zero or more times.
Returns a list of parser results.
|
def size(self):
""" Returns the total number of queued jobs on the queue """
if self.id.endswith("/"):
subqueues = self.get_known_subqueues()
if len(subqueues) == 0:
return 0
else:
with context.connections.redis.pipeline(transaction=False) as pipe:
for subqueue in subqueues:
pipe.get("queuesize:%s" % subqueue)
return [int(size or 0) for size in pipe.execute()]
else:
return int(context.connections.redis.get("queuesize:%s" % self.id) or 0)
|
Returns the total number of queued jobs on the queue
|
def process_event(self, event_id):
"""Process event in Celery."""
with db.session.begin_nested():
event = Event.query.get(event_id)
event._celery_task = self # internal binding to a Celery task
event.receiver.run(event) # call run directly to avoid circular calls
flag_modified(event, 'response')
flag_modified(event, 'response_headers')
db.session.add(event)
db.session.commit()
|
Process event in Celery.
|
def _update_capacity(self, data):
""" Update the consumed capacity metrics """
if 'ConsumedCapacity' in data:
# This is all for backwards compatibility
consumed = data['ConsumedCapacity']
if not isinstance(consumed, list):
consumed = [consumed]
for cap in consumed:
self.capacity += cap.get('CapacityUnits', 0)
self.table_capacity += cap.get('Table',
{}).get('CapacityUnits', 0)
local_indexes = cap.get('LocalSecondaryIndexes', {})
for k, v in six.iteritems(local_indexes):
self.indexes.setdefault(k, 0)
self.indexes[k] += v['CapacityUnits']
global_indexes = cap.get('GlobalSecondaryIndexes', {})
for k, v in six.iteritems(global_indexes):
self.global_indexes.setdefault(k, 0)
self.global_indexes[k] += v['CapacityUnits']
|
Update the consumed capacity metrics
|
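For reference, the payload shape the method above walks looks roughly like the sample below; the field names follow the DynamoDB ConsumedCapacity structure the code reads, and the numbers are invented:
sample_data = {
    'ConsumedCapacity': {
        'CapacityUnits': 6.0,
        'Table': {'CapacityUnits': 2.0},
        'LocalSecondaryIndexes': {'local-idx': {'CapacityUnits': 1.0}},
        'GlobalSecondaryIndexes': {'global-idx': {'CapacityUnits': 3.0}},
    }
}
# _update_capacity(sample_data) would add 6.0 to self.capacity, 2.0 to
# self.table_capacity, and accumulate the per-index units into
# self.indexes and self.global_indexes.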
def __valueKeyWithHeaderIndex(self, values):
"""
This is a helper function, so that we can match decision values with the row index
as represented in the header index.
Args:
values (dict): Normally this will be a dict of header names and the values from the decision.
Return:
>>> return()
{
int(index of headerName in self.header) : values[headerName],
...
}
"""
machingIndexes = {}
for index, name in enumerate(self.header):
if name in values:
machingIndexes[index] = values[name]
return machingIndexes
|
This is a helper function, so that we can match decision values with the row index
as represented in the header index.
Args:
values (dict): Normally this will be a dict of header names and the values from the decision.
Return:
>>> return()
{
int(index of headerName in self.header) : values[headerName],
...
}
|
def swap(self, old_chunks, new_chunk):
"""Swaps old consecutive chunks with new chunk.
Args:
old_chunks (:obj:`budou.chunk.ChunkList`): List of consecutive Chunks to
be removed.
new_chunk (:obj:`budou.chunk.Chunk`): A Chunk to be inserted.
"""
indexes = [self.index(chunk) for chunk in old_chunks]
del self[indexes[0]:indexes[-1] + 1]
self.insert(indexes[0], new_chunk)
|
Swaps old consecutive chunks with new chunk.
Args:
old_chunks (:obj:`budou.chunk.ChunkList`): List of consecutive Chunks to
be removed.
new_chunk (:obj:`budou.chunk.Chunk`): A Chunk to be inserted.
|
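In terms of built-in list operations, the swap above behaves like this small sketch (plain strings stand in for Chunk objects):
chunks = ['a', 'b', 'c', 'd']
old = ['b', 'c']                        # consecutive elements to replace
indexes = [chunks.index(c) for c in old]
del chunks[indexes[0]:indexes[-1] + 1]  # remove the old span
chunks.insert(indexes[0], 'bc')         # insert the merged chunk in its place
print(chunks)  # ['a', 'bc', 'd']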
def load(cls, primary_key, convert_key=True):
"""
Retrieve a model instance by primary key.
:param primary_key: The primary key of the model instance.
:returns: Corresponding :py:class:`Model` instance.
:raises: ``KeyError`` if object with given primary key does
not exist.
"""
if convert_key:
primary_key = cls._query.get_primary_hash_key(primary_key)
if not cls.__database__.hash_exists(primary_key):
raise KeyError('Object not found.')
raw_data = cls.__database__.hgetall(primary_key)
if PY3:
raw_data = decode_dict_keys(raw_data)
data = {}
for name, field in cls._fields.items():
if isinstance(field, _ContainerField):
continue
elif name in raw_data:
data[name] = field.python_value(raw_data[name])
else:
data[name] = None
return cls(**data)
|
Retrieve a model instance by primary key.
:param primary_key: The primary key of the model instance.
:returns: Corresponding :py:class:`Model` instance.
:raises: ``KeyError`` if object with given primary key does
not exist.
|
def read(self):
""" read default csp settings from json file """
with open(self.default_file) as json_file:
try:
return json.load(json_file)
except Exception as e:
raise ValueError('empty file')
|
read default csp settings from json file
|
def _init_settings(self):
""" Init setting """
self._show_whitespaces = False
self._tab_length = 4
self._use_spaces_instead_of_tabs = True
self.setTabStopWidth(self._tab_length *
self.fontMetrics().width(" "))
self._set_whitespaces_flags(self._show_whitespaces)
|
Init setting
|
def summary(self, raw):
"""Use the Backscatter.io summary data to create a view."""
taxonomies = list()
level = 'info'
namespace = 'Backscatter.io'
if self.service == 'observations':
summary = raw.get('results', dict()).get('summary', dict())
taxonomies = taxonomies + [
self.build_taxonomy(level, namespace, 'Observations', summary.get('observations_count', 0)),
self.build_taxonomy(level, namespace, 'IP Addresses', summary.get('ip_address_count', 0)),
self.build_taxonomy(level, namespace, 'Networks', summary.get('network_count', 0)),
self.build_taxonomy(level, namespace, 'AS', summary.get('autonomous_system_count', 0)),
self.build_taxonomy(level, namespace, 'Ports', summary.get('port_count', 0)),
self.build_taxonomy(level, namespace, 'Protocols', summary.get('protocol_count', 0))
]
elif self.service == 'enrichment':
summary = raw.get('results', dict())
if self.data_type == 'ip':
taxonomies = taxonomies + [
self.build_taxonomy(level, namespace, 'Network', summary.get('network')),
self.build_taxonomy(level, namespace, 'Network Broadcast', summary.get('network_broadcast')),
self.build_taxonomy(level, namespace, 'Network Size', summary.get('network_size')),
self.build_taxonomy(level, namespace, 'Country', summary.get('country_name')),
self.build_taxonomy(level, namespace, 'AS Number', summary.get('as_num')),
self.build_taxonomy(level, namespace, 'AS Name', summary.get('as_name')),
]
elif self.data_type == 'network':
taxonomies = taxonomies + [
self.build_taxonomy(level, namespace, 'Network Size', summary.get('network_size'))
]
elif self.data_type == 'autonomous-system':
taxonomies = taxonomies + [
self.build_taxonomy(level, namespace, 'Prefix Count', summary.get('prefix_count')),
self.build_taxonomy(level, namespace, 'AS Number', summary.get('as_num')),
self.build_taxonomy(level, namespace, 'AS Name', summary.get('as_name'))
]
elif self.data_type == 'port':
for result in raw.get('results', list()):
display = "%s (%s)" % (result.get('service'), result.get('protocol'))
taxonomies.append(self.build_taxonomy(level, namespace, 'Service', display))
else:
pass
else:
pass
return {"taxonomies": taxonomies}
|
Use the Backscatter.io summary data to create a view.
|
def _handleAuthorizedEvents(self, component, action, data, user, client):
"""Isolated communication link for authorized events."""
try:
if component == "debugger":
self.log(component, action, data, user, client, lvl=info)
if not user and component in self.authorized_events.keys():
self.log("Unknown client tried to do an authenticated "
"operation: %s",
component, action, data, user)
return
event = self.authorized_events[component][action]['event'](user, action, data, client)
self.log('Authorized event roles:', event.roles, lvl=verbose)
if not self._checkPermissions(user, event):
result = {
'component': 'hfos.ui.clientmanager',
'action': 'Permission',
'data': _('You have no role that allows this action.', lang='de')
}
self.fireEvent(send(event.client.uuid, result))
return
self.log("Firing authorized event: ", component, action,
str(data)[:100], lvl=debug)
# self.log("", (user, action, data, client), lvl=critical)
self.fireEvent(event)
except Exception as e:
self.log("Critical error during authorized event handling:",
component, action, e,
type(e), lvl=critical, exc=True)
|
Isolated communication link for authorized events.
|
def alphabetize_attributes(self):
"""
Orders attributes names alphabetically, except for the class attribute, which is kept last.
"""
self.attributes.sort(key=lambda name: (name == self.class_attr_name, name))
|
Orders attributes names alphabetically, except for the class attribute, which is kept last.
|
def setup_and_check(self, data, title='', readonly=False,
xlabels=None, ylabels=None):
"""
Setup ArrayEditor:
return False if data is not supported, True otherwise
"""
self.data = data
readonly = readonly or not self.data.flags.writeable
is_record_array = data.dtype.names is not None
is_masked_array = isinstance(data, np.ma.MaskedArray)
if data.ndim > 3:
self.error(_("Arrays with more than 3 dimensions are not "
"supported"))
return False
if xlabels is not None and len(xlabels) != self.data.shape[1]:
self.error(_("The 'xlabels' argument length do no match array "
"column number"))
return False
if ylabels is not None and len(ylabels) != self.data.shape[0]:
self.error(_("The 'ylabels' argument length do no match array row "
"number"))
return False
if not is_record_array:
dtn = data.dtype.name
if dtn not in SUPPORTED_FORMATS and not dtn.startswith('str') \
and not dtn.startswith('unicode'):
arr = _("%s arrays") % data.dtype.name
self.error(_("%s are currently not supported") % arr)
return False
self.layout = QGridLayout()
self.setLayout(self.layout)
self.setWindowIcon(ima.icon('arredit'))
if title:
title = to_text_string(title) + " - " + _("NumPy array")
else:
title = _("Array editor")
if readonly:
title += ' (' + _('read only') + ')'
self.setWindowTitle(title)
self.resize(600, 500)
# Stack widget
self.stack = QStackedWidget(self)
if is_record_array:
for name in data.dtype.names:
self.stack.addWidget(ArrayEditorWidget(self, data[name],
readonly, xlabels, ylabels))
elif is_masked_array:
self.stack.addWidget(ArrayEditorWidget(self, data, readonly,
xlabels, ylabels))
self.stack.addWidget(ArrayEditorWidget(self, data.data, readonly,
xlabels, ylabels))
self.stack.addWidget(ArrayEditorWidget(self, data.mask, readonly,
xlabels, ylabels))
elif data.ndim == 3:
pass
else:
self.stack.addWidget(ArrayEditorWidget(self, data, readonly,
xlabels, ylabels))
self.arraywidget = self.stack.currentWidget()
if self.arraywidget:
self.arraywidget.model.dataChanged.connect(
self.save_and_close_enable)
self.stack.currentChanged.connect(self.current_widget_changed)
self.layout.addWidget(self.stack, 1, 0)
# Buttons configuration
btn_layout = QHBoxLayout()
if is_record_array or is_masked_array or data.ndim == 3:
if is_record_array:
btn_layout.addWidget(QLabel(_("Record array fields:")))
names = []
for name in data.dtype.names:
field = data.dtype.fields[name]
text = name
if len(field) >= 3:
title = field[2]
if not is_text_string(title):
title = repr(title)
text += ' - '+title
names.append(text)
else:
names = [_('Masked data'), _('Data'), _('Mask')]
if data.ndim == 3:
# QSpinBox
self.index_spin = QSpinBox(self, keyboardTracking=False)
self.index_spin.valueChanged.connect(self.change_active_widget)
# QComboBox
names = [str(i) for i in range(3)]
ra_combo = QComboBox(self)
ra_combo.addItems(names)
ra_combo.currentIndexChanged.connect(self.current_dim_changed)
# Adding the widgets to layout
label = QLabel(_("Axis:"))
btn_layout.addWidget(label)
btn_layout.addWidget(ra_combo)
self.shape_label = QLabel()
btn_layout.addWidget(self.shape_label)
label = QLabel(_("Index:"))
btn_layout.addWidget(label)
btn_layout.addWidget(self.index_spin)
self.slicing_label = QLabel()
btn_layout.addWidget(self.slicing_label)
# set the widget to display when launched
self.current_dim_changed(self.last_dim)
else:
ra_combo = QComboBox(self)
ra_combo.currentIndexChanged.connect(self.stack.setCurrentIndex)
ra_combo.addItems(names)
btn_layout.addWidget(ra_combo)
if is_masked_array:
label = QLabel(_("<u>Warning</u>: changes are applied separately"))
label.setToolTip(_("For performance reasons, changes applied "\
"to masked array won't be reflected in "\
"array's data (and vice-versa)."))
btn_layout.addWidget(label)
btn_layout.addStretch()
if not readonly:
self.btn_save_and_close = QPushButton(_('Save and Close'))
self.btn_save_and_close.setDisabled(True)
self.btn_save_and_close.clicked.connect(self.accept)
btn_layout.addWidget(self.btn_save_and_close)
self.btn_close = QPushButton(_('Close'))
self.btn_close.setAutoDefault(True)
self.btn_close.setDefault(True)
self.btn_close.clicked.connect(self.reject)
btn_layout.addWidget(self.btn_close)
self.layout.addLayout(btn_layout, 2, 0)
self.setMinimumSize(400, 300)
# Make the dialog act as a window
self.setWindowFlags(Qt.Window)
return True
|
Setup ArrayEditor:
return False if data is not supported, True otherwise
|
def linkify_sd_by_s(self, hosts, services):
"""Replace dependent_service_description and service_description
in service dependency by the real object
:param hosts: host list, used to look for a specific one
:type hosts: alignak.objects.host.Hosts
:param services: service list to look for a specific one
:type services: alignak.objects.service.Services
:return: None
"""
to_del = []
errors = self.configuration_errors
warns = self.configuration_warnings
for servicedep in self:
try:
s_name = servicedep.dependent_service_description
hst_name = servicedep.dependent_host_name
# The new member list, in id
serv = services.find_srv_by_name_and_hostname(hst_name, s_name)
if serv is None:
host = hosts.find_by_name(hst_name)
if not (host and host.is_excluded_for_sdesc(s_name)):
errors.append("Service %s not found for host %s" % (s_name, hst_name))
elif host:
warns.append("Service %s is excluded from host %s ; "
"removing this servicedependency as it's unusuable."
% (s_name, hst_name))
to_del.append(servicedep)
continue
servicedep.dependent_service_description = serv.uuid
s_name = servicedep.service_description
hst_name = servicedep.host_name
# The new member list, in id
serv = services.find_srv_by_name_and_hostname(hst_name, s_name)
if serv is None:
host = hosts.find_by_name(hst_name)
if not (host and host.is_excluded_for_sdesc(s_name)):
errors.append("Service %s not found for host %s" % (s_name, hst_name))
elif host:
warns.append("Service %s is excluded from host %s ; "
"removing this servicedependency as it's unusuable."
% (s_name, hst_name))
to_del.append(servicedep)
continue
servicedep.service_description = serv.uuid
except AttributeError as err:
logger.error("[servicedependency] fail to linkify by service %s: %s",
servicedep, err)
to_del.append(servicedep)
for servicedep in to_del:
self.remove_item(servicedep)
|
Replace dependent_service_description and service_description
in service dependency by the real object
:param hosts: host list, used to look for a specific one
:type hosts: alignak.objects.host.Hosts
:param services: service list to look for a specific one
:type services: alignak.objects.service.Services
:return: None
|
def cluster(self, input_fasta_list, reverse_pipe):
'''
cluster - Clusters reads at 100% identity level and writes them to
file. Resets the input_fasta variable as the FASTA file containing the
clusters.
Parameters
----------
input_fasta_list : list
list of strings, each a path to input fasta files to be clustered.
reverse_pipe : bool
True/False, whether the reverse reads pipeline is being followed.
Returns
-------
output_fasta_list : list
list of strings, each a path to the output fasta file to which
clusters were written to.
'''
output_fasta_list = []
for input_fasta in input_fasta_list:
output_path = input_fasta.replace('_hits.aln.fa', '_clustered.fa')
cluster_dict = {}
logging.debug('Clustering reads')
if os.path.exists(input_fasta):
reads=self.seqio.read_fasta_file(input_fasta) # Read in FASTA records
logging.debug('Found %i reads' % len(reads)) # Report number found
clusters=self.clust.deduplicate(reads) # Cluster redundant sequences
logging.debug('Clustered to %s groups' % len(clusters)) # Report number of clusters
logging.debug('Writing representative sequences of each cluster to: %s' % output_path) # Report the name of the file
else:
logging.debug("Found no reads to be clustered")
clusters = []
self.seqio.write_fasta_file(
[x[0] for x in clusters],
output_path
) # Choose the first sequence to write to file as representative (all the same anyway)
for cluster in clusters:
cluster_dict[cluster[0].name]=cluster # assign the cluster to the dictionary
self.seq_library[output_path]= cluster_dict
output_fasta_list.append(output_path)
return output_fasta_list
|
cluster - Clusters reads at 100% identity level and writes them to
file. Resets the input_fasta variable as the FASTA file containing the
clusters.
Parameters
----------
input_fasta_list : list
list of strings, each a path to input fasta files to be clustered.
reverse_pipe : bool
True/False, whether the reverse reads pipeline is being followed.
Returns
-------
output_fasta_list : list
list of strings, each a path to the output fasta file to which
clusters were written to.
|
def encrypt_file(file_path, sender, recipients):
"Returns encrypted binary file content if successful"
for recipient_key in recipients:
crypto.assert_type_and_length('recipient_key', recipient_key, (str, crypto.UserLock))
crypto.assert_type_and_length("sender_key", sender, crypto.UserLock)
if (not os.path.exists(file_path)) or (not os.path.isfile(file_path)):
raise OSError("Specified path does not point to a valid file: {}".format(file_path))
_, filename = os.path.split(file_path)
with open(file_path, "rb") as I:
crypted = crypto.MiniLockFile.new(filename, I.read(), sender, recipients)
return crypted.contents
|
Returns encrypted binary file content if successful
|
def hide_columns(self, subset):
"""
Hide columns from rendering.
.. versionadded:: 0.23.0
Parameters
----------
subset : IndexSlice
An argument to ``DataFrame.loc`` that identifies which columns
are hidden.
Returns
-------
self : Styler
"""
subset = _non_reducing_slice(subset)
hidden_df = self.data.loc[subset]
self.hidden_columns = self.columns.get_indexer_for(hidden_df.columns)
return self
|
Hide columns from rendering.
.. versionadded:: 0.23.0
Parameters
----------
subset : IndexSlice
An argument to ``DataFrame.loc`` that identifies which columns
are hidden.
Returns
-------
self : Styler
|
def __read_frame(self):
"""*Attempt* to read a frame. If we get an EAGAIN on the frame header,
it'll raise to our caller. If we get it *after* we already got the
header, wait-out the rest of the frame.
"""
if self.__frame_header_cache is None:
_logger.debug("Reading frame header.")
(length, frame_type) = struct.unpack('!II', self.__read(8))
self.__frame_header_cache = (length, frame_type)
else:
(length, frame_type) = self.__frame_header_cache
try:
data = self.__read(length - 4)
except EnvironmentError as e:
if e.errno != errno.EAGAIN:
raise
self.__frame_header_cache = (length, frame_type)
raise
self.__frame_header_cache = None
self.__process_message(frame_type, data)
|
*Attempt* to read a frame. If we get an EAGAIN on the frame header,
it'll raise to our caller. If we get it *after* we already got the
header, wait-out the rest of the frame.
|
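The reader above expects each frame to start with an 8-byte big-endian header: a 4-byte length that counts the frame-type word plus the payload, followed by a 4-byte frame type, then `length - 4` bytes of payload. A sketch of how such a frame could be built for testing; the frame-type value is arbitrary:
import struct

def build_frame(frame_type, payload):
    # Length covers the 4-byte frame type plus the payload, matching the
    # "length - 4" payload read in __read_frame.
    return struct.pack('!II', len(payload) + 4, frame_type) + payload

frame = build_frame(0, b'hello')
print(len(frame))  # 13: 8-byte header + 5-byte payload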
def image(self):
"""
Generates the image using self.genImage(),
then rotates it to self.direction and returns it.
"""
self._image = self.genImage()
self._image = funcs.rotateImage(self._image, self.direction)
return self._image
|
Generates the image using self.genImage(),
then rotates it to self.direction and returns it.
|
def grp_by_src(self):
"""
:returns: a new CompositeSourceModel with one group per source
"""
smodels = []
grp_id = 0
for sm in self.source_models:
src_groups = []
smodel = sm.__class__(sm.names, sm.weight, sm.path, src_groups,
sm.num_gsim_paths, sm.ordinal, sm.samples)
for sg in sm.src_groups:
for src in sg.sources:
src.src_group_id = grp_id
src_groups.append(
sourceconverter.SourceGroup(
sg.trt, [src], name=src.source_id, id=grp_id))
grp_id += 1
smodels.append(smodel)
return self.__class__(self.gsim_lt, self.source_model_lt, smodels,
self.optimize_same_id)
|
:returns: a new CompositeSourceModel with one group per source
|
def set_default_by_index(self, index):
""" Set the default dataset by its index.
After changing the default dataset, all calls without explicitly specifying the
dataset by index or alias will be redirected to this dataset.
Args:
index (int): The index of the dataset that should be made the default.
Raises:
DataInvalidIndex: If the index does not represent a valid dataset.
"""
if index >= len(self._datasets):
raise DataInvalidIndex('A dataset with index {} does not exist'.format(index))
self._default_index = index
|
Set the default dataset by its index.
After changing the default dataset, all calls without explicitly specifying the
dataset by index or alias will be redirected to this dataset.
Args:
index (int): The index of the dataset that should be made the default.
Raises:
DataInvalidIndex: If the index does not represent a valid dataset.
|
def overall_error_rate(self):
"""Overall error rate metrics (error_rate, substitution_rate, deletion_rate, and insertion_rate)
Returns
-------
dict
results in a dictionary format
"""
substitution_rate = metric.substitution_rate(
Nref=self.overall['Nref'],
Nsubstitutions=self.overall['Nsubs']
)
deletion_rate = metric.deletion_rate(
Nref=self.overall['Nref'],
Ndeletions=self.overall['Nfn']
)
insertion_rate = metric.insertion_rate(
Nref=self.overall['Nref'],
Ninsertions=self.overall['Nfp']
)
error_rate = metric.error_rate(
substitution_rate_value=substitution_rate,
deletion_rate_value=deletion_rate,
insertion_rate_value=insertion_rate
)
return {
'error_rate': error_rate,
'substitution_rate': substitution_rate,
'deletion_rate': deletion_rate,
'insertion_rate': insertion_rate
}
|
Overall error rate metrics (error_rate, substitution_rate, deletion_rate, and insertion_rate)
Returns
-------
dict
results in a dictionary format
|
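Assuming the conventional sound-event-detection definitions (each rate is the corresponding count divided by the number of reference events, and the overall error rate is their sum), the computation above reduces to the following sketch; the metric module is assumed, not confirmed, to implement the same formulas:
def overall_error_rate_sketch(nref, nsubs, nfn, nfp):
    substitution_rate = nsubs / float(nref)
    deletion_rate = nfn / float(nref)    # missed reference events
    insertion_rate = nfp / float(nref)   # spurious system events
    error_rate = substitution_rate + deletion_rate + insertion_rate
    return {'error_rate': error_rate,
            'substitution_rate': substitution_rate,
            'deletion_rate': deletion_rate,
            'insertion_rate': insertion_rate}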
def _get_maxcov_downsample(data):
"""Calculate maximum coverage downsampling for whole genome samples.
Returns None if we're not doing downsampling.
"""
from bcbio.bam import ref
from bcbio.ngsalign import alignprep, bwa
from bcbio.variation import coverage
fastq_file = data["files"][0]
params = alignprep.get_downsample_params(data)
if params:
num_reads = alignprep.total_reads_from_grabix(fastq_file)
if num_reads:
vrs = dd.get_variant_regions_merged(data)
total_size = sum([c.size for c in ref.file_contigs(dd.get_ref_file(data), data["config"])])
if vrs:
callable_size = pybedtools.BedTool(vrs).total_coverage()
genome_cov_pct = callable_size / float(total_size)
else:
callable_size = total_size
genome_cov_pct = 1.0
if (genome_cov_pct > coverage.GENOME_COV_THRESH
and dd.get_coverage_interval(data) in ["genome", None, False]):
total_counts, total_sizes = 0, 0
for count, size in bwa.fastq_size_output(fastq_file, 5000):
total_counts += int(count)
total_sizes += (int(size) * int(count))
read_size = float(total_sizes) / float(total_counts)
avg_cov = float(num_reads * read_size) / callable_size
if avg_cov >= params["min_coverage_for_downsampling"]:
return int(avg_cov * params["maxcov_downsample_multiplier"])
return None
|
Calculate maximum coverage downsampling for whole genome samples.
Returns None if we're not doing downsampling.
|
def clean(inst):
"""Routine to return VEFI data cleaned to the specified level
Parameters
-----------
inst : (pysat.Instrument)
Instrument class object, whose attribute clean_level is used to return
the desired level of data selectivity.
Returns
--------
Void : (NoneType)
data in inst is modified in-place.
Notes
--------
'dusty' or 'clean' removes data when interpolation flag is set to 1
"""
if (inst.clean_level == 'dusty') | (inst.clean_level == 'clean'):
idx, = np.where(inst['B_flag'] == 0)
inst.data = inst[idx, :]
return None
|
Routine to return VEFI data cleaned to the specified level
Parameters
-----------
inst : (pysat.Instrument)
Instrument class object, whose attribute clean_level is used to return
the desired level of data selectivity.
Returns
--------
Void : (NoneType)
data in inst is modified in-place.
Notes
--------
'dusty' or 'clean' removes data when interpolation flag is set to 1
|
def on_epoch_end(self, epoch, smooth_loss, last_metrics, **kwargs):
"Logs training loss, validation loss and custom metrics & log prediction samples & save model"
if self.save_model:
# Adapted from fast.ai "SaveModelCallback"
current = self.get_monitor_value()
if current is not None and self.operator(current, self.best):
print(
f'Better model found at epoch {epoch} with {self.monitor} value: {current}.'
)
self.best = current
# Section modified to save within wandb folder
with self.model_path.open('wb') as model_file:
self.learn.save(model_file)
# Log sample predictions
if self.show_results:
self.learn.show_results() # pyplot display of sample predictions
wandb.log({"Prediction Samples": plt}, commit=False)
# Log losses & metrics
# Adapted from fast.ai "CSVLogger"
logs = {
name: stat
for name, stat in list(
zip(self.learn.recorder.names, [epoch, smooth_loss] +
last_metrics))[1:]
}
wandb.log(logs)
# We can now close results figure
if self.show_results:
plt.close('all')
|
Logs training loss, validation loss and custom metrics & log prediction samples & save model
|
def unpack(fmt, data):
"""unpack(fmt, string) -> (v1, v2, ...)
Unpack the string, containing packed C structure data, according
to fmt. Requires len(string)==calcsize(fmt).
See struct.__doc__ for more on format strings."""
formatdef, endianness, i = getmode(fmt)
j = 0
num = 0
result = []
length = calcsize(fmt)
if length != len(data):
raise StructError("unpack str size does not match format")
while i < len(fmt):
num, i = getNum(fmt, i)
cur = fmt[i]
i += 1
try:
format = formatdef[cur]
except KeyError:
raise StructError("%s is not a valid format" % cur)
if not num:
num = 1
if cur == 'x':
j += num
elif cur == 's':
result.append(data[j:j + num])
j += num
elif cur == 'p':
n = data[j]
if n >= num:
n = num - 1
result.append(data[j + 1:j + n + 1])
j += num
else:
for n in range(num):
result += [format['unpack'](data, j, format['size'], endianness)]
j += format['size']
return tuple(result)
|
unpack(fmt, string) -> (v1, v2, ...)
Unpack the string, containing packed C structure data, according
to fmt. Requires len(string)==calcsize(fmt).
See struct.__doc__ for more on format strings.
|
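The contract mirrors the standard library's `struct` module, so a quick way to sanity-check what a given format string yields is to compare against `struct.unpack` (this exercises the built-in module, not the pure-Python implementation above):
import struct

fmt = '>2H4s'
data = b'\x00\x01\x00\x02abcd'
assert len(data) == struct.calcsize(fmt)
print(struct.unpack(fmt, data))  # (1, 2, b'abcd')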
def _make_plus_helper(obj, fields):
""" add a + prefix to any fields in obj that aren't in fields """
new_obj = {}
for key, value in obj.items():
if key in fields or key.startswith('_'):
# if there's a subschema apply it to a list or subdict
if fields.get(key):
if isinstance(value, list):
value = [_make_plus_helper(item, fields[key])
for item in value]
# assign the value (modified potentially) to the new_obj
new_obj[key] = value
else:
# values not in the fields dict get a +
new_obj['+%s' % key] = value
return new_obj
|
add a + prefix to any fields in obj that aren't in fields
|
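A small illustration of the prefixing rule described above; the object and field schema are invented, and the call assumes `_make_plus_helper` is available in the same module:
obj = {'_id': 1, 'name': 'x', 'color': 'red',
       'tags': [{'label': 'a', 'extra': 2}]}
fields = {'name': {}, 'tags': {'label': {}}}

# Keys present in `fields` (or starting with '_') are kept as-is; unknown
# keys gain a '+' prefix, recursively for listed sub-objects.
print(_make_plus_helper(obj, fields))
# {'_id': 1, 'name': 'x', '+color': 'red', 'tags': [{'label': 'a', '+extra': 2}]}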
def checkStock(self):
"""check stocks in preference"""
if not self.preferences:
logger.debug("no preferences")
return None
soup = BeautifulSoup(
self.xpath(path['stock-table'])[0].html, "html.parser")
count = 0
# iterate through product in left panel
for product in soup.select("div.tradebox"):
prod_name = product.select("span.instrument-name")[0].text
stk_name = [x for x in self.preferences
if x.lower() in prod_name.lower()]
if not stk_name:
continue
name = prod_name
if not [x for x in self.stocks if x.product == name]:
self.stocks.append(Stock(name))
stock = [x for x in self.stocks if x.product == name][0]
if 'tradebox-market-closed' in product['class']:
stock.market = False
if not stock.market:
logger.debug("market closed for %s" % stock.product)
continue
sell_price = product.select("div.tradebox-price-sell")[0].text
buy_price = product.select("div.tradebox-price-buy")[0].text
sent = int(product.select(path['sent'])[0].text.strip('%')) / 100
stock.new_rec([sell_price, buy_price, sent])
count += 1
logger.debug(f"added %d stocks" % count)
return self.stocks
|
Check the stocks listed in the preferences.
|
def run(self):
"""
Execute tasks from the queue.
"""
while not self._stoped:
self._tx_event.wait()
self._tx_event.clear()
try:
func = self._tx_queue.get_nowait()
if isinstance(func, str):
self._stoped = True
self._rx_queue.put('closed')
self.notice()
break
except Empty:
# pragma: no cover
continue
try:
result = func()
self._rx_queue.put(result)
except Exception as e:
self._rx_queue.put(e)
self.notice()
else:
# pragma: no cover
pass
|
Execute tasks from the queue.
|
def localCitesOf(self, rec):
"""Takes in a Record, WOS string, citation string or Citation and returns a RecordCollection of all records that cite it.
# Parameters
_rec_ : `Record, str or Citation`
> The object that is being cited
# Returns
`RecordCollection`
> A `RecordCollection` containing only those `Records` that cite _rec_
"""
localCites = []
if isinstance(rec, Record):
recCite = rec.createCitation()
if isinstance(rec, str):
try:
recCite = self.getID(rec)
except ValueError:
try:
recCite = Citation(rec)
except AttributeError:
raise ValueError("{} is not a valid WOS string or a valid citation string".format(recCite))
else:
if recCite is None:
return RecordCollection(inCollection = localCites, name = "Records_citing_{}".format(rec), quietStart = True)
else:
recCite = recCite.createCitation()
elif isinstance(rec, Citation):
recCite = rec
else:
raise ValueError("{} is not a valid input, rec must be a Record, string or Citation object.".format(rec))
for R in self:
rCites = R.get('citations')
if rCites:
for cite in rCites:
if recCite == cite:
localCites.append(R)
break
return RecordCollection(inCollection = localCites, name = "Records_citing_'{}'".format(rec), quietStart = True)
|
Takes in a Record, WOS string, citation string or Citation and returns a RecordCollection of all records that cite it.
# Parameters
_rec_ : `Record, str or Citation`
> The object that is being cited
# Returns
`RecordCollection`
> A `RecordCollection` containing only those `Records` that cite _rec_
|
def union_join(left, right, left_as='left', right_as='right'):
"""
Join function truest to the SQL style join. Merges both objects together in a sum-type,
saving references to each parent in ``left`` and ``right`` attributes.
>>> Dog = namedtuple('Dog', ['name', 'woof', 'weight'])
>>> dog = Dog('gatsby', 'Ruff!', 15)
>>> Cat = namedtuple('Cat', ['name', 'meow', 'weight'])
>>> cat = Cat('pleo', 'roooowwwr', 12)
>>> catdog = union_join(cat, dog, 'cat', 'dog')
>>> catdog.name
pleo
>>> catdog.woof
Ruff!
>>> catdog.dog.name
gatsby
:param left: left object to be joined with right
:param right: right object to be joined with left
:return: joined object with attrs/methods from both parents available
"""
attrs = {}
attrs.update(get_object_attrs(right))
attrs.update(get_object_attrs(left))
attrs[left_as] = left
attrs[right_as] = right
if isinstance(left, dict) and isinstance(right, dict):
return attrs
else:
joined_class = type(left.__class__.__name__ + right.__class__.__name__, (Union,),
{})
return joined_class(attrs)
|
Join function truest to the SQL style join. Merges both objects together in a sum-type,
saving references to each parent in ``left`` and ``right`` attributes.
>>> Dog = namedtuple('Dog', ['name', 'woof', 'weight'])
>>> dog = Dog('gatsby', 'Ruff!', 15)
>>> Cat = namedtuple('Cat', ['name', 'meow', 'weight'])
>>> cat = Cat('pleo', 'roooowwwr', 12)
>>> catdog = union_join(cat, dog, 'cat', 'dog')
>>> catdog.name
pleo
>>> catdog.woof
Ruff!
>>> catdog.dog.name
gatsby
:param left: left object to be joined with right
:param right: right object to be joined with left
:return: joined object with attrs/methods from both parents available
|
def magic(adata,
name_list=None,
k=10,
a=15,
t='auto',
n_pca=100,
knn_dist='euclidean',
random_state=None,
n_jobs=None,
verbose=False,
copy=None,
**kwargs):
"""Markov Affinity-based Graph Imputation of Cells (MAGIC) API [vanDijk18]_.
MAGIC is an algorithm for denoising and transcript recovery of single cells
applied to single-cell sequencing data. MAGIC builds a graph from the data
and uses diffusion to smooth out noise and recover the data manifold.
More information and bug reports
`here <https://github.com/KrishnaswamyLab/MAGIC>`__. For help, visit
<https://krishnaswamylab.org/get-help>.
Parameters
----------
adata : :class:`~scanpy.api.AnnData`
An anndata file with `.raw` attribute representing raw counts.
name_list : `list`, `'all_genes'`, or `'pca_only'`, optional (default: `'all_genes'`)
Denoised genes to return. Default is all genes, but this
may require a large amount of memory if the input data is sparse.
k : int, optional, default: 10
number of nearest neighbors on which to build kernel
a : int, optional, default: 15
sets decay rate of kernel tails.
If None, alpha decaying kernel is not used
t : int, optional, default: 'auto'
power to which the diffusion operator is powered.
This sets the level of diffusion. If 'auto', t is selected
according to the Procrustes disparity of the diffused data
n_pca : int, optional, default: 100
Number of principal components to use for calculating
neighborhoods. For extremely large datasets, using
n_pca < 20 allows neighborhoods to be calculated in
roughly log(n_samples) time.
knn_dist : string, optional, default: 'euclidean'
recommended values: 'euclidean', 'cosine', 'precomputed'
Any metric from `scipy.spatial.distance` can be used
distance metric for building kNN graph. If 'precomputed',
`data` should be an n_samples x n_samples distance or
affinity matrix
random_state : `int`, `numpy.RandomState` or `None`, optional (default: `None`)
Random seed. Defaults to the global `numpy` random number generator
n_jobs : `int` or None, optional. Default: None
Number of threads to use in training. All cores are used by default.
verbose : `bool`, `int` or `None`, optional (default: `sc.settings.verbosity`)
If `True` or an integer `>= 2`, print status messages.
If `None`, `sc.settings.verbosity` is used.
copy : `bool` or `None`, optional. Default: `None`.
If true, a copy of anndata is returned. If `None`, `copy` is True if
`genes` is not `'all_genes'` or `'pca_only'`. `copy` may only be False
if `genes` is `'all_genes'` or `'pca_only'`, as the resultant data
will otherwise have different column names from the input data.
kwargs : additional arguments to `magic.MAGIC`
Returns
-------
If `copy` is True, AnnData object is returned.
If `subset_genes` is not `all_genes`, PCA on MAGIC values of cells are stored in
`adata.obsm['X_magic']` and `adata.X` is not modified.
The raw counts are stored in `.raw` attribute of AnnData object.
Examples
--------
>>> import scanpy.api as sc
>>> import magic
>>> adata = sc.datasets.paul15()
>>> sc.pp.normalize_per_cell(adata)
>>> sc.pp.sqrt(adata) # or sc.pp.log1p(adata)
>>> adata_magic = sc.pp.magic(adata, name_list=['Mpo', 'Klf1', 'Ifitm1'], k=5)
>>> adata_magic.shape
(2730, 3)
>>> sc.pp.magic(adata, name_list='pca_only', k=5)
>>> adata.obsm['X_magic'].shape
(2730, 100)
>>> sc.pp.magic(adata, name_list='all_genes', k=5)
>>> adata.X.shape
(2730, 3451)
"""
try:
from magic import MAGIC
except ImportError:
raise ImportError(
'Please install magic package via `pip install --user '
'git+git://github.com/KrishnaswamyLab/MAGIC.git#subdirectory=python`')
logg.info('computing MAGIC', r=True)
needs_copy = not (name_list is None or
(isinstance(name_list, str) and
name_list in ["all_genes", "pca_only"]))
if copy is None:
copy = needs_copy
elif needs_copy and not copy:
raise ValueError(
"Can only perform MAGIC in-place with `name_list=='all_genes' or "
"`name_list=='pca_only'` (got {}). Consider setting "
"`copy=True`".format(name_list))
adata = adata.copy() if copy else adata
verbose = settings.verbosity if verbose is None else verbose
if isinstance(verbose, (str, int)):
verbose = _settings_verbosity_greater_or_equal_than(2)
n_jobs = settings.n_jobs if n_jobs is None else n_jobs
X_magic = MAGIC(k=k,
a=a,
t=t,
n_pca=n_pca,
knn_dist=knn_dist,
random_state=random_state,
n_jobs=n_jobs,
verbose=verbose,
**kwargs).fit_transform(adata,
genes=name_list)
logg.info(' finished', time=True,
end=' ' if _settings_verbosity_greater_or_equal_than(3) else '\n')
# update AnnData instance
if name_list == "pca_only":
# special case - update adata.obsm with smoothed values
adata.obsm["X_magic"] = X_magic.X
logg.hint('added\n'
' \'X_magic\', PCA on MAGIC coordinates (adata.obsm)')
elif copy:
# just return X_magic
X_magic.raw = adata
adata = X_magic
else:
# replace data with smoothed data
adata.raw = adata
adata.X = X_magic.X
if copy:
return adata
|
Markov Affinity-based Graph Imputation of Cells (MAGIC) API [vanDijk18]_.
MAGIC is an algorithm for denoising and transcript recovery of single cells
applied to single-cell sequencing data. MAGIC builds a graph from the data
and uses diffusion to smooth out noise and recover the data manifold.
More information and bug reports
`here <https://github.com/KrishnaswamyLab/MAGIC>`__. For help, visit
<https://krishnaswamylab.org/get-help>.
Parameters
----------
adata : :class:`~scanpy.api.AnnData`
An anndata file with `.raw` attribute representing raw counts.
name_list : `list`, `'all_genes'`, or `'pca_only'`, optional (default: `'all_genes'`)
Denoised genes to return. Default is all genes, but this
may require a large amount of memory if the input data is sparse.
k : int, optional, default: 10
number of nearest neighbors on which to build kernel
a : int, optional, default: 15
sets decay rate of kernel tails.
If None, alpha decaying kernel is not used
t : int, optional, default: 'auto'
power to which the diffusion operator is powered.
This sets the level of diffusion. If 'auto', t is selected
according to the Procrustes disparity of the diffused data
n_pca : int, optional, default: 100
Number of principal components to use for calculating
neighborhoods. For extremely large datasets, using
n_pca < 20 allows neighborhoods to be calculated in
roughly log(n_samples) time.
knn_dist : string, optional, default: 'euclidean'
recommended values: 'euclidean', 'cosine', 'precomputed'
Any metric from `scipy.spatial.distance` can be used
distance metric for building kNN graph. If 'precomputed',
`data` should be an n_samples x n_samples distance or
affinity matrix
random_state : `int`, `numpy.RandomState` or `None`, optional (default: `None`)
Random seed. Defaults to the global `numpy` random number generator
n_jobs : `int` or None, optional. Default: None
Number of threads to use in training. All cores are used by default.
verbose : `bool`, `int` or `None`, optional (default: `sc.settings.verbosity`)
If `True` or an integer `>= 2`, print status messages.
If `None`, `sc.settings.verbosity` is used.
copy : `bool` or `None`, optional. Default: `None`.
If true, a copy of anndata is returned. If `None`, `copy` is True if
`genes` is not `'all_genes'` or `'pca_only'`. `copy` may only be False
if `genes` is `'all_genes'` or `'pca_only'`, as the resultant data
will otherwise have different column names from the input data.
kwargs : additional arguments to `magic.MAGIC`
Returns
-------
If `copy` is True, AnnData object is returned.
If `subset_genes` is not `all_genes`, PCA on MAGIC values of cells are stored in
`adata.obsm['X_magic']` and `adata.X` is not modified.
The raw counts are stored in `.raw` attribute of AnnData object.
Examples
--------
>>> import scanpy.api as sc
>>> import magic
>>> adata = sc.datasets.paul15()
>>> sc.pp.normalize_per_cell(adata)
>>> sc.pp.sqrt(adata) # or sc.pp.log1p(adata)
>>> adata_magic = sc.pp.magic(adata, name_list=['Mpo', 'Klf1', 'Ifitm1'], k=5)
>>> adata_magic.shape
(2730, 3)
>>> sc.pp.magic(adata, name_list='pca_only', k=5)
>>> adata.obsm['X_magic'].shape
(2730, 100)
>>> sc.pp.magic(adata, name_list='all_genes', k=5)
>>> adata.X.shape
(2730, 3451)
|
def _is_streaming_request(self):
"""check request is stream request or not"""
arg2 = self.argstreams[1]
arg3 = self.argstreams[2]
return not (isinstance(arg2, InMemStream) and
isinstance(arg3, InMemStream) and
((arg2.auto_close and arg3.auto_close) or (
arg2.state == StreamState.completed and
arg3.state == StreamState.completed)))
|
check request is stream request or not
|
def DropLocation():
""" Get the directory that file drop is watching """
template = Template(template=PathDirs().cfg_file)
drop_loc = template.option('main', 'files')[1]
drop_loc = expanduser(drop_loc)
drop_loc = abspath(drop_loc)
return (True, drop_loc)
|
Get the directory that file drop is watching
|
def _prune_node(self, node):
"""
Prune the given node if context exits cleanly.
"""
if self.is_pruning:
# node is mutable, so capture the key for later pruning now
prune_key, node_body = self._node_to_db_mapping(node)
should_prune = (node_body is not None)
else:
should_prune = False
yield
# Prune only if no exception is raised
if should_prune:
del self.db[prune_key]
|
Prune the given node if context exits cleanly.
|
def dist_in_usersite(dist):
"""
Return True if given Distribution is installed in user site.
"""
norm_path = normalize_path(dist_location(dist))
return norm_path.startswith(normalize_path(user_site))
|
Return True if given Distribution is installed in user site.
|
def p_declarations(self, p):
"""declarations : declarations declaration
| declaration"""
n = len(p)
if n == 3:
p[0] = p[1] + [p[2]]
elif n == 2:
p[0] = [p[1]]
|
declarations : declarations declaration
| declaration
|
def namedb_get_preorder(cur, preorder_hash, current_block_number, include_expired=False, expiry_time=None):
"""
Get a preorder record by hash.
If include_expired is set, then so must expiry_time
Return None if not found.
"""
select_query = None
args = None
if include_expired:
select_query = "SELECT * FROM preorders WHERE preorder_hash = ?;"
args = (preorder_hash,)
else:
assert expiry_time is not None, "expiry_time is required with include_expired"
select_query = "SELECT * FROM preorders WHERE preorder_hash = ? AND block_number < ?;"
args = (preorder_hash, expiry_time + current_block_number)
preorder_rows = namedb_query_execute(cur, select_query, args)
preorder_row = preorder_rows.fetchone()
if preorder_row is None:
# no such preorder
return None
preorder_rec = {}
preorder_rec.update( preorder_row )
return preorder_rec
|
Get a preorder record by hash.
If include_expired is set, then so must expiry_time
Return None if not found.
|
def lookup_expand(self, stmt, names):
"""Find schema nodes under `stmt`, also in used groupings.
`names` is a list with qualified names of the schema nodes to
look up. All 'uses'/'grouping' pairs between `stmt` and found
schema nodes are marked for expansion.
"""
if not names: return []
todo = [stmt]
while todo:
pst = todo.pop()
for sub in pst.substmts:
if sub.keyword in self.schema_nodes:
qname = self.qname(sub)
if qname in names:
names.remove(qname)
par = sub.parent
while hasattr(par,"d_ref"): # par must be grouping
par.d_ref.d_expand = True
par = par.d_ref.parent
if not names: return [] # all found
elif sub.keyword == "uses":
g = sub.i_grouping
g.d_ref = sub
todo.append(g)
return names
|
Find schema nodes under `stmt`, also in used groupings.
`names` is a list with qualified names of the schema nodes to
look up. All 'uses'/'grouping' pairs between `stmt` and found
schema nodes are marked for expansion.
|
def _get_target_nearest(self):
"""Get nearest target for each origin"""
reps_query = """
SELECT DISTINCT ON(g2.cartodb_id)
g1.cartodb_id As origin_id,
g2.the_geom,
g2.cartodb_id + {maxorigin} as cartodb_id,
g2.the_geom_webmercator
FROM {origin_table} As g1, {target_table} As g2
ORDER BY g2.cartodb_id, g1.the_geom <-> g2.the_geom
""".format(
maxorigin=self.origins.index.max(),
origin_table=self.origin_table,
target_table=self.target_table
)
nearest_reps = self.context.query(
reps_query,
decode_geom=True
)
nearest_reps = gpd.GeoDataFrame(nearest_reps, geometry='geometry')
init_labels = nearest_reps['origin_id'].values
# update with new information
self.targets['labels'] = init_labels
logging.info('nearest targets retrieved')
return nearest_reps
|
Get nearest target for each origin
|
def connectionLost(self, reason):
"""If we already have an AMP connection registered on the factory,
get rid of it.
"""
if self.connection is not None:
del self.factory.protocols[self.connection]
|
If we already have an AMP connection registered on the factory,
get rid of it.
|
def aggregate(self, **filters):
"""Conduct an aggregate query"""
url = URL.aggregate.format(**locals())
return self.get_pages(url, **filters)
|
Conduct an aggregate query
|
def set(self, style={}):
"""overrides style values at the current stack level"""
_style = {}
for attr in style:
if attr in self.cmds and not style[attr] in self.cmds[attr]:
print('WARNING: ESC/POS PRINTING: ignoring invalid value: '+utfstr(style[attr])+' for style: '+utfstr(attr))
else:
self.stack[-1][attr] = self.enforce_type(attr, style[attr])
|
overrides style values at the current stack level
|
def convert_result(converter):
"""Decorator that can convert the result of a function call."""
def decorate(fn):
@inspection.wraps(fn)
def new_fn(*args, **kwargs):
return converter(fn(*args, **kwargs))
return new_fn
return decorate
|
Decorator that can convert the result of a function call.
|
def autoLayout( self,
padX = None,
padY = None,
direction = Qt.Horizontal,
layout = 'Layered',
animate = 0,
centerOn = None,
center = None,
debug=False ):
"""
Automatically lays out all the nodes in the scene using the \
autoLayoutNodes method.
:param padX | <int> || None | default is 2 * cell width
padY | <int> || None | default is 2 * cell height
direction | <Qt.Direction>
layout | <str> | name of the layout plugin to use
animate | <int> | number of seconds to animate over
:return {<XNode>: <QRectF>, ..} | new rects per affected node
"""
return self.autoLayoutNodes(self.nodes(),
padX,
padY,
direction,
layout,
animate,
centerOn,
center,
debug)
|
Automatically lays out all the nodes in the scene using the \
autoLayoutNodes method.
:param padX | <int> || None | default is 2 * cell width
padY | <int> || None | default is 2 * cell height
direction | <Qt.Direction>
layout | <str> | name of the layout plugin to use
animate | <int> | number of seconds to animate over
:return {<XNode>: <QRectF>, ..} | new rects per affected node
|
def add_usr_local_bin_to_path(log=False):
""" adds /usr/local/bin to $PATH """
if log:
bookshelf2.logging_helpers.log_green('inserts /usr/local/bin into PATH')
with settings(hide('warnings', 'running', 'stdout', 'stderr'),
capture=True):
try:
sudo('echo "export PATH=/usr/local/bin:$PATH" '
'|sudo /usr/bin/tee /etc/profile.d/fix-path.sh')
return True
except:
raise SystemExit(1)
|
adds /usr/local/bin to $PATH
|
def _write(self, f):
"""Serialize an NDEF record to a file-like object."""
log.debug("writing ndef record at offset {0}".format(f.tell()))
record_type = self.type
record_name = self.name
record_data = self.data
if record_type == '':
header_flags = 0; record_name = ''; record_data = ''
elif record_type.startswith("urn:nfc:wkt:"):
header_flags = 1; record_type = record_type[12:]
elif re.match(r'[a-zA-Z0-9-]+/[a-zA-Z0-9-+.]+', record_type):
header_flags = 2; record_type = record_type
elif re.match(r'[a-zA-Z][a-zA-Z0-9+-.]*://', record_type):
header_flags = 3; record_type = record_type
elif record_type.startswith("urn:nfc:ext:"):
header_flags = 4; record_type = record_type[12:]
elif record_type == 'unknown':
header_flags = 5; record_type = ''
elif record_type == 'unchanged':
header_flags = 6; record_type = ''
type_length = len(record_type)
data_length = len(record_data)
name_length = len(record_name)
if self._message_begin:
header_flags |= 0x80
if self._message_end:
header_flags |= 0x40
if data_length < 256:
header_flags |= 0x10
if name_length > 0:
header_flags |= 0x08
if data_length < 256:
f.write(struct.pack(">BBB", header_flags, type_length, data_length))
else:
f.write(struct.pack(">BBL", header_flags, type_length, data_length))
if name_length > 0:
f.write(struct.pack(">B", name_length))
f.write(record_type)
f.write(record_name)
f.write(record_data)
|
Serialize an NDEF record to a file-like object.
|
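A small worked example of the header byte built by the method above, for a hypothetical well-known text record ('urn:nfc:wkt:T') that is both first and last in its message, has a short payload, and carries no name. The flag values follow the constants used in the code; the payload length is only an illustration.

import struct

# TNF 0x01 (well-known type), MB 0x80, ME 0x40, SR 0x10; no IL bit since there is no name.
header_flags = 0x01 | 0x80 | 0x40 | 0x10
assert header_flags == 0xD1

# With type 'T' (1 byte) and a 5-byte payload such as b'\x02enhi',
# the short-record fixed part packs to b'\xd1\x01\x05'.
assert struct.pack(">BBB", header_flags, 1, 5) == b"\xd1\x01\x05"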
def present(name=None,
table_name=None,
region=None,
key=None,
keyid=None,
profile=None,
read_capacity_units=None,
write_capacity_units=None,
alarms=None,
alarms_from_pillar="boto_dynamodb_alarms",
hash_key=None,
hash_key_data_type=None,
range_key=None,
range_key_data_type=None,
local_indexes=None,
global_indexes=None,
backup_configs_from_pillars='boto_dynamodb_backup_configs'):
'''
Ensure the DynamoDB table exists. Table throughput can be updated after
table creation.
Global secondary indexes (GSIs) are managed with some exceptions:
- If a GSI deletion is detected, a failure will occur (deletes should be
done manually in the AWS console).
- If multiple GSIs are added in a single Salt call, a failure will occur
(boto supports one creation at a time). Note that this only applies after
table creation; multiple GSIs can be created during table creation.
- Updates to existing GSIs are limited to read/write capacity only
(DynamoDB limitation).
name
Name of the DynamoDB table
table_name
Name of the DynamoDB table (deprecated)
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
read_capacity_units
The read throughput for this table
write_capacity_units
The write throughput for this table
hash_key
The name of the attribute that will be used as the hash key
for this table
hash_key_data_type
The DynamoDB datatype of the hash key
range_key
The name of the attribute that will be used as the range key
for this table
range_key_data_type
The DynamoDB datatype of the range key
local_indexes
The local indexes you would like to create
global_indexes
The global indexes you would like to create
backup_configs_from_pillars
Pillars to use to configure DataPipeline backups
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
if table_name:
ret['warnings'] = ['boto_dynamodb.present: `table_name` is deprecated.'
' Please use `name` instead.']
ret['name'] = table_name
name = table_name
comments = []
changes_old = {}
changes_new = {}
# Ensure DynamoDB table exists
table_exists = __salt__['boto_dynamodb.exists'](
name,
region,
key,
keyid,
profile
)
if not table_exists:
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'DynamoDB table {0} would be created.'.format(name)
return ret
else:
is_created = __salt__['boto_dynamodb.create_table'](
name,
region,
key,
keyid,
profile,
read_capacity_units,
write_capacity_units,
hash_key,
hash_key_data_type,
range_key,
range_key_data_type,
local_indexes,
global_indexes
)
if not is_created:
ret['result'] = False
ret['comment'] = 'Failed to create table {0}'.format(name)
_add_changes(ret, changes_old, changes_new)
return ret
comments.append('DynamoDB table {0} was successfully created'.format(name))
changes_new['table'] = name
changes_new['read_capacity_units'] = read_capacity_units
changes_new['write_capacity_units'] = write_capacity_units
changes_new['hash_key'] = hash_key
changes_new['hash_key_data_type'] = hash_key_data_type
changes_new['range_key'] = range_key
changes_new['range_key_data_type'] = range_key_data_type
changes_new['local_indexes'] = local_indexes
changes_new['global_indexes'] = global_indexes
else:
comments.append('DynamoDB table {0} exists'.format(name))
# Ensure DynamoDB table provisioned throughput matches
description = __salt__['boto_dynamodb.describe'](
name,
region,
key,
keyid,
profile
)
provisioned_throughput = description.get('Table', {}).get('ProvisionedThroughput', {})
current_write_capacity_units = provisioned_throughput.get('WriteCapacityUnits')
current_read_capacity_units = provisioned_throughput.get('ReadCapacityUnits')
throughput_matches = (current_write_capacity_units == write_capacity_units and
current_read_capacity_units == read_capacity_units)
if not throughput_matches:
if __opts__['test']:
ret['result'] = None
comments.append('DynamoDB table {0} is set to be updated.'.format(name))
else:
is_updated = __salt__['boto_dynamodb.update'](
name,
throughput={
'read': read_capacity_units,
'write': write_capacity_units,
},
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if not is_updated:
ret['result'] = False
ret['comment'] = 'Failed to update table {0}'.format(name)
_add_changes(ret, changes_old, changes_new)
return ret
comments.append('DynamoDB table {0} was successfully updated'.format(name))
            changes_old['read_capacity_units'] = current_read_capacity_units
            changes_old['write_capacity_units'] = current_write_capacity_units
            changes_new['read_capacity_units'] = read_capacity_units
            changes_new['write_capacity_units'] = write_capacity_units
else:
comments.append('DynamoDB table {0} throughput matches'.format(name))
provisioned_indexes = description.get('Table', {}).get('GlobalSecondaryIndexes', [])
_ret = _global_indexes_present(provisioned_indexes, global_indexes, changes_old,
changes_new, comments, name, region, key, keyid,
profile)
if not _ret['result']:
comments.append(_ret['comment'])
ret['result'] = _ret['result']
if ret['result'] is False:
ret['comment'] = ',\n'.join(comments)
_add_changes(ret, changes_old, changes_new)
return ret
_ret = _alarms_present(name, alarms, alarms_from_pillar,
write_capacity_units, read_capacity_units,
region, key, keyid, profile)
ret['changes'] = dictupdate.update(ret['changes'], _ret['changes'])
comments.append(_ret['comment'])
if not _ret['result']:
ret['result'] = _ret['result']
if ret['result'] is False:
ret['comment'] = ',\n'.join(comments)
_add_changes(ret, changes_old, changes_new)
return ret
# Ensure backup datapipeline is present
datapipeline_configs = copy.deepcopy(
__salt__['pillar.get'](backup_configs_from_pillars, [])
)
for config in datapipeline_configs:
datapipeline_ret = _ensure_backup_datapipeline_present(
name=name,
schedule_name=config['name'],
period=config['period'],
utc_hour=config['utc_hour'],
s3_base_location=config['s3_base_location'],
)
# Add comments and changes if successful changes were made (True for live mode,
# None for test mode).
if datapipeline_ret['result'] in [True, None]:
ret['result'] = datapipeline_ret['result']
comments.append(datapipeline_ret['comment'])
if datapipeline_ret.get('changes'):
                ret['changes']['backup_datapipeline_{0}'.format(config['name'])] = \
                    datapipeline_ret.get('changes')
else:
ret['comment'] = ',\n'.join([ret['comment'], datapipeline_ret['comment']])
_add_changes(ret, changes_old, changes_new)
return ret
ret['comment'] = ',\n'.join(comments)
_add_changes(ret, changes_old, changes_new)
return ret
|
Ensure the DynamoDB table exists. Table throughput can be updated after
table creation.
Global secondary indexes (GSIs) are managed with some exceptions:
- If a GSI deletion is detected, a failure will occur (deletes should be
done manually in the AWS console).
- If multiple GSIs are added in a single Salt call, a failure will occur
(boto supports one creation at a time). Note that this only applies after
table creation; multiple GSIs can be created during table creation.
- Updates to existing GSIs are limited to read/write capacity only
(DynamoDB limitation).
name
Name of the DynamoDB table
table_name
Name of the DynamoDB table (deprecated)
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
read_capacity_units
The read throughput for this table
write_capacity_units
The write throughput for this table
hash_key
The name of the attribute that will be used as the hash key
for this table
hash_key_data_type
The DynamoDB datatype of the hash key
range_key
The name of the attribute that will be used as the range key
for this table
range_key_data_type
The DynamoDB datatype of the range key
local_indexes
The local indexes you would like to create
global_indexes
The global indexes you would like to create
backup_configs_from_pillars
Pillars to use to configure DataPipeline backups
|
def remove_directory(self, directory_name, *args, **kwargs):
""" :meth:`.WNetworkClientProto.remove_directory` method implementation
"""
client = self.dav_client()
remote_path = self.join_path(self.session_path(), directory_name)
if client.is_dir(remote_path) is False:
raise ValueError('Unable to remove non-directory entry')
client.clean(remote_path)
|
:meth:`.WNetworkClientProto.remove_directory` method implementation
|
def _get_metadata(network_id, user_id):
"""
Get all the metadata in a network, across all scenarios
returns a dictionary of dict objects, keyed on dataset ID
"""
log.info("Getting Metadata")
dataset_qry = db.DBSession.query(
Dataset
).outerjoin(DatasetOwner, and_(DatasetOwner.dataset_id==Dataset.id, DatasetOwner.user_id==user_id)).filter(
or_(Dataset.hidden=='N', DatasetOwner.user_id != None),
Scenario.id==ResourceScenario.scenario_id,
Scenario.network_id==network_id,
Dataset.id==ResourceScenario.dataset_id).distinct().subquery()
rs_qry = db.DBSession.query(
Metadata
).join(dataset_qry, Metadata.dataset_id==dataset_qry.c.id)
x = time.time()
logging.info("Getting all matadata")
all_metadata = db.DBSession.execute(rs_qry.statement).fetchall()
log.info("%s metadata jointly retrieved in %s",len(all_metadata), time.time()-x)
logging.info("metadata retrieved. Processing results...")
x = time.time()
metadata_dict = dict()
for m in all_metadata:
if metadata_dict.get(m.dataset_id):
metadata_dict[m.dataset_id][m.key] = six.text_type(m.value)
else:
metadata_dict[m.dataset_id] = {m.key : six.text_type(m.value)}
logging.info("metadata processed in %s", time.time()-x)
return metadata_dict
|
Get all the metadata in a network, across all scenarios
returns a dictionary of dict objects, keyed on dataset ID
|
def get_document(self,
name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None):
"""
Retrieves the specified document.
Example:
>>> import dialogflow_v2beta1
>>>
>>> client = dialogflow_v2beta1.DocumentsClient()
>>>
>>> name = client.document_path('[PROJECT]', '[KNOWLEDGE_BASE]', '[DOCUMENT]')
>>>
>>> response = client.get_document(name)
Args:
name (str): Required. The name of the document to retrieve.
Format ``projects/<Project ID>/knowledgeBases/<Knowledge Base
ID>/documents/<Document ID>``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2beta1.types.Document` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if 'get_document' not in self._inner_api_calls:
self._inner_api_calls[
'get_document'] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_document,
default_retry=self._method_configs['GetDocument'].retry,
default_timeout=self._method_configs['GetDocument']
.timeout,
client_info=self._client_info,
)
request = document_pb2.GetDocumentRequest(name=name, )
return self._inner_api_calls['get_document'](
request, retry=retry, timeout=timeout, metadata=metadata)
|
Retrieves the specified document.
Example:
>>> import dialogflow_v2beta1
>>>
>>> client = dialogflow_v2beta1.DocumentsClient()
>>>
>>> name = client.document_path('[PROJECT]', '[KNOWLEDGE_BASE]', '[DOCUMENT]')
>>>
>>> response = client.get_document(name)
Args:
name (str): Required. The name of the document to retrieve.
Format ``projects/<Project ID>/knowledgeBases/<Knowledge Base
ID>/documents/<Document ID>``.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dialogflow_v2beta1.types.Document` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
|
def write_word_at(self, index: int, value: Union[int, BitVec, bool, Bool]) -> None:
"""Writes a 32 byte word to memory at the specified index`
:param index: index to write to
:param value: the value to write to memory
"""
try:
# Attempt to concretize value
if isinstance(value, bool):
_bytes = (
int(1).to_bytes(32, byteorder="big")
if value
else int(0).to_bytes(32, byteorder="big")
)
else:
_bytes = util.concrete_int_to_bytes(value)
assert len(_bytes) == 32
self[index : index + 32] = list(bytearray(_bytes))
except (Z3Exception, AttributeError): # BitVector or BoolRef
value = cast(Union[BitVec, Bool], value)
if isinstance(value, Bool):
value_to_write = If(
value,
symbol_factory.BitVecVal(1, 256),
symbol_factory.BitVecVal(0, 256),
)
else:
value_to_write = value
assert value_to_write.size() == 256
for i in range(0, value_to_write.size(), 8):
self[index + 31 - (i // 8)] = Extract(i + 7, i, value_to_write)
|
Writes a 32-byte word to memory at the specified index.
:param index: index to write to
:param value: the value to write to memory
|
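For the concrete branch above, the essential step is a 32-byte big-endian encoding of the value; a quick standalone check, assuming util.concrete_int_to_bytes behaves like int.to_bytes for this purpose:

value = 0xDEADBEEF
word = value.to_bytes(32, byteorder="big")
assert len(word) == 32
assert word[-4:] == b"\xde\xad\xbe\xef"    # the value sits in the low-order bytes
assert all(b == 0 for b in word[:28])      # high-order bytes are zero padding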
def bit_flip(
p: Optional[float] = None
) -> Union[common_gates.XPowGate, BitFlipChannel]:
r"""
Construct a BitFlipChannel that flips a qubit state
with probability of a flip given by p. If p is None, return
a guaranteed flip in the form of an X operation.
This channel evolves a density matrix via
$$
\rho \rightarrow M_0 \rho M_0^\dagger + M_1 \rho M_1^\dagger
$$
With
$$
\begin{aligned}
M_0 =& \sqrt{p} \begin{bmatrix}
1 & 0 \\
0 & 1
\end{bmatrix}
\\
M_1 =& \sqrt{1-p} \begin{bmatrix}
0 & 1 \\
1 & -0
\end{bmatrix}
\end{aligned}
$$
Args:
p: the probability of a bit flip.
Raises:
ValueError: if p is not a valid probability.
"""
if p is None:
return pauli_gates.X
return _bit_flip(p)
|
r"""
Construct a BitFlipChannel that flips a qubit state
with probability of a flip given by p. If p is None, return
a guaranteed flip in the form of an X operation.
This channel evolves a density matrix via
$$
\rho \rightarrow M_0 \rho M_0^\dagger + M_1 \rho M_1^\dagger
$$
With
$$
\begin{aligned}
M_0 =& \sqrt{p} \begin{bmatrix}
1 & 0 \\
0 & 1
\end{bmatrix}
\\
M_1 =& \sqrt{1-p} \begin{bmatrix}
0 & 1 \\
1 & -0
\end{bmatrix}
\end{aligned}
$$
Args:
p: the probability of a bit flip.
Raises:
ValueError: if p is not a valid probability.
|
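As a sanity check on the channel description above, the two Kraus operators exactly as written in the docstring satisfy the completeness relation for any probability p; a minimal numpy verification, independent of the library's own gate types:

import numpy as np

p = 0.3
I = np.eye(2)
X = np.array([[0.0, 1.0], [1.0, 0.0]])
M0 = np.sqrt(p) * I          # M_0 as written in the docstring
M1 = np.sqrt(1 - p) * X      # M_1 as written in the docstring
assert np.allclose(M0.conj().T @ M0 + M1.conj().T @ M1, I)   # sum_k M_k^dagger M_k = I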
def enable_rm_ha(self, new_rm_host_id, zk_service_name=None):
"""
Enable high availability for a YARN ResourceManager.
@param new_rm_host_id: id of the host where the second ResourceManager
will be added.
@param zk_service_name: Name of the ZooKeeper service to use for auto-failover.
If YARN service depends on a ZooKeeper service then that ZooKeeper
service will be used for auto-failover and in that case this parameter
can be omitted.
@return: Reference to the submitted command.
@since: API v6
"""
args = dict(
newRmHostId = new_rm_host_id,
zkServiceName = zk_service_name
)
return self._cmd('enableRmHa', data=args)
|
Enable high availability for a YARN ResourceManager.
@param new_rm_host_id: id of the host where the second ResourceManager
will be added.
@param zk_service_name: Name of the ZooKeeper service to use for auto-failover.
If YARN service depends on a ZooKeeper service then that ZooKeeper
service will be used for auto-failover and in that case this parameter
can be omitted.
@return: Reference to the submitted command.
@since: API v6
|
def _parse_args(self, args, known_only):
"""Helper function to do the main argument parsing.
This function goes through args and does the bulk of the flag parsing.
It will find the corresponding flag in our flag dictionary, and call its
.parse() method on the flag value.
Args:
args: [str], a list of strings with the arguments to parse.
known_only: bool, if True, parse and remove known flags; return the rest
untouched. Unknown flags specified by --undefok are not returned.
Returns:
A tuple with the following:
unknown_flags: List of (flag name, arg) for flags we don't know about.
unparsed_args: List of arguments we did not parse.
Raises:
Error: Raised on any parsing error.
ValueError: Raised on flag value parsing error.
"""
unparsed_names_and_args = [] # A list of (flag name or None, arg).
undefok = set()
retired_flag_func = self.__dict__['__is_retired_flag_func']
flag_dict = self._flags()
args = iter(args)
for arg in args:
value = None
def get_value():
# pylint: disable=cell-var-from-loop
try:
return next(args) if value is None else value
except StopIteration:
raise _exceptions.Error('Missing value for flag ' + arg) # pylint: disable=undefined-loop-variable
if not arg.startswith('-'):
# A non-argument: default is break, GNU is skip.
unparsed_names_and_args.append((None, arg))
if self.is_gnu_getopt():
continue
else:
break
if arg == '--':
if known_only:
unparsed_names_and_args.append((None, arg))
break
# At this point, arg must start with '-'.
if arg.startswith('--'):
arg_without_dashes = arg[2:]
else:
arg_without_dashes = arg[1:]
if '=' in arg_without_dashes:
name, value = arg_without_dashes.split('=', 1)
else:
name, value = arg_without_dashes, None
if not name:
# The argument is all dashes (including one dash).
unparsed_names_and_args.append((None, arg))
if self.is_gnu_getopt():
continue
else:
break
# --undefok is a special case.
if name == 'undefok':
value = get_value()
undefok.update(v.strip() for v in value.split(','))
undefok.update('no' + v.strip() for v in value.split(','))
continue
flag = flag_dict.get(name)
if flag:
if flag.boolean and value is None:
value = 'true'
else:
value = get_value()
elif name.startswith('no') and len(name) > 2:
# Boolean flags can take the form of --noflag, with no value.
noflag = flag_dict.get(name[2:])
if noflag and noflag.boolean:
if value is not None:
raise ValueError(arg + ' does not take an argument')
flag = noflag
value = 'false'
if retired_flag_func and not flag:
is_retired, is_bool = retired_flag_func(name)
# If we didn't recognize that flag, but it starts with
# "no" then maybe it was a boolean flag specified in the
# --nofoo form.
if not is_retired and name.startswith('no'):
is_retired, is_bool = retired_flag_func(name[2:])
is_retired = is_retired and is_bool
if is_retired:
if not is_bool and value is None:
# This happens when a non-bool retired flag is specified
# in format of "--flag value".
get_value()
logging.error('Flag "%s" is retired and should no longer '
'be specified. See go/totw/90.', name)
continue
if flag:
flag.parse(value)
flag.using_default_value = False
else:
unparsed_names_and_args.append((name, arg))
unknown_flags = []
unparsed_args = []
for name, arg in unparsed_names_and_args:
if name is None:
# Positional arguments.
unparsed_args.append(arg)
elif name in undefok:
# Remove undefok flags.
continue
else:
# This is an unknown flag.
if known_only:
unparsed_args.append(arg)
else:
unknown_flags.append((name, arg))
unparsed_args.extend(list(args))
return unknown_flags, unparsed_args
|
Helper function to do the main argument parsing.
This function goes through args and does the bulk of the flag parsing.
It will find the corresponding flag in our flag dictionary, and call its
.parse() method on the flag value.
Args:
args: [str], a list of strings with the arguments to parse.
known_only: bool, if True, parse and remove known flags; return the rest
untouched. Unknown flags specified by --undefok are not returned.
Returns:
A tuple with the following:
unknown_flags: List of (flag name, arg) for flags we don't know about.
unparsed_args: List of arguments we did not parse.
Raises:
Error: Raised on any parsing error.
ValueError: Raised on flag value parsing error.
|
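A stripped-down sketch of the name/value splitting performed in the parser above, for illustration only: it handles '--flag=value', '--flag value' and the boolean '--noflag' form, and deliberately omits undefok, retired flags and GNU-style positional handling.

def split_flag_args(args, boolean_flags):
    # Returns (name, value) pairs for a simplified flag grammar.
    out = []
    it = iter(args)
    for arg in it:
        stripped = arg.lstrip('-')
        if '=' in stripped:
            name, value = stripped.split('=', 1)
        elif stripped.startswith('no') and stripped[2:] in boolean_flags:
            name, value = stripped[2:], 'false'   # --noflag form
        elif stripped in boolean_flags:
            name, value = stripped, 'true'        # bare boolean flag
        else:
            name, value = stripped, next(it)      # value is the following argument
        out.append((name, value))
    return out

assert split_flag_args(['--debug', '--nocache', '--level=3', '--out', 'x.txt'],
                       {'debug', 'cache'}) == [
    ('debug', 'true'), ('cache', 'false'), ('level', '3'), ('out', 'x.txt')]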
def cmd_list(args):
"""List all element in pen"""
for penlist in penStore.data:
puts(penlist + " (" + str(len(penStore.data[penlist])) + ")")
|
List all element in pen
|
def list_cameras():
""" List all attached USB cameras that are supported by libgphoto2.
:return: All recognized cameras
:rtype: list of :py:class:`Camera`
"""
ctx = lib.gp_context_new()
camlist_p = new_gp_object("CameraList")
port_list_p = new_gp_object("GPPortInfoList")
lib.gp_port_info_list_load(port_list_p)
abilities_list_p = new_gp_object("CameraAbilitiesList")
lib.gp_abilities_list_load(abilities_list_p, ctx)
lib.gp_abilities_list_detect(abilities_list_p, port_list_p,
camlist_p, ctx)
out = []
for idx in range(lib.gp_list_count(camlist_p)):
name = get_string(lib.gp_list_get_name, camlist_p, idx)
value = get_string(lib.gp_list_get_value, camlist_p, idx)
# Skip iteration if no matches
matches = re.match(r"usb:(\d+),(\d+)", value)
if not matches:
continue
bus_no, device_no = (int(x) for x in matches.groups())
abilities = ffi.new("CameraAbilities*")
ability_idx = lib.gp_abilities_list_lookup_model(
abilities_list_p, name.encode())
lib.gp_abilities_list_get_abilities(abilities_list_p, ability_idx,
abilities)
if abilities.device_type == lib.GP_DEVICE_STILL_CAMERA:
out.append(Camera(bus_no, device_no, lazy=True,
_abilities=abilities))
lib.gp_list_free(camlist_p)
lib.gp_port_info_list_free(port_list_p)
lib.gp_abilities_list_free(abilities_list_p)
return out
|
List all attached USB cameras that are supported by libgphoto2.
:return: All recognized cameras
:rtype: list of :py:class:`Camera`
|
def get(self, request, *args, **kwargs):
"""
Method for handling GET requests. Passes the
following arguments to the context:
* **obj** - The object to publish
* **done_url** - The result of the `get_done_url` method
"""
self.object = self.get_object()
return self.render(request, obj=self.object,
done_url=self.get_done_url())
|
Method for handling GET requests. Passes the
following arguments to the context:
* **obj** - The object to publish
* **done_url** - The result of the `get_done_url` method
|
def items(self, prefix=None, delimiter=None):
"""Get an iterator for the items within this bucket.
Args:
prefix: an optional prefix to match items.
delimiter: an optional string to simulate directory-like semantics. The returned items
will be those whose names do not contain the delimiter after the prefix. For
the remaining items, the names will be returned truncated after the delimiter
with duplicates removed (i.e. as pseudo-directories).
Returns:
An iterable list of items within this bucket.
"""
return _item.Items(self._name, prefix, delimiter, context=self._context)
|
Get an iterator for the items within this bucket.
Args:
prefix: an optional prefix to match items.
delimiter: an optional string to simulate directory-like semantics. The returned items
will be those whose names do not contain the delimiter after the prefix. For
the remaining items, the names will be returned truncated after the delimiter
with duplicates removed (i.e. as pseudo-directories).
Returns:
An iterable list of items within this bucket.
|
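An illustration (not the library's implementation) of the delimiter semantics described above, applied to a plain list of object names:

def pseudo_dirs(names, prefix, delimiter):
    # Names without the delimiter after the prefix are kept as items; the rest
    # are truncated after the first delimiter and de-duplicated (pseudo-directories).
    items, dirs = [], []
    for name in names:
        if not name.startswith(prefix):
            continue
        rest = name[len(prefix):]
        if delimiter in rest:
            d = prefix + rest.split(delimiter, 1)[0] + delimiter
            if d not in dirs:
                dirs.append(d)
        else:
            items.append(name)
    return items + dirs

names = ['logs/a.txt', 'logs/2019/b.txt', 'logs/2019/c.txt', 'data/d.txt']
assert pseudo_dirs(names, 'logs/', '/') == ['logs/a.txt', 'logs/2019/']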
def record(self):
# type: () -> bytes
'''
A method to generate the string representing this UDF Entity ID.
Parameters:
None.
Returns:
A string representing this UDF Entity ID.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('UDF Entity ID not initialized')
return struct.pack(self.FMT, self.flags, self.identifier, self.suffix)
|
A method to generate the string representing this UDF Entity ID.
Parameters:
None.
Returns:
A string representing this UDF Entity ID.
|
def log_conditional_likelihood(self, x):
"""
likelihood \sum_t log p(y_t | x_t)
Optionally override this in base classes
"""
T, D = self.T, self.D_latent
assert x.shape == (T, D)
ll = 0
for t in range(self.T):
ll += self.local_log_likelihood(x[t], self.data[t], self.inputs[t])
return ll
|
likelihood \sum_t log p(y_t | x_t)
Optionally override this in base classes
|
def _api_call(function):
"""
    Decorator to call a pywebview API, checking for _webview_ready and raising
    appropriate exceptions on failure.
"""
@wraps(function)
def wrapper(*args, **kwargs):
try:
if not _webview_ready.wait(15):
raise Exception('Main window failed to start')
return function(*args, **kwargs)
except NameError:
raise Exception('Create a web view window first, before invoking this function')
except KeyError as e:
try:
uid = kwargs['uid']
except KeyError:
# uid not passed as a keyword arg, assumes it to be last in the arg list
uid = args[-1]
raise Exception('Cannot call function: No webview exists with uid: {}'.format(uid))
return wrapper
|
Decorator to call a pywebview API, checking for _webview_ready and raising
appropriate exceptions on failure.
|
def get_profile(profile, caller, runner):
'''
Get profile.
:param profile:
:return:
'''
profiles = profile.split(',')
data = {}
for profile in profiles:
if os.path.basename(profile) == profile:
profile = profile.split('.')[0] # Trim extension if someone added it
profile_path = os.path.join(os.path.dirname(__file__), 'profiles', profile + '.yml')
else:
profile_path = profile
if os.path.exists(profile_path):
try:
rendered_template = _render_profile(profile_path, caller, runner)
line = '-' * 80
log.debug('\n%s\n%s\n%s\n', line, rendered_template, line)
data.update(yaml.load(rendered_template))
except Exception as ex:
log.debug(ex, exc_info=True)
raise salt.exceptions.SaltException('Rendering profile failed: {}'.format(ex))
else:
raise salt.exceptions.SaltException('Profile "{}" is not found.'.format(profile))
return data
|
Get profile.
:param profile:
:return:
|
def _position(self):
"""Get media position."""
position = 0
if self.state != STATE_IDLE:
resp = self._player.query_position(_FORMAT_TIME)
position = resp[1] // _NANOSEC_MULT
return position
|
Get media position.
|
def get_handler_stats(self):
''' Return handler read statistics
Returns a dictionary of managed handler data read statistics. The
format is primarily controlled by the
:func:`SocketStreamCapturer.dump_all_handler_stats` function::
{
<capture address>: <list of handler capture statistics>
}
'''
return {
address : stream_capturer[0].dump_all_handler_stats()
for address, stream_capturer in self._stream_capturers.iteritems()
}
|
Return handler read statistics
Returns a dictionary of managed handler data read statistics. The
format is primarily controlled by the
:func:`SocketStreamCapturer.dump_all_handler_stats` function::
{
<capture address>: <list of handler capture statistics>
}
|
def email_send(text_template, html_template, data, subject, emails, headers=None):
"""Send an HTML/Plaintext email with the following fields.
text_template: URL to a Django template for the text email's contents
    html_template: URL to a Django template for the HTML email's contents
data: The context to pass to the templates
subject: The subject of the email
emails: The addresses to send the email to
headers: A dict of additional headers to send to the message
"""
text = get_template(text_template)
html = get_template(html_template)
text_content = text.render(data)
html_content = html.render(data)
subject = settings.EMAIL_SUBJECT_PREFIX + subject
headers = {} if headers is None else headers
msg = EmailMultiAlternatives(subject, text_content, settings.EMAIL_FROM, emails, headers=headers)
msg.attach_alternative(html_content, "text/html")
logger.debug("Emailing {} to {}".format(subject, emails))
msg.send()
return msg
|
Send an HTML/Plaintext email with the following fields.
text_template: URL to a Django template for the text email's contents
html_template: URL to a Django template for the HTML email's contents
data: The context to pass to the templates
subject: The subject of the email
emails: The addresses to send the email to
headers: A dict of additional headers to send to the message
|
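A hypothetical call to the helper above; the template paths, context key and addresses are placeholders, and settings.EMAIL_SUBJECT_PREFIX / settings.EMAIL_FROM are assumed to be configured in the Django settings:

email_send(
    'emails/welcome.txt',       # hypothetical plaintext template
    'emails/welcome.html',      # hypothetical HTML template
    {'first_name': 'Ada'},      # context passed to both templates
    'Welcome aboard',
    ['ada@example.com'],
    headers={'Reply-To': 'support@example.com'},
)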
def build(self, builder):
"""Build XML by appending to builder"""
builder.start("Protocol", {})
for child in self.study_event_refs:
child.build(builder)
for alias in self.aliases:
alias.build(builder)
builder.end("Protocol")
|
Build XML by appending to builder
|
def cmd_isn(ip, port, count, iface, graph, verbose):
"""Create TCP connections and print the TCP initial sequence
numbers for each one.
\b
$ sudo habu.isn -c 5 www.portantier.com
1962287220
1800895007
589617930
3393793979
469428558
Note: You can get a graphical representation (needs the matplotlib package)
using the '-g' option to better understand the randomness.
"""
conf.verb = False
if iface:
conf.iface = iface
isn_values = []
for _ in range(count):
pkt = IP(dst=ip)/TCP(sport=RandShort(), dport=port, flags="S")
ans = sr1(pkt, timeout=0.5)
if ans:
send(IP(dst=ip)/TCP(sport=pkt[TCP].sport, dport=port, ack=ans[TCP].seq + 1, flags='A'))
isn_values.append(ans[TCP].seq)
if verbose:
ans.show2()
if graph:
try:
import matplotlib.pyplot as plt
except ImportError:
print("To graph support, install matplotlib")
return 1
plt.plot(range(len(isn_values)), isn_values, 'ro')
plt.show()
else:
for v in isn_values:
print(v)
return True
|
Create TCP connections and print the TCP initial sequence
numbers for each one.
\b
$ sudo habu.isn -c 5 www.portantier.com
1962287220
1800895007
589617930
3393793979
469428558
Note: You can get a graphical representation (needs the matplotlib package)
using the '-g' option to better understand the randomness.
|
def _delete_port_profile_from_ucsm(self, handle, port_profile, ucsm_ip):
"""Deletes Port Profile from UCS Manager."""
port_profile_dest = (const.PORT_PROFILESETDN + const.VNIC_PATH_PREFIX +
port_profile)
handle.StartTransaction()
# Find port profile on the UCS Manager
p_profile = handle.GetManagedObject(
None,
self.ucsmsdk.VnicProfile.ClassId(),
{self.ucsmsdk.VnicProfile.NAME: port_profile,
self.ucsmsdk.VnicProfile.DN: port_profile_dest})
if p_profile:
handle.RemoveManagedObject(p_profile)
else:
LOG.warning('UCS Manager network driver did not find '
'Port Profile %s to delete.',
port_profile)
handle.CompleteTransaction()
|
Deletes Port Profile from UCS Manager.
|
def update(context, id, etag, name, country, parent_id, active, external):
"""update(context, id, etag, name, country, parent_id, active, external)
Update a team.
>>> dcictl team-update [OPTIONS]
:param string id: ID of the team to update [required]
:param string etag: Entity tag of the resource [required]
:param string name: Name of the team [required]
:param string country: Country code where the team is based
:param boolean active: Set the team in the (in)active state
:param string parent_id: The ID of the team this team belongs to
:param boolean external: Set the team as external
"""
result = team.update(context, id=id, etag=etag, name=name,
state=utils.active_string(active),
country=country, parent_id=parent_id,
external=external)
utils.format_output(result, context.format)
|
update(context, id, etag, name, country, parent_id, active, external)
Update a team.
>>> dcictl team-update [OPTIONS]
:param string id: ID of the team to update [required]
:param string etag: Entity tag of the resource [required]
:param string name: Name of the team [required]
:param string country: Country code where the team is based
:param boolean active: Set the team in the (in)active state
:param string parent_id: The ID of the team this team belongs to
:param boolean external: Set the team as external
|
def _get_chrbands(self, limit, taxon):
"""
For the given taxon, it will fetch the chr band file.
We will not deal with the coordinate information with this parser.
Here, we only are concerned with building the partonomy.
:param limit:
:return:
"""
model = Model(self.graph)
line_counter = 0
myfile = '/'.join((self.rawdir, self.files[taxon]['file']))
LOG.info("Processing Chr bands from FILE: %s", myfile)
geno = Genotype(self.graph)
# build the organism's genome from the taxon
genome_label = self.files[taxon]['genome_label']
taxon_id = 'NCBITaxon:' + taxon
# add the taxon as a class. adding the class label elsewhere
model.addClassToGraph(taxon_id, None)
model.addSynonym(taxon_id, genome_label)
genome_id = geno.makeGenomeID(taxon_id)
geno.addGenome(taxon_id, genome_label)
model.addOWLPropertyClassRestriction(
genome_id, self.globaltt['in taxon'], taxon_id)
placed_scaffold_pattern = r'chr(\d+|X|Y|Z|W|MT|M)'
# currently unused patterns
# unlocalized_scaffold_pattern = placed_scaffold_pattern + r'_(\w+)_random'
# unplaced_scaffold_pattern = r'chrUn_(\w+)'
col = ['chrom', 'start', 'stop', 'band', 'rtype']
with gzip.open(myfile, 'rb') as reader:
for line in reader:
line_counter += 1
# skip comments
line = line.decode().strip()
if line[0] == '#':
continue
# chr13 4500000 10000000 p12 stalk
row = line.split('\t')
chrom = row[col.index('chrom')]
band = row[col.index('band')]
rtype = row[col.index('rtype')]
# NOTE
# some less-finished genomes have placed and unplaced scaffolds
# * Placed scaffolds:
# Scaffold has an oriented location within a chromosome.
# * Unlocalized scaffolds:
                # scaffold's chromosome is known,
# scaffold's position, orientation or both is not known.
# *Unplaced scaffolds:
# it is not known which chromosome the scaffold belongs to.
# find out if the thing is a full on chromosome, or a scaffold:
# ex: unlocalized scaffold: chr10_KL568008v1_random
# ex: unplaced scaffold: chrUn_AABR07022428v1
mch = re.match(placed_scaffold_pattern+r'$', chrom)
if mch is not None and len(mch.groups()) == 1:
# the chromosome is the first match of the pattern
# chrom = m.group(1) # TODO unused
pass
else:
# let's skip over anything that isn't a placed_scaffold
LOG.info("Skipping non-placed chromosome %s", chrom)
continue
# the chrom class, taxon as the reference
cclassid = makeChromID(chrom, taxon, 'CHR')
# add the chromosome as a class
geno.addChromosomeClass(chrom, taxon_id, genome_label)
model.addOWLPropertyClassRestriction(
cclassid, self.globaltt['member of'], genome_id)
# add the band(region) as a class
maplocclass_id = cclassid+band
maplocclass_label = makeChromLabel(chrom+band, genome_label)
if band is not None and band.strip() != '':
region_type_id = self.map_type_of_region(rtype)
model.addClassToGraph(
maplocclass_id, maplocclass_label,
region_type_id)
else:
region_type_id = self.globaltt['chromosome']
# add the staining intensity of the band
if re.match(r'g(neg|pos|var)', rtype):
if region_type_id in [
self.globaltt['chromosome_band'],
self.globaltt['chromosome_subband']]:
stain_type = self.resolve(rtype)
if stain_type is not None:
model.addOWLPropertyClassRestriction(
maplocclass_id,
self.globaltt['has_sequence_attribute'],
self.resolve(rtype))
else:
# usually happens if it's a chromosome because
# they don't actually have banding info
LOG.info("feature type %s != chr band", region_type_id)
else:
LOG.warning('staining type not found: %s', rtype)
# get the parent bands, and make them unique
parents = list(self.make_parent_bands(band, set()))
# alphabetical sort will put them in smallest to biggest
parents.sort(reverse=True)
# print("PARENTS of", maplocclass_id, "=", parents)
# add the parents to the graph, in hierarchical order
# TODO this is somewhat inefficient due to
# re-adding upper-level nodes when iterating over the file
for prnt in parents:
parent = prnt.strip()
if parent is None or parent == "":
continue
pclassid = cclassid + parent # class chr parts
pclass_label = makeChromLabel(chrom + parent, genome_label)
rti = getChrPartTypeByNotation(parent, self.graph)
model.addClassToGraph(pclassid, pclass_label, rti)
# for canonical chromosomes,
# then the subbands are subsequences of the full band
# add the subsequence stuff as restrictions
if prnt != parents[-1]:
grandparent = 1 + parents.index(prnt)
pid = cclassid + parents[grandparent] # the instance
model.addOWLPropertyClassRestriction(
pclassid, self.globaltt['is subsequence of'], pid)
model.addOWLPropertyClassRestriction(
pid, self.globaltt['has subsequence'], pclassid)
else:
# add the last one (p or q usually)
# as attached to the chromosome
model.addOWLPropertyClassRestriction(
pclassid, self.globaltt['is subsequence of'], cclassid)
model.addOWLPropertyClassRestriction(
cclassid, self.globaltt['has subsequence'], pclassid)
# connect the band here to the first one in the parent list
if len(parents) > 0:
model.addOWLPropertyClassRestriction(
maplocclass_id, self.globaltt['is subsequence of'],
cclassid + parents[0])
model.addOWLPropertyClassRestriction(
cclassid + parents[0], self.globaltt['has subsequence'],
maplocclass_id)
if limit is not None and line_counter > limit:
break
# TODO figure out the staining intensities for the encompassing bands
return
|
For the given taxon, it will fetch the chr band file.
We will not deal with the coordinate information with this parser.
Here, we only are concerned with building the partonomy.
:param limit:
:return:
|
def apply_u_umlaut(stem: str):
"""
Changes the vowel of the last syllable of the given stem if the vowel is affected by an u-umlaut.
>>> apply_u_umlaut("far")
'för'
>>> apply_u_umlaut("ör")
'ör'
>>> apply_u_umlaut("axl")
'öxl'
>>> apply_u_umlaut("hafn")
'höfn'
:param stem:
:return:
"""
assert len(stem) > 0
s_stem = s.syllabify_ssp(stem.lower())
if len(s_stem) == 1:
last_syllable = OldNorseSyllable(s_stem[-1], VOWELS, CONSONANTS)
last_syllable.apply_u_umlaut()
return "".join(s_stem[:-1]) + str(last_syllable)
else:
penultimate_syllable = OldNorseSyllable(s_stem[-2], VOWELS, CONSONANTS)
last_syllable = OldNorseSyllable(s_stem[-1], VOWELS, CONSONANTS)
penultimate_syllable.apply_u_umlaut()
last_syllable.apply_u_umlaut(True)
last_syllable.apply_u_umlaut(True)
return "".join(s_stem[:-2]) + str(penultimate_syllable) + str(last_syllable)
|
Changes the vowel of the last syllable of the given stem if the vowel is affected by an u-umlaut.
>>> apply_u_umlaut("far")
'för'
>>> apply_u_umlaut("ör")
'ör'
>>> apply_u_umlaut("axl")
'öxl'
>>> apply_u_umlaut("hafn")
'höfn'
:param stem:
:return:
|
def process_form(self, instance, field, form, empty_marker = None,
emptyReturnsMarker = False):
""" Some special field handling for disabled fields, which don't
get submitted by the browser but still need to be written away.
"""
bsc = getToolByName(instance, 'bika_setup_catalog')
default = super(PartitionSetupWidget,self).process_form(
instance, field, form, empty_marker, emptyReturnsMarker)
if not default:
return [], {}
value = default[0]
kwargs = len(default) > 1 and default[1] or {}
newvalue = []
for v in value:
v = dict(v)
if v.get('separate', '') == 'on' and not 'preservation' in v:
container_uid = v.get('container', [''])[0];
if container_uid:
container = bsc(UID=container_uid)[0].getObject();
if container.getPrePreserved():
pres = container.getPreservation()
if pres:
v['preservation'] = [pres.UID()]
newvalue.append(v)
return newvalue, kwargs
|
Some special field handling for disabled fields, which don't
get submitted by the browser but still need to be written away.
|
def notes_to_positions(notes, root):
""" Get notes positions.
ex) notes_to_positions(["C", "E", "G"], "C") -> [0, 4, 7]
:param list[str] notes: list of notes
:param str root: the root note
:rtype: list[int]
:return: list of note positions
"""
root_pos = note_to_val(root)
current_pos = root_pos
positions = []
for note in notes:
note_pos = note_to_val(note)
if note_pos < current_pos:
note_pos += 12 * ((current_pos - note_pos) // 12 + 1)
positions.append(note_pos - root_pos)
current_pos = note_pos
return positions
|
Get notes positions.
ex) notes_to_positions(["C", "E", "G"], "C") -> [0, 4, 7]
:param list[str] notes: list of notes
:param str root: the root note
:rtype: list[int]
:return: list of note positions
|
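One more worked case of the wrap-around logic above, as a self-contained sketch with an assumed semitone table (C=0, E=4, G=7, and so on) standing in for note_to_val: later notes that would move downward are lifted by whole octaves so positions never decrease.

NOTE_VALS = {'C': 0, 'D': 2, 'E': 4, 'F': 5, 'G': 7, 'A': 9, 'B': 11}

def positions_sketch(notes, root):
    # Mirrors the loop above using the tiny table for illustration.
    root_pos = NOTE_VALS[root]
    current, out = root_pos, []
    for note in notes:
        pos = NOTE_VALS[note]
        if pos < current:
            pos += 12 * ((current - pos) // 12 + 1)
        out.append(pos - root_pos)
        current = pos
    return out

assert positions_sketch(['C', 'E', 'G'], 'C') == [0, 4, 7]
assert positions_sketch(['E', 'G', 'C'], 'E') == [0, 3, 8]   # C wraps up an octave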
def current_git_dir():
"""Locate the .git directory."""
path = os.path.abspath(os.curdir)
while path != '/':
if os.path.isdir(os.path.join(path, '.git')):
return os.path.join(path, '.git')
path = os.path.dirname(path)
return None
|
Locate the .git directory.
|
def _get_pos(self):
"""
Get current position for scroll bar.
"""
if self._h >= len(self._options):
return 0
else:
return self._start_line / (len(self._options) - self._h)
|
Get current position for scroll bar.
|
def getStrings(lang_dict):
"""
Return a FunctionFS descriptor suitable for serialisation.
lang_dict (dict)
Key: language ID (ex: 0x0409 for en-us)
Value: list of unicode objects
All values must have the same number of items.
"""
field_list = []
kw = {}
try:
str_count = len(next(iter(lang_dict.values())))
except StopIteration:
str_count = 0
else:
for lang, string_list in lang_dict.items():
if len(string_list) != str_count:
raise ValueError('All values must have the same string count.')
field_id = 'strings_%04x' % lang
strings = b'\x00'.join(x.encode('utf-8') for x in string_list) + b'\x00'
field_type = type(
'String',
(StringBase, ),
{
'_fields_': [
('strings', ctypes.c_char * len(strings)),
],
},
)
field_list.append((field_id, field_type))
kw[field_id] = field_type(
lang=lang,
strings=strings,
)
klass = type(
'Strings',
(StringsHead, ),
{
'_fields_': field_list,
},
)
return klass(
magic=STRINGS_MAGIC,
length=ctypes.sizeof(klass),
str_count=str_count,
lang_count=len(lang_dict),
**kw
)
|
Return a FunctionFS descriptor suitable for serialisation.
lang_dict (dict)
Key: language ID (ex: 0x0409 for en-us)
Value: list of unicode objects
All values must have the same number of items.
|
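A hypothetical invocation of the helper above (it assumes the surrounding module's StringBase/StringsHead/STRINGS_MAGIC definitions are in scope); 0x0409 is the language ID for US English:

import ctypes

descriptor = getStrings({0x0409: [u'My Gadget', u'Configuration 1']})
raw = ctypes.string_at(ctypes.addressof(descriptor), ctypes.sizeof(descriptor))
# `raw` is the serialised blob a FunctionFS gadget would write to its ep0 file.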
def rackconnect(vm_):
'''
Determine if we should wait for rackconnect automation before running.
Either 'False' (default) or 'True'.
'''
return config.get_cloud_config_value(
'rackconnect', vm_, __opts__, default=False,
search_global=False
)
|
Determine if we should wait for rackconnect automation before running.
Either 'False' (default) or 'True'.
|
def update_helper_political_level(self):
"""To update the helper about the country and the admin_level."""
current_country = self.country_comboBox.currentText()
index = self.admin_level_comboBox.currentIndex()
current_level = self.admin_level_comboBox.itemData(index)
content = None
try:
content = \
self.countries[current_country]['levels'][str(current_level)]
if content == 'N/A' or content == 'fixme' or content == '':
raise KeyError
except KeyError:
content = self.tr('undefined')
finally:
text = self.tr('which represents %s in') % content
self.boundary_helper.setText(text)
|
To update the helper about the country and the admin_level.
|
def ADOSC(frame, fast=3, slow=10, high_col='high', low_col='low', close_col='close', vol_col='Volume'):
"""Chaikin A/D oscillator"""
return _frame_to_series(frame, [high_col, low_col, close_col, vol_col], talib.ADOSC, fast, slow)
|
Chaikin A/D oscillator
|
def _max(self):
"""Getter for the maximum series value"""
return (
self.range[1] if (self.range and self.range[1] is not None) else
(max(self._values) if self._values else None)
)
|
Getter for the maximum series value
|
def outputFieldMarkdown(self):
"""
    Sends the field definitions to standard out
"""
f, d = self.getFieldsColumnLengths()
fc, dc = self.printFieldsHeader(f, d)
f = max(fc, f)
d = max(dc, d)
self.printFields(f, d)
|
Sends the field definitions to standard out
|
def switch_to_window(self, window, wait=None):
"""
If ``window`` is a lambda, it switches to the first window for which ``window`` returns a
value other than False or None. If a window that matches can't be found, the window will be
switched back and :exc:`WindowError` will be raised.
Args:
window (Window | lambda): The window that should be switched to, or a filtering lambda.
wait (int | float, optional): The number of seconds to wait to find the window.
Returns:
Window: The new current window.
Raises:
            ScopeError: If this method is invoked inside :meth:`scope`, :meth:`frame`, or
:meth:`window`.
WindowError: If no window matches the given lambda.
"""
if len(self._scopes) > 1:
raise ScopeError(
"`switch_to_window` is not supposed to be invoked from "
"within `scope`s, `frame`s, or other `window`s.")
if isinstance(window, Window):
self.driver.switch_to_window(window.handle)
return window
else:
@self.document.synchronize(errors=(WindowError,), wait=wait)
def switch_and_get_matching_window():
original_window_handle = self.driver.current_window_handle
try:
for handle in self.driver.window_handles:
self.driver.switch_to_window(handle)
result = window()
if result:
return Window(self, handle)
except Exception:
self.driver.switch_to_window(original_window_handle)
raise
self.driver.switch_to_window(original_window_handle)
raise WindowError("Could not find a window matching lambda")
return switch_and_get_matching_window()
|
If ``window`` is a lambda, it switches to the first window for which ``window`` returns a
value other than False or None. If a window that matches can't be found, the window will be
switched back and :exc:`WindowError` will be raised.
Args:
window (Window | lambda): The window that should be switched to, or a filtering lambda.
wait (int | float, optional): The number of seconds to wait to find the window.
Returns:
Window: The new current window.
Raises:
        ScopeError: If this method is invoked inside :meth:`scope`, :meth:`frame`, or
:meth:`window`.
WindowError: If no window matches the given lambda.
|
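A hypothetical usage of the method above from a session object (the `page` name and window handles are placeholders), showing both the direct and the lambda form:

# Switch back to a Window object captured earlier.
page.switch_to_window(popup)

# Or search the open windows: switch to the first one whose title mentions
# "Invoice", waiting up to 5 seconds for it to appear.
page.switch_to_window(lambda: "Invoice" in page.title, wait=5)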
def _calc_traceback_limit(tb):
"""Calculates limit-parameter to strip away pytypes' internals when used
with API from traceback module.
"""
limit = 1
tb2 = tb
while not tb2.tb_next is None:
try:
maybe_pytypes = tb2.tb_next.tb_frame.f_code.co_filename.split(os.sep)[-2]
except IndexError:
maybe_pytypes = None
if maybe_pytypes == 'pytypes' and not \
tb2.tb_next.tb_frame.f_code == pytypes.typechecker._pytypes___import__.__code__:
break
else:
limit += 1
tb2 = tb2.tb_next
return limit
|
Calculates limit-parameter to strip away pytypes' internals when used
with API from traceback module.
|
def walker(top, names):
"""
Walks a directory and records all packages and file extensions.
"""
global packages, extensions
if any(exc in top for exc in excludes):
return
package = top[top.rfind('holoviews'):].replace(os.path.sep, '.')
packages.append(package)
for name in names:
ext = '.'.join(name.split('.')[1:])
ext_str = '*.%s' % ext
if ext and ext not in excludes and ext_str not in extensions[package]:
extensions[package].append(ext_str)
|
Walks a directory and records all packages and file extensions.
|
def deftypes(self):
"""generator on all definition of type"""
for f in self.body:
if (hasattr(f, '_ctype')
and (f._ctype._storage == Storages.TYPEDEF
or (f._name == '' and isinstance(f._ctype, ComposedType)))):
yield f
|
generator on all definition of type
|
def linearization_error(nodes):
r"""Compute the maximum error of a linear approximation.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
.. note::
This is a helper for :class:`.Linearization`, which is used during the
curve-curve intersection process.
We use the line
.. math::
L(s) = v_0 (1 - s) + v_n s
and compute a bound on the maximum error
.. math::
\max_{s \in \left[0, 1\right]} \|B(s) - L(s)\|_2.
Rather than computing the actual maximum (a tight bound), we
use an upper bound via the remainder from Lagrange interpolation
in each component. This leaves us with :math:`\frac{s(s - 1)}{2!}`
times the second derivative in each component.
The second derivative curve is degree :math:`d = n - 2` and
is given by
.. math::
B''(s) = n(n - 1) \sum_{j = 0}^{d} \binom{d}{j} s^j
(1 - s)^{d - j} \cdot \Delta^2 v_j
Due to this form (and the convex combination property of
B |eacute| zier Curves) we know each component of the second derivative
will be bounded by the maximum of that component among the
:math:`\Delta^2 v_j`.
For example, the curve
.. math::
B(s) = \left[\begin{array}{c} 0 \\ 0 \end{array}\right] (1 - s)^2
+ \left[\begin{array}{c} 3 \\ 1 \end{array}\right] 2s(1 - s)
+ \left[\begin{array}{c} 9 \\ -2 \end{array}\right] s^2
has
:math:`B''(s) \equiv \left[\begin{array}{c} 6 \\ -8 \end{array}\right]`
which has norm :math:`10` everywhere, hence the maximum error is
.. math::
\left.\frac{s(1 - s)}{2!} \cdot 10\right|_{s = \frac{1}{2}}
= \frac{5}{4}.
.. image:: ../images/linearization_error.png
:align: center
.. testsetup:: linearization-error, linearization-error-fail
import numpy as np
import bezier
from bezier._geometric_intersection import linearization_error
.. doctest:: linearization-error
>>> nodes = np.asfortranarray([
... [0.0, 3.0, 9.0],
... [0.0, 1.0, -2.0],
... ])
>>> linearization_error(nodes)
1.25
.. testcleanup:: linearization-error
import make_images
make_images.linearization_error(nodes)
As a **non-example**, consider a "pathological" set of control points:
.. math::
B(s) = \left[\begin{array}{c} 0 \\ 0 \end{array}\right] (1 - s)^3
+ \left[\begin{array}{c} 5 \\ 12 \end{array}\right] 3s(1 - s)^2
+ \left[\begin{array}{c} 10 \\ 24 \end{array}\right] 3s^2(1 - s)
+ \left[\begin{array}{c} 30 \\ 72 \end{array}\right] s^3
By construction, this lies on the line :math:`y = \frac{12x}{5}`, but
the parametrization is cubic:
:math:`12 \cdot x(s) = 5 \cdot y(s) = 180s(s^2 + 1)`. Hence, the fact
that the curve is a line is not accounted for and we take the worse
case among the nodes in:
.. math::
B''(s) = 3 \cdot 2 \cdot \left(
\left[\begin{array}{c} 0 \\ 0 \end{array}\right] (1 - s)
+ \left[\begin{array}{c} 15 \\ 36 \end{array}\right] s\right)
which gives a nonzero maximum error:
.. doctest:: linearization-error-fail
>>> nodes = np.asfortranarray([
... [0.0, 5.0, 10.0, 30.0],
... [0.0, 12.0, 24.0, 72.0],
... ])
>>> linearization_error(nodes)
29.25
Though it may seem that ``0`` is a more appropriate answer, consider
the **goal** of this function. We seek to linearize curves and then
intersect the linear approximations. Then the :math:`s`-values from
the line-line intersection is lifted back to the curves. Thus
the error :math:`\|B(s) - L(s)\|_2` is more relevant than the
underyling algebraic curve containing :math:`B(s)`.
.. note::
It may be more appropriate to use a **relative** linearization error
rather than the **absolute** error provided here. It's unclear if
the domain :math:`\left[0, 1\right]` means the error is **already**
adequately scaled or if the error should be scaled by the arc
length of the curve or the (easier-to-compute) length of the line.
Args:
nodes (numpy.ndarray): Nodes of a curve.
Returns:
float: The maximum error between the curve and the
linear approximation.
"""
_, num_nodes = nodes.shape
degree = num_nodes - 1
if degree == 1:
return 0.0
second_deriv = nodes[:, :-2] - 2.0 * nodes[:, 1:-1] + nodes[:, 2:]
worst_case = np.max(np.abs(second_deriv), axis=1)
# max_{0 <= s <= 1} s(1 - s)/2 = 1/8 = 0.125
multiplier = 0.125 * degree * (degree - 1)
# NOTE: worst_case is 1D due to np.max(), so this is the vector norm.
return multiplier * np.linalg.norm(worst_case, ord=2)
|
r"""Compute the maximum error of a linear approximation.
.. note::
There is also a Fortran implementation of this function, which
will be used if it can be built.
.. note::
This is a helper for :class:`.Linearization`, which is used during the
curve-curve intersection process.
We use the line
.. math::
L(s) = v_0 (1 - s) + v_n s
and compute a bound on the maximum error
.. math::
\max_{s \in \left[0, 1\right]} \|B(s) - L(s)\|_2.
Rather than computing the actual maximum (a tight bound), we
use an upper bound via the remainder from Lagrange interpolation
in each component. This leaves us with :math:`\frac{s(s - 1)}{2!}`
times the second derivative in each component.
The second derivative curve is degree :math:`d = n - 2` and
is given by
.. math::
B''(s) = n(n - 1) \sum_{j = 0}^{d} \binom{d}{j} s^j
(1 - s)^{d - j} \cdot \Delta^2 v_j
Due to this form (and the convex combination property of
B |eacute| zier Curves) we know each component of the second derivative
will be bounded by the maximum of that component among the
:math:`\Delta^2 v_j`.
For example, the curve
.. math::
B(s) = \left[\begin{array}{c} 0 \\ 0 \end{array}\right] (1 - s)^2
+ \left[\begin{array}{c} 3 \\ 1 \end{array}\right] 2s(1 - s)
+ \left[\begin{array}{c} 9 \\ -2 \end{array}\right] s^2
has
:math:`B''(s) \equiv \left[\begin{array}{c} 6 \\ -8 \end{array}\right]`
which has norm :math:`10` everywhere, hence the maximum error is
.. math::
\left.\frac{s(1 - s)}{2!} \cdot 10\right|_{s = \frac{1}{2}}
= \frac{5}{4}.
.. image:: ../images/linearization_error.png
:align: center
.. testsetup:: linearization-error, linearization-error-fail
import numpy as np
import bezier
from bezier._geometric_intersection import linearization_error
.. doctest:: linearization-error
>>> nodes = np.asfortranarray([
... [0.0, 3.0, 9.0],
... [0.0, 1.0, -2.0],
... ])
>>> linearization_error(nodes)
1.25
.. testcleanup:: linearization-error
import make_images
make_images.linearization_error(nodes)
As a **non-example**, consider a "pathological" set of control points:
.. math::
B(s) = \left[\begin{array}{c} 0 \\ 0 \end{array}\right] (1 - s)^3
+ \left[\begin{array}{c} 5 \\ 12 \end{array}\right] 3s(1 - s)^2
+ \left[\begin{array}{c} 10 \\ 24 \end{array}\right] 3s^2(1 - s)
+ \left[\begin{array}{c} 30 \\ 72 \end{array}\right] s^3
By construction, this lies on the line :math:`y = \frac{12x}{5}`, but
the parametrization is cubic:
:math:`12 \cdot x(s) = 5 \cdot y(s) = 180s(s^2 + 1)`. Hence, the fact
that the curve is a line is not accounted for and we take the worse
case among the nodes in:
.. math::
B''(s) = 3 \cdot 2 \cdot \left(
\left[\begin{array}{c} 0 \\ 0 \end{array}\right] (1 - s)
+ \left[\begin{array}{c} 15 \\ 36 \end{array}\right] s\right)
which gives a nonzero maximum error:
.. doctest:: linearization-error-fail
>>> nodes = np.asfortranarray([
... [0.0, 5.0, 10.0, 30.0],
... [0.0, 12.0, 24.0, 72.0],
... ])
>>> linearization_error(nodes)
29.25
Though it may seem that ``0`` is a more appropriate answer, consider
the **goal** of this function. We seek to linearize curves and then
intersect the linear approximations. Then the :math:`s`-values from
the line-line intersection is lifted back to the curves. Thus
the error :math:`\|B(s) - L(s)\|_2` is more relevant than the
underyling algebraic curve containing :math:`B(s)`.
.. note::
It may be more appropriate to use a **relative** linearization error
rather than the **absolute** error provided here. It's unclear if
the domain :math:`\left[0, 1\right]` means the error is **already**
adequately scaled or if the error should be scaled by the arc
length of the curve or the (easier-to-compute) length of the line.
Args:
nodes (numpy.ndarray): Nodes of a curve.
Returns:
float: The maximum error between the curve and the
linear approximation.
|
def delete_user(self, email):
"""Delete a user from the database
Args:
email(str)
Returns:
user_obj(dict)
"""
LOG.info("Deleting user %s", email)
user_obj = self.user_collection.delete_one({'_id': email})
return user_obj
|
Delete a user from the database
Args:
email(str)
Returns:
user_obj(dict)
|
def random_color(dtype=np.uint8):
"""
Return a random RGB color using datatype specified.
Parameters
----------
dtype: numpy dtype of result
Returns
----------
color: (4,) dtype, random color that looks OK
"""
hue = np.random.random() + .61803
hue %= 1.0
color = np.array(colorsys.hsv_to_rgb(hue, .99, .99))
if np.dtype(dtype).kind in 'iu':
max_value = (2**(np.dtype(dtype).itemsize * 8)) - 1
color *= max_value
color = np.append(color, max_value).astype(dtype)
return color
|
Return a random RGB color using datatype specified.
Parameters
----------
dtype: numpy dtype of result
Returns
----------
color: (4,) dtype, random color that looks OK
|
def add_widget_to_content(self, widget):
"""Subclasses should call this to add content in the section's top level column."""
self.__section_content_column.add_spacing(4)
self.__section_content_column.add(widget)
|
Subclasses should call this to add content in the section's top level column.
|
def initialize(self, stormconf, context):
"""Initialization steps:
1. Prepare sequence of terms based on config: TermCycleSpout/terms.
"""
self.terms = get_config()['TermCycleSpout']['terms']
self.term_seq = itertools.cycle(self.terms)
|
Initialization steps:
1. Prepare sequence of terms based on config: TermCycleSpout/terms.
|
def _draw_image(self, ci):
"""
Draw image object to reportlabs canvas.
:param ci: CanvasImage object
"""
img = img_adjust(ci.image, ci.opacity, tempdir=self.dir)
self.can.drawImage(img, x=ci.x, y=ci.y, width=ci.w, height=ci.h, mask=ci.mask,
preserveAspectRatio=ci.preserve_aspect_ratio, anchorAtXY=True)
|
Draw image object to reportlabs canvas.
:param ci: CanvasImage object
|
def timeout(seconds, error_message=None):
"""Timeout checking just for Linux-like platform, not working in Windows platform."""
def decorated(func):
result = ""
def _handle_timeout(signum, frame):
errmsg = error_message or 'Timeout: The action <%s> is timeout!' % func.__name__
global result
result = None
import inspect
stack_frame = inspect.stack()[4]
file_name = os.path.basename(stack_frame[1])
line_no = stack_frame[2]
method_name = stack_frame[3]
code_text = ','.join(stack_frame[4])
stack_info = 'Stack: %s, %s:%s >%s' % (method_name, file_name, line_no, code_text)
sys.stderr.write(errmsg+'\n')
sys.stderr.write(stack_info+'\n')
raise TimeoutError(errmsg)
@sysx.platform(sysx.UNIX_LIKE, case_false_wraps=func)
def wrapper(*args, **kwargs):
global result
signal.signal(signal.SIGALRM, _handle_timeout)
signal.alarm(seconds)
try:
result = func(*args, **kwargs)
finally:
signal.alarm(0)
return result
return functools.wraps(func)(wrapper)
return decorated
|
Timeout check for Linux-like platforms only; it does not work on Windows.
|
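A hypothetical use of the decorator above on a POSIX system (it relies on SIGALRM, as the docstring notes); the decorated function and the two-second limit are illustrative:

import time

@timeout(2, error_message='slow_task took too long')
def slow_task():
    time.sleep(5)
    return 'done'

try:
    slow_task()
except TimeoutError as exc:    # raised from the SIGALRM handler after ~2 seconds
    print(exc)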