code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def pair(args):
    """
    %prog pair fastafile

    Generate .pairs.fasta and .fragments.fasta by matching records
    into the pairs and the rest go to fragments.

    Record names are reduced to a clone id either by splitting on --sep
    or, when no separator is given, by dropping the final character.
    Ids with exactly two members are written as a pair; everything else
    goes to the fragments file.  A matching .qual file, when present,
    is split the same way.
    """
    p = OptionParser(pair.__doc__)
    p.set_sep(sep=None, help="Separator in name to reduce to clone id" +\
              "e.g. GFNQ33242/1 use /, BOT01-2453H.b1 use .")
    p.add_option("-m", dest="matepairs", default=False, action="store_true",
                 help="generate .matepairs file [often used for Celera Assembler]")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(p.print_help())

    fastafile, = args
    qualfile = get_qual(fastafile)

    prefix = fastafile.rsplit(".", 1)[0]
    pairsfile = prefix + ".pairs.fasta"
    fragsfile = prefix + ".frags.fasta"
    pairsfw = open(pairsfile, "w")
    fragsfw = open(fragsfile, "w")

    #TODO: need a class to handle coupled fasta and qual iterating and indexing
    if opts.matepairs:
        matepairsfile = prefix + ".matepairs"
        matepairsfw = open(matepairsfile, "w")

    if qualfile:
        pairsqualfile = pairsfile + ".qual"
        pairsqualhandle = open(pairsqualfile, "w")
        fragsqualfile = fragsfile + ".qual"
        fragsqualhandle = open(fragsqualfile, "w")

    f = Fasta(fastafile)
    if qualfile:
        q = SeqIO.index(qualfile, "qual")

    # groupby() requires its input sorted by the grouping key.
    all_keys = list(f.keys())
    all_keys.sort()
    sep = opts.sep

    if sep:
        key_fun = lambda x: x.split(sep, 1)[0]
    else:
        # No separator given: assume the pair tag is the last character.
        key_fun = lambda x: x[:-1]

    for key, variants in groupby(all_keys, key=key_fun):
        variants = list(variants)
        # Exactly two members with the same clone id form a pair.
        paired = (len(variants) == 2)

        if paired and opts.matepairs:
            print("\t".join(("%s/1" % key, "%s/2" % key)), file=matepairsfw)

        fw = pairsfw if paired else fragsfw
        if qualfile:
            qualfw = pairsqualhandle if paired else fragsqualhandle

        for i, var in enumerate(variants):
            rec = f[var]
            if qualfile:
                recqual = q[var]
            # Rename members to <cloneid>/1 and <cloneid>/2.
            newid = "%s/%d" % (key, i + 1)

            rec.id = newid
            rec.description = ""
            SeqIO.write([rec], fw, "fasta")
            if qualfile:
                recqual.id = newid
                recqual.description = ""
                SeqIO.write([recqual], qualfw, "qual")

    logging.debug("sequences written to `%s` and `%s`" % \
            (pairsfile, fragsfile))
    if opts.matepairs:
        logging.debug("mates written to `%s`" % matepairsfile)
|
%prog pair fastafile
Generate .pairs.fasta and .fragments.fasta by matching records
into the pairs and the rest go to fragments.
|
def char_range(starting_char, ending_char):
    """
    Create a range generator for chars, yielding every character from
    *starting_char* to *ending_char*, inclusive.

    :param starting_char: first character of the range (single-char str)
    :param ending_char: last character of the range (single-char str)
    :raises TypeError: if either argument is not a single-character string
    """
    for arg in (starting_char, ending_char):
        # Explicit checks instead of `assert`: asserts are stripped under
        # -O, and ord() requires exactly one character anyway.
        if not isinstance(arg, str) or len(arg) != 1:
            raise TypeError('char_range: arguments must be single-character strings')
    for code in range(ord(starting_char), ord(ending_char) + 1):
        yield chr(code)
|
Create a range generator for chars
|
def _cleanup(self):
    """ Cleanup open channels and handlers.

    Best-effort teardown: each channel disconnect and handler tear_down
    is attempted independently so that one failure does not stop the rest.
    """
    for channel in self._open_channels:
        try:
            self.disconnect_channel(channel)
        except Exception:  # pylint: disable=broad-except
            pass

    for handler in self._handlers.values():
        try:
            handler.tear_down()
        except Exception:  # pylint: disable=broad-except
            pass

    try:
        self.socket.close()
    except Exception:  # pylint: disable=broad-except
        # Socket close failure is worth recording, unlike the best-effort
        # channel/handler teardown above.
        self.logger.exception(
            "[%s:%s] _cleanup", self.fn or self.host, self.port)

    # Tell listeners the connection is gone.
    self._report_connection_status(
        ConnectionStatus(CONNECTION_STATUS_DISCONNECTED,
                         NetworkAddress(self.host, self.port)))

    # NOTE(review): setting `connecting` back to True here presumably
    # marks the object as "awaiting (re)connection" — confirm against
    # the connect logic elsewhere in this class.
    self.connecting = True
|
Cleanup open channels and handlers
|
def trimSegments(self, minPermanence=None, minNumSyns=None):
    """ This method deletes all synapses whose permanence is less than
    minPermanence and deletes any segments that have less than
    minNumSyns synapses remaining.

    Parameters:
    --------------------------------------------------------------
    minPermanence:   Any syn whose permanence is 0 or < minPermanence will
                     be deleted. If None is passed in, then
                     self.connectedPerm is used.
    minNumSyns:      Any segment with less than minNumSyns synapses remaining
                     in it will be deleted. If None is passed in, then
                     self.activationThreshold is used.
    retval:          (numSegsRemoved, numSynsRemoved)
    """

    # Fill in defaults
    if minPermanence is None:
        minPermanence = self.connectedPerm
    if minNumSyns is None:
        minNumSyns = self.activationThreshold

    # Loop through all cells: `product` visits every (column, cell) pair.
    # NOTE: xrange implies this module targets Python 2.
    totalSegsRemoved, totalSynsRemoved = 0, 0
    for c, i in product(xrange(self.numberOfCols), xrange(self.cellsPerColumn)):

        # Per-cell trimming does the actual work; we only accumulate totals.
        (segsRemoved, synsRemoved) = self.trimSegmentsInCell(colIdx=c, cellIdx=i,
            segList=self.cells[c][i], minPermanence=minPermanence,
            minNumSyns=minNumSyns)
        totalSegsRemoved += segsRemoved
        totalSynsRemoved += synsRemoved

    return totalSegsRemoved, totalSynsRemoved
|
This method deletes all synapses whose permanence is less than
minPermanence and deletes any segments that have less than
minNumSyns synapses remaining.
Parameters:
--------------------------------------------------------------
minPermanence: Any syn whose permanence is 0 or < minPermanence will
be deleted. If None is passed in, then
self.connectedPerm is used.
minNumSyns: Any segment with less than minNumSyns synapses remaining
in it will be deleted. If None is passed in, then
self.activationThreshold is used.
retval: (numSegsRemoved, numSynsRemoved)
|
def map_pixel(self, point_x, point_y):
    '''
    geo.map_pixel(point_x, point_y)

    Return the raster value at the given geographic location.

    Note: (point_x, point_y) must belong to the geographic coordinate
    system and the coverage of the raster.

    :raises RasterGeoError: if the point falls outside the raster coverage
    '''
    # Resolves to the module-level map_pixel() helper, not this method.
    row, col = map_pixel(point_x, point_y,
                         self.x_cell_size, self.y_cell_size, self.xmin, self.ymax)
    # Negative indices would silently wrap around in numpy and return a
    # value from the opposite edge of the raster, so reject them here.
    if row < 0 or col < 0:
        raise RasterGeoError('Make sure the point belongs to the raster coverage '
                             'and it is in the correct geographic coordinate system.')
    try:
        return self.raster[row, col]
    except IndexError:
        # Only an out-of-range index means "outside coverage"; the former
        # bare `except:` masked unrelated errors (typos, bad raster, ...).
        raise RasterGeoError('Make sure the point belongs to the raster coverage '
                             'and it is in the correct geographic coordinate system.')
|
geo.map_pixel(point_x, point_y)
Return value of raster in location
Note: (point_x, point_y) must belong to the geographic coordinate system and
the coverage of the raster
|
def sizeClassifier(path, min_size=DEFAULTS['min_size']):
    """Sort a file into a group based on on-disk size.

    :param path: See :func:`fastdupes.groupify`
    :param min_size: Files smaller than this size (in bytes) will be ignored.
    :type min_size: :class:`__builtins__.int`
    :returns: The file's size in bytes as the group key, or ``None`` for
        symlinks and files below ``min_size`` (see :func:`fastdupes.groupify`).

    .. todo:: Rework the calling of :func:`~os.stat` to minimize the number
        of calls. It's a fairly significant percentage of the time taken
        according to the profiler.
    """
    filestat = _stat(path)
    # Symlinks and too-small files produce no group key (implicit None).
    if not stat.S_ISLNK(filestat.st_mode) and filestat.st_size >= min_size:
        return filestat.st_size
|
Sort a file into a group based on on-disk size.
:param paths: See :func:`fastdupes.groupify`
:param min_size: Files smaller than this size (in bytes) will be ignored.
:type min_size: :class:`__builtins__.int`
:returns: See :func:`fastdupes.groupify`
.. todo:: Rework the calling of :func:`~os.stat` to minimize the number of
calls. It's a fairly significant percentage of the time taken according
to the profiler.
|
def to_dict(mapreduce_yaml):
    """Converts a MapReduceYaml file into a JSON-encodable dictionary.

    For use in user-visible UI and internal methods for interfacing with
    user code (like param validation), as a list.

    Args:
      mapreduce_yaml: The Python representation of the mapreduce.yaml document.

    Returns:
      A list of configuration dictionaries.
    """
    def _param_defaults(params):
        # Each param contributes its default, falling back to its value.
        return dict((param.name, param.default or param.value)
                    for param in params)

    all_configs = []
    for config in mapreduce_yaml.mapreduce:
        out = {
            "name": config.name,
            "mapper_input_reader": config.mapper.input_reader,
            "mapper_handler": config.mapper.handler,
        }
        if config.mapper.params_validator:
            out["mapper_params_validator"] = config.mapper.params_validator
        if config.mapper.params:
            out["mapper_params"] = _param_defaults(config.mapper.params)
        if config.params:
            out["params"] = _param_defaults(config.params)
        if config.mapper.output_writer:
            out["mapper_output_writer"] = config.mapper.output_writer
        all_configs.append(out)

    return all_configs
|
Converts a MapReduceYaml file into a JSON-encodable dictionary.
For use in user-visible UI and internal methods for interfacing with
user code (like param validation). as a list
Args:
mapreduce_yaml: The Python representation of the mapreduce.yaml document.
Returns:
A list of configuration dictionaries.
|
def xml_to_metrics(xmlstr, object_type):
    '''Converts xml response to service bus metrics objects

    Instantiates ``object_type`` and fills its attributes from the first
    ``entry`` element's OData properties, then from entry-level metadata
    (id, updated, ...) for names not already present.

    The xml format for MetricProperties
    <entry>
    <id>https://sbgm.windows.net/Metrics(\'listeners.active\')</id>
    <title/>
    <updated>2014-10-09T11:56:50Z</updated>
    <author>
    <name/>
    </author>
    <content type="application/xml">
    <m:properties>
    <d:Name>listeners.active</d:Name>
    <d:PrimaryAggregation>Average</d:PrimaryAggregation>
    <d:Unit>Count</d:Unit>
    <d:DisplayName>Active listeners</d:DisplayName>
    </m:properties>
    </content>
    </entry>

    The xml format for MetricValues
    <entry>
    <id>https://sbgm.windows.net/MetricValues(datetime\'2014-10-02T00:00:00Z\')</id>
    <title/>
    <updated>2014-10-09T18:38:28Z</updated>
    <author>
    <name/>
    </author>
    <content type="application/xml">
    <m:properties>
    <d:Timestamp m:type="Edm.DateTime">2014-10-02T00:00:00Z</d:Timestamp>
    <d:Min m:type="Edm.Int64">-118</d:Min>
    <d:Max m:type="Edm.Int64">15</d:Max>
    <d:Average m:type="Edm.Single">-78.44444</d:Average>
    <d:Total m:type="Edm.Int64">0</d:Total>
    </m:properties>
    </content>
    </entry>
    '''
    xmldoc = minidom.parseString(xmlstr)
    return_obj = object_type()
    # Snapshot of the instance attributes we are allowed to populate.
    members = dict(vars(return_obj))

    # Only one entry here
    for xml_entry in _MinidomXmlToObject.get_children_from_path(xmldoc,
                                                                'entry'):
        for node in _MinidomXmlToObject.get_children_from_path(xml_entry,
                                                               'content',
                                                               'properties'):
            for name in members:
                xml_name = _get_serialization_name(name)
                children = _MinidomXmlToObject.get_child_nodes(node, xml_name)
                if not children:
                    continue
                child = children[0]
                # The m:type attribute (e.g. Edm.Int64) drives value conversion.
                node_type = child.getAttributeNS("http://schemas.microsoft.com/ado/2007/08/dataservices/metadata", 'type')
                node_value = _ServiceBusManagementXmlSerializer.odata_converter(child.firstChild.nodeValue, node_type)
                setattr(return_obj, name, node_value)
        # Entry-level metadata only fills attributes not set from properties.
        for name, value in _MinidomXmlToObject.get_entry_properties_from_node(
                xml_entry,
                include_id=True,
                use_title_as_id=False).items():
            if name in members:
                continue  # Do not override if already members
            setattr(return_obj, name, value)

    return return_obj
|
Converts xml response to service bus metrics objects
The xml format for MetricProperties
<entry>
<id>https://sbgm.windows.net/Metrics(\'listeners.active\')</id>
<title/>
<updated>2014-10-09T11:56:50Z</updated>
<author>
<name/>
</author>
<content type="application/xml">
<m:properties>
<d:Name>listeners.active</d:Name>
<d:PrimaryAggregation>Average</d:PrimaryAggregation>
<d:Unit>Count</d:Unit>
<d:DisplayName>Active listeners</d:DisplayName>
</m:properties>
</content>
</entry>
The xml format for MetricValues
<entry>
<id>https://sbgm.windows.net/MetricValues(datetime\'2014-10-02T00:00:00Z\')</id>
<title/>
<updated>2014-10-09T18:38:28Z</updated>
<author>
<name/>
</author>
<content type="application/xml">
<m:properties>
<d:Timestamp m:type="Edm.DateTime">2014-10-02T00:00:00Z</d:Timestamp>
<d:Min m:type="Edm.Int64">-118</d:Min>
<d:Max m:type="Edm.Int64">15</d:Max>
<d:Average m:type="Edm.Single">-78.44444</d:Average>
<d:Total m:type="Edm.Int64">0</d:Total>
</m:properties>
</content>
</entry>
|
def create(self,
           alert_config,
           occurrence_frequency_count=None,
           occurrence_frequency_unit=None,
           alert_frequency_count=None,
           alert_frequency_unit=None):
    """
    Create a new alert

    :param alert_config: A single AlertConfig, or a list of AlertConfig
        classes (Ex: ``[EmailAlertConfig('me@mydomain.com')]``)
    :type alert_config: list of
        :class:`PagerDutyAlertConfig<logentries_api.alerts.PagerDutyAlertConfig>`,
        :class:`WebHookAlertConfig<logentries_api.alerts.WebHookAlertConfig>`,
        :class:`EmailAlertConfig<logentries_api.alerts.EmailAlertConfig>`,
        :class:`SlackAlertConfig<logentries_api.alerts.SlackAlertConfig>`, or
        :class:`HipChatAlertConfig<logentries_api.alerts.HipChatAlertConfig>`

    :param occurrence_frequency_count: How many times per
        ``alert_frequency_unit`` for a match before issuing an alert.
        Defaults to 1
    :type occurrence_frequency_count: int

    :param occurrence_frequency_unit: The time period to monitor for sending
        an alert. Must be 'day', or 'hour'. Defaults to 'hour'
    :type occurrence_frequency_unit: str

    :param alert_frequency_count: How many times per
        ``alert_frequency_unit`` to issue an alert. Defaults to 1
    :type alert_frequency_count: int

    :param alert_frequency_unit: How often to regulate sending alerts.
        Must be 'day', or 'hour'. Defaults to 'hour'
    :type alert_frequency_unit: str

    :returns: The response of your post
    :rtype: dict

    :raises: This will raise a
        :class:`ServerException<logentries_api.exceptions.ServerException>`
        if there is an error from Logentries
    """
    data = {
        'rate_count': occurrence_frequency_count or 1,
        'rate_range': occurrence_frequency_unit or 'hour',
        'limit_count': alert_frequency_count or 1,
        'limit_range': alert_frequency_unit or 'hour',
        'schedule': [],
        'enabled': True,
    }

    # The docstring documents a *list* of configs; the old code only
    # accepted a single config object. Accept both for compatibility.
    if isinstance(alert_config, (list, tuple)):
        for config in alert_config:
            data.update(config.args())
    else:
        data.update(alert_config.args())

    # Yes, it's confusing. the `/actions/` endpoint is used for alerts, while
    # the /tags/ endpoint is used for labels.
    return self._post(
        request=ApiActions.CREATE.value,
        uri=ApiUri.ACTIONS.value,
        params=data
    )
|
Create a new alert
:param alert_config: A list of AlertConfig classes (Ex:
``[EmailAlertConfig('me@mydomain.com')]``)
:type alert_config: list of
:class:`PagerDutyAlertConfig<logentries_api.alerts.PagerDutyAlertConfig>`,
:class:`WebHookAlertConfig<logentries_api.alerts.WebHookAlertConfig>`,
:class:`EmailAlertConfig<logentries_api.alerts.EmailAlertConfig>`,
:class:`SlackAlertConfig<logentries_api.alerts.SlackAlertConfig>`, or
:class:`HipChatAlertConfig<logentries_api.alerts.HipChatAlertConfig>`
:param occurrence_frequency_count: How many times per
``alert_frequency_unit`` for a match before issuing an alert.
Defaults to 1
:type occurrence_frequency_count: int
:param occurrence_frequency_unit: The time period to monitor for sending
an alert. Must be 'day', or 'hour'. Defaults to 'hour'
:type occurrence_frequency_unit: str
:param alert_frequency_count: How many times per
``alert_frequency_unit`` to issue an alert. Defaults to 1
:type alert_frequency_count: int
:param alert_frequency_unit: How often to regulate sending alerts.
Must be 'day', or 'hour'. Defaults to 'hour'
:type alert_frequency_unit: str
:returns: The response of your post
:rtype: dict
:raises: This will raise a
:class:`ServerException<logentries_api.exceptions.ServerException>`
if there is an error from Logentries
|
def save():
    """
    Apply configuration changes on all the modules
    """
    from .models import ModuleInfo
    logger = logging.getLogger(__name__)
    logger.info("Saving changes")

    # Save + restart each enabled, changed module; skip (and log) the rest.
    for module in modules():
        if not module.enabled:
            logger.debug('Not saving disabled module: %s' %
                         module.verbose_name)
            continue
        if not module.changed:
            logger.debug('Not saving unchanged module: %s' %
                         module.verbose_name)
            continue
        module.save()
        module.restart()
        module.commit()

    # Commit
    ModuleInfo.commit()
    logger.info("Changes saved")
|
Apply configuration changes on all the modules
|
def work(self):
    """
    Use a blocking <sleep> call to periodically trigger a signal.

    Emits sigUpdate with the values 1..200, sleeping 0.1s before each.
    """
    import time
    tick = 0
    while tick < 200:
        time.sleep(0.1)
        tick += 1
        self.sigUpdate.emit(tick)
|
Use a blocking <sleep> call to periodically trigger a signal.
|
def transform(self, data):
    """
    Render a mapping as newline-separated ``key=value`` lines.

    :param data: mapping to serialize
    :type data: dict
    :return: ``key=value`` pairs joined by newlines, keys in sorted order
    :rtype: str
    """
    return "\n".join("%s=%s" % (key, data[key]) for key in sorted(data))
|
:param data:
:type data: dict
:return:
:rtype:
|
def next(self, fetch: bool = False, next_symbol: _NextSymbol = DEFAULT_NEXT_SYMBOL) -> _Next:
    """Attempts to find the next page, if there is one. If ``fetch``
    is ``True``, returns :class:`HTML <HTML>` object of
    next page. If ``fetch`` is ``False`` (the default), simply returns
    the next URL. Returns ``None`` when no candidate link is found.
    """
    def get_next():
        # Anchors whose text contains the "next" symbol.
        candidates = self.find('a', containing=next_symbol)

        for candidate in candidates:
            if candidate.attrs.get('href'):
                # Support 'next' rel (e.g. reddit).
                if 'next' in candidate.attrs.get('rel', []):
                    return candidate.attrs['href']

                # Support 'next' in classnames.
                for _class in candidate.attrs.get('class', []):
                    if 'next' in _class:
                        return candidate.attrs['href']

                # Heuristic: pagination hrefs usually contain 'page'.
                if 'page' in candidate.attrs['href']:
                    return candidate.attrs['href']

        try:
            # Resort to the last candidate.
            return candidates[-1].attrs['href']
        except IndexError:
            return None

    __next = get_next()
    if __next:
        # Candidate hrefs may be relative; normalize against the page URL.
        url = self._make_absolute(__next)
    else:
        return None

    if fetch:
        return self.session.get(url)
    else:
        return url
|
Attempts to find the next page, if there is one. If ``fetch``
is ``True`` (default), returns :class:`HTML <HTML>` object of
next page. If ``fetch`` is ``False``, simply returns the next URL.
|
def run(self, *args):
    """Export data from the registry.

    By default, it writes the data to the standard output. If a
    positional argument is given, it will write the data on that
    file.
    """
    params = self.parser.parse_args(args)

    with params.outfile as outfile:
        if params.identities:
            return self.export_identities(outfile, params.source)
        if params.orgs:
            return self.export_organizations(outfile)
        # The running process never should reach this section
        raise RuntimeError("Unexpected export option")
|
Export data from the registry.
By default, it writes the data to the standard output. If a
positional argument is given, it will write the data on that
file.
|
def forward_remote(
    self,
    remote_port,
    local_port=None,
    remote_host="127.0.0.1",
    local_host="localhost",
):
    """
    Open a tunnel connecting ``remote_port`` to the local environment.

    For example, say you're running a daemon in development mode on your
    workstation at port 8080, and want to funnel traffic to it from a
    production or staging environment.

    In most situations this isn't possible as your office/home network
    probably blocks inbound traffic. But you have SSH access to this
    server, so you can temporarily make port 8080 on that server act like
    port 8080 on your workstation::

        from fabric import Connection

        c = Connection('my-remote-server')
        with c.forward_remote(8080):
            c.run("remote-data-writer --port 8080")
            # Assuming remote-data-writer runs until interrupted, this will
            # stay open until you Ctrl-C...

    This method is analogous to using the ``-R`` option of OpenSSH's
    ``ssh`` program.

    :param int remote_port: The remote port number on which to listen.

    :param int local_port:
        The local port number. Defaults to the same value as
        ``remote_port``.

    :param str local_host:
        The local hostname/interface the forwarded connection talks to.
        Default: ``localhost``.

    :param str remote_host:
        The remote interface address to listen on when forwarding
        connections. Default: ``127.0.0.1`` (i.e. only listen on the remote
        localhost).

    :returns:
        Nothing; this method is only useful as a context manager affecting
        local operating system state.

    .. versionadded:: 2.0
    """
    # NOTE(review): this function yields, so it is a generator — presumably
    # decorated with @contextmanager outside this view; confirm at the def.
    if not local_port:
        local_port = remote_port
    # Callback executes on each connection to the remote port and is given
    # a Channel hooked up to said port. (We don't actually care about the
    # source/dest host/port pairs at all; only whether the channel has data
    # to read and suchlike.)
    # We then pair that channel with a new 'outbound' socket connection to
    # the local host/port being forwarded, in a new Tunnel.
    # That Tunnel is then added to a shared data structure so we can track
    # & close them during shutdown.
    #
    # TODO: this approach is less than ideal because we have to share state
    # between ourselves & the callback handed into the transport's own
    # thread handling (which is roughly analogous to our self-controlled
    # TunnelManager for local forwarding). See if we can use more of
    # Paramiko's API (or improve it and then do so) so that isn't
    # necessary.
    tunnels = []

    def callback(channel, src_addr_tup, dst_addr_tup):
        sock = socket.socket()
        # TODO: handle connection failure such that channel, etc get closed
        sock.connect((local_host, local_port))
        # TODO: we don't actually need to generate the Events at our level,
        # do we? Just let Tunnel.__init__ do it; all we do is "press its
        # button" on shutdown...
        tunnel = Tunnel(channel=channel, sock=sock, finished=Event())
        tunnel.start()
        # Communication between ourselves & the Paramiko handling subthread
        tunnels.append(tunnel)

    # Ask Paramiko (really, the remote sshd) to call our callback whenever
    # connections are established on the remote iface/port.
    # transport.request_port_forward(remote_host, remote_port, callback)
    try:
        self.transport.request_port_forward(
            address=remote_host, port=remote_port, handler=callback
        )
        yield
    finally:
        # TODO: see above re: lack of a TunnelManager
        # TODO: and/or also refactor with TunnelManager re: shutdown logic.
        # E.g. maybe have a non-thread TunnelManager-alike with a method
        # that acts as the callback? At least then there's a tiny bit more
        # encapsulation...meh.
        for tunnel in tunnels:
            tunnel.finished.set()
            tunnel.join()
        self.transport.cancel_port_forward(
            address=remote_host, port=remote_port
        )
|
Open a tunnel connecting ``remote_port`` to the local environment.
For example, say you're running a daemon in development mode on your
workstation at port 8080, and want to funnel traffic to it from a
production or staging environment.
In most situations this isn't possible as your office/home network
probably blocks inbound traffic. But you have SSH access to this
server, so you can temporarily make port 8080 on that server act like
port 8080 on your workstation::
from fabric import Connection
c = Connection('my-remote-server')
with c.forward_remote(8080):
c.run("remote-data-writer --port 8080")
# Assuming remote-data-writer runs until interrupted, this will
# stay open until you Ctrl-C...
This method is analogous to using the ``-R`` option of OpenSSH's
``ssh`` program.
:param int remote_port: The remote port number on which to listen.
:param int local_port:
The local port number. Defaults to the same value as
``remote_port``.
:param str local_host:
The local hostname/interface the forwarded connection talks to.
Default: ``localhost``.
:param str remote_host:
The remote interface address to listen on when forwarding
connections. Default: ``127.0.0.1`` (i.e. only listen on the remote
localhost).
:returns:
Nothing; this method is only useful as a context manager affecting
local operating system state.
.. versionadded:: 2.0
|
def _version_string():
    """ Gets the output for `trytravis --version`.

    Returns a string of the form
    ``trytravis <version> (<os> <os-version>, python <py-version>)``.
    """
    platform_system = platform.system()
    # platform.dist() was deprecated in Python 3.5 and *removed* in 3.8;
    # calling it unconditionally raises AttributeError on modern Pythons.
    if platform_system == 'Linux' and hasattr(platform, 'dist'):
        os_name, os_version, _ = platform.dist()
    else:
        os_name = platform_system
        os_version = platform.version()
    python_version = platform.python_version()
    return 'trytravis %s (%s %s, python %s)' % (__version__,
                                                os_name.lower(),
                                                os_version,
                                                python_version)
|
Gets the output for `trytravis --version`.
|
def select_eep(self, rorg_func, rorg_type, direction=None, command=None):
    ''' Set EEP based on FUNC and TYPE.

    Stores the selected profile coordinates on the instance, resolves the
    matching profile, and returns whether one was found.
    '''
    self.rorg_func = rorg_func
    self.rorg_type = rorg_type
    profile = self.eep.find_profile(
        self._bit_data, self.rorg, rorg_func, rorg_type, direction, command)
    self._profile = profile
    return profile is not None
|
Set EEP based on FUNC and TYPE
|
def project_invite(object_id, input_params=None, always_retry=False, **kwargs):
    """
    Invokes the /project-xxxx/invite API method.

    :param object_id: ID of the project (e.g. "project-xxxx")
    :param input_params: request payload; defaults to an empty dict
    :param always_retry: whether the request may always be safely retried

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Project-Permissions-and-Sharing#API-method%3A-%2Fproject-xxxx%2Finvite
    """
    # Avoid a mutable default argument ({} shared across all calls).
    if input_params is None:
        input_params = {}
    return DXHTTPRequest('/%s/invite' % object_id, input_params, always_retry=always_retry, **kwargs)
|
Invokes the /project-xxxx/invite API method.
For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Project-Permissions-and-Sharing#API-method%3A-%2Fproject-xxxx%2Finvite
|
def update_many(self, **kwargs):
    """ Update multiple objects from collection.

    First ES is queried, then the results are used to query DB.
    This is done to make sure updated objects are those filtered
    by ES in the 'index' method (so user updates what he saw).

    :param kwargs: filter/query parameters forwarded to
        ``get_dbcollection_with_es``
    :returns: whatever ``Model._update_many`` returns for the updated rows
    """
    # ES-filtered fetch keeps the update set consistent with what the
    # user was shown by the 'index' view.
    db_objects = self.get_dbcollection_with_es(**kwargs)
    return self.Model._update_many(
        db_objects, self._json_params, self.request)
|
Update multiple objects from collection.
First ES is queried, then the results are used to query DB.
This is done to make sure updated objects are those filtered
by ES in the 'index' method (so user updates what he saw).
|
def from_triple(cls, triple):
    """
    Create a Target instance for the given triple (a string).

    :param triple: an LLVM target triple, e.g. "x86_64-pc-linux-gnu"
    :raises RuntimeError: if LLVM does not recognize the triple; the
        message is LLVM's own error text.
    """
    with ffi.OutputString() as outerr:
        # A NULL/falsy handle means failure; LLVM wrote the reason to outerr.
        target = ffi.lib.LLVMPY_GetTargetFromTriple(triple.encode('utf8'),
                                                    outerr)
        if not target:
            raise RuntimeError(str(outerr))
        target = cls(target)
        # Remember the triple so callers can read it back later.
        target._triple = triple
        return target
|
Create a Target instance for the given triple (a string).
|
def table_exists(self, table):
    """Returns whether the given table exists.

    :param table:
    :type table: BQTable
    :returns: True if both the dataset and the table exist, else False.
    :raises http.HttpError: for API errors other than a 404.
    """
    if not self.dataset_exists(table.dataset):
        return False

    try:
        self.client.tables().get(projectId=table.project_id,
                                 datasetId=table.dataset_id,
                                 tableId=table.table_id).execute()
    except http.HttpError as ex:
        # 404 means "no such table"; anything else is a genuine API error.
        if ex.resp.status == 404:
            return False
        raise

    return True
|
Returns whether the given table exists.
:param table:
:type table: BQTable
|
def read_probes(self, key=None):
    """
    Read one probe value or all of them.

    function is overloaded:
        - read_probes() returns the values of all probes in dictionary form
        - read_probes(key) returns the value of the single requested probe

    Args:
        key: name of requested value, or None for all probes

    Returns:
        dict of all probe values when called without a key, otherwise
        the value for the requested key

    Raises:
        KeyError: if key does not name a valid probe
    """
    # NOTE: a leftover debug print was removed here; it also called
    # self._PROBES() as if it were callable, inconsistent with the
    # dict-style self._PROBES.keys() usage below.
    if key is None:
        # Gather every probe value into a dictionary.
        return {k: self.read_probes(k) for k in self._PROBES.keys()}

    # Explicit check instead of `assert` (asserts vanish under -O).
    if key not in self._PROBES.keys():
        raise KeyError('%s is not a valid probe' % key)

    # NOTE(review): the original implementation returned None here as a
    # placeholder — presumably subclasses provide the real read; confirm.
    value = None
    return value
|
function is overloaded:
- read_probes()
- read_probes(key)
Args:
key: name of requested value
Returns:
- if called without argument: returns the values of all probes in dictionary form
- if called with argument: returns the value the requested key
|
def _check_restart_params(self, restart_strategy, min_beta, s_greedy,
                          xi_restart):
    r""" Check restarting parameters

    This method checks that the restarting parameters are set and satisfy
    the correct assumptions. It also checks that the current mode is
    regular (as opposed to CD for now).

    Parameters
    ----------
    restart_strategy: str or None
        name of the restarting strategy. If None, there is no restarting.
        Defaults to None.
    min_beta: float or None
        the minimum beta when using the greedy restarting strategy.
        Defaults to None.
    s_greedy: float or None.
        parameter for the safeguard comparison in the greedy restarting
        strategy. It has to be > 1.
        Defaults to None.
    xi_restart: float or None.
        multiplicative parameter for the update of beta in the greedy
        restarting strategy and for the update of r_lazy in the adaptive
        restarting strategies. It has to be < 1 (values >= 1 are
        rejected below; the old docstring incorrectly said "> 1").
        Defaults to None.

    Returns
    -------
    bool: True

    Raises
    ------
    ValueError
        When a parameter that should be set isn't or doesn't verify the
        correct assumptions.
    """
    # No restarting requested: nothing to validate.
    if restart_strategy is None:
        return True

    if self.mode != 'regular':
        raise ValueError('Restarting strategies can only be used with '
                         'regular mode.')

    # Greedy restart needs min_beta set and s_greedy strictly > 1.
    greedy_params_check = (min_beta is None or s_greedy is None or
                           s_greedy <= 1)

    if restart_strategy == 'greedy' and greedy_params_check:
        raise ValueError('You need a min_beta and an s_greedy > 1 for '
                         'greedy restart.')

    # Every restart strategy needs xi_restart set and strictly below 1.
    if xi_restart is None or xi_restart >= 1:
        raise ValueError('You need a xi_restart < 1 for restart.')

    return True
|
r""" Check restarting parameters
This method checks that the restarting parameters are set and satisfy
the correct assumptions. It also checks that the current mode is
regular (as opposed to CD for now).
Parameters
----------
restart_strategy: str or None
name of the restarting strategy. If None, there is no restarting.
Defaults to None.
min_beta: float or None
the minimum beta when using the greedy restarting strategy.
Defaults to None.
s_greedy: float or None.
parameter for the safeguard comparison in the greedy restarting
strategy. It has to be > 1.
Defaults to None.
xi_restart: float or None.
multiplicative parameter for the update of beta in the greedy
restarting strategy and for the update of r_lazy in the adaptive
restarting strategies. It has to be > 1.
Defaults to None.
Returns
-------
bool: True
Raises
------
ValueError
When a parameter that should be set isn't or doesn't verify the
correct assumptions.
|
def _noneload(l: Loader, value, type_) -> None:
    """
    Load a value that may only ever be None.

    Returns None when the value is None; raises otherwise.
    """
    if value is not None:
        raise TypedloadValueError('Not None', value=value, type_=type_)
    return None
|
Loads a value that can only be None,
so it fails if it isn't
|
async def close_async(self, context, reason):
    """
    Called by processor host to indicate that the event processor is being stopped.

    :param context: Information about the partition
    :type context: ~azure.eventprocessorhost.PartitionContext
    :param reason: Why the processor is being closed (e.g. shutdown,
        lease lost).
    """
    # Log-only teardown: no checkpointing or resource cleanup is done here.
    logger.info("Connection closed (reason {}, id {}, offset {}, sq_number {})".format(
        reason,
        context.partition_id,
        context.offset,
        context.sequence_number))
|
Called by processor host to indicate that the event processor is being stopped.
:param context: Information about the partition
:type context: ~azure.eventprocessorhost.PartitionContext
|
def attributes(self) -> Sequence[bytes]:
    """The mailbox attributes that should be returned with the mailbox
    in a ``LIST`` response, e.g. ``\\Noselect``.

    See Also:
        `RFC 3348 <https://tools.ietf.org/html/rfc3348>`_

    """
    attrs: List[bytes] = []
    if not self.exists:
        attrs.append(b'Noselect')
    attrs.append(b'HasChildren' if self.has_children else b'HasNoChildren')
    # self.marked is tri-state: True, False, or None (no attribute).
    if self.marked is True:
        attrs.append(b'Marked')
    elif self.marked is False:
        attrs.append(b'Unmarked')
    return attrs
|
The mailbox attributes that should be returned with the mailbox
in a ``LIST`` response, e.g. ``\\Noselect``.
See Also:
`RFC 3348 <https://tools.ietf.org/html/rfc3348>`_
|
def activate(self, *, filter_func=None):
    '''
    Activate the type safety checker. After the call all functions
    that need to be checked will be.

    :param filter_func: optional predicate handed to the module finder to
        restrict which modules get decorated.
    :raises RuntimeError: if the checker is already active.
    '''
    if self.active:
        raise RuntimeError("Type safety check already active")
    # Install an import hook that decorates functions as modules load.
    self.__module_finder = ModuleFinder(Validator.decorate)
    if filter_func is not None:
        self.__module_finder.set_filter(filter_func)
    self.__module_finder.install()
|
Activate the type safety checker. After the call all functions
that need to be checked will be.
|
def __init(self):
    """ initializes all the properties

    Fetches the layer's JSON description and copies every recognized key
    into the matching underscore-prefixed backing attribute; unknown keys
    are reported on stdout.
    """
    params = {
        "f" : "json"
    }
    json_dict = self._get(url=self._url, param_dict=params,
                          securityHandler=self._securityHandler,
                          proxy_url=self._proxy_url,
                          proxy_port=self._proxy_port)
    # Names of the public attributes/properties this object exposes.
    attributes = [attr for attr in dir(self)
                  if not attr.startswith('__') and
                  not attr.startswith('_')]
    for k, v in json_dict.items():
        if k in attributes:
            # Properties are backed by a leading-underscore attribute.
            setattr(self, "_" + k, json_dict[k])
        else:
            print( k, " - attribute not implemented in RouteNetworkLayer.")
    # NOTE: the old trailing `del k, v` raised NameError whenever
    # json_dict was empty (the loop variables were never bound); it
    # served no purpose and has been removed.
|
initializes all the properties
|
def is_reached(self, uid=None):
    """
    is_reached is to be called for every object that counts towards the limit.

    - When called with no uid, the Limiter assumes this is a new object and
      unconditionally increments the counter (less CPU and memory usage).
    - When a given object can be passed multiple times, a uid must be provided
      to deduplicate calls. Only the first occurrence of a uid increments
      the counter.

    :param uid: (optional) unique identifier of the object, to deduplicate calls
    :returns: boolean, true if limit exceeded
    """
    if self.reached_limit:
        return True

    if not uid:
        self.count += 1
    elif uid not in self.seen:
        self.count += 1
        self.seen.add(uid)
    else:
        # Duplicate uid: already counted, can never trip the limit.
        return False

    if self.count <= self.limit:
        return False

    if self.warning:
        self.warning(
            "Check {} exceeded limit of {} {}, ignoring next ones".format(
                self.check_name, self.limit, self.name
            )
        )
    self.reached_limit = True
    return True
|
is_reached is to be called for every object that counts towards the limit.
- When called with no uid, the Limiter assumes this is a new object and
unconditionally increments the counter (less CPU and memory usage).
- When a given object can be passed multiple times, a uid must be provided to
deduplicate calls. Only the first occurrence of a uid will increment the counter.
:param uid: (optional) unique identifier of the object, to deduplicate calls
:returns: boolean, true if limit exceeded
|
def PhenomModel(self, r):
    """Fit to field map
    A phenomenological fit by Ryan Bayes (Glasgow) to a field map
    generated by Bob Wands (FNAL). It assumes a 1 cm plate. This is dated
    January 30th, 2012. Not defined for r <= 0.

    :param r: radial distance (same length units as G4.m); must be > 0.
    :return: the fitted field value at r.
    :raises ValueError: if r <= 0 (the 1/r and exponential terms are
        meaningless at or below zero).
    """
    if r <= 0:
        raise ValueError("PhenomModel is not defined for r <= 0 (got %r)" % (r,))
    field = self.B0 + self.B1 * G4.m / r + self.B2 * math.exp(-1 * self.H * r / G4.m)
    return field
|
Fit to field map
A phenomenological fit by Ryan Bayes (Glasgow) to a field map
generated by Bob Wands (FNAL). It assumes a 1 cm plate. This is dated
January 30th, 2012. Not defined for r <= 0
|
def plot(x, fmt='-', marker=None, markers=None, linestyle=None, linestyles=None,
         color=None, colors=None, palette='hls', group=None, hue=None,
         labels=None, legend=None, title=None, size=None, elev=10, azim=-60,
         ndims=3, model=None, model_params=None, reduce='IncrementalPCA',
         cluster=None, align=None, normalize=None, n_clusters=None,
         save_path=None, animate=False, duration=30, tail_duration=2,
         rotations=2, zoom=1, chemtrails=False, precog=False, bullettime=False,
         frame_rate=50, explore=False, show=True, transform=None,
         vectorizer='CountVectorizer', semantic='LatentDirichletAllocation',
         corpus='wiki', ax=None):
    """
    Plots dimensionality reduced data and parses plot arguments
    Parameters
    ----------
    x : Numpy array, DataFrame, String, Geo or mixed list
        Data for the plot. The form should be samples (rows) by features (cols).
    fmt : str or list of strings
        A list of format strings.  All matplotlib format strings are supported.
    linestyle(s) : str or list of str
        A list of line styles
    marker(s) : str or list of str
        A list of marker types
    color(s) : str or list of str
        A list of marker types
    palette : str
        A matplotlib or seaborn color palette
    group : str/int/float or list
        A list of group labels. Length must match the number of rows in your
        dataset. If the data type is numerical, the values will be mapped to
        rgb values in the specified palette. If the data type is strings,
        the points will be labeled categorically. To label a subset of points,
        use None (i.e. ['a', None, 'b','a']).
    labels : list
        A list of labels for each point. Must be dimensionality of data (x).
        If no label is wanted for a particular point, input None.
    legend : list or bool
        If set to True, legend is implicitly computed from data. Passing a
        list will add string labels to the legend (one for each list item).
    title : str
        A title for the plot
    size : list
        A list of [width, height] in inches to resize the figure
    normalize : str or False
        If set to 'across', the columns of the input data will be z-scored
        across lists (default). If set to 'within', the columns will be
        z-scored within each list that is passed. If set to 'row', each row of
        the input data will be z-scored. If set to False, the input data will
        be returned (default is False).
    reduce : str or dict
        Decomposition/manifold learning model to use.  Models supported: PCA,
        IncrementalPCA, SparsePCA, MiniBatchSparsePCA, KernelPCA, FastICA,
        FactorAnalysis, TruncatedSVD, DictionaryLearning, MiniBatchDictionaryLearning,
        TSNE, Isomap, SpectralEmbedding, LocallyLinearEmbedding, and MDS. Can be
        passed as a string, but for finer control of the model parameters, pass
        as a dictionary, e.g. reduce={'model' : 'PCA', 'params' : {'whiten' : True}}.
        See scikit-learn specific model docs for details on parameters supported
        for each model.
    ndims : int
        An `int` representing the number of dims to reduce the data x
        to. If ndims > 3, will plot in 3 dimensions but return the higher
        dimensional data. Default is None, which will plot data in 3
        dimensions and return the data with the same number of dimensions
        possibly normalized and/or aligned according to normalize/align
        kwargs.
    align : str or dict or False/None
        If str, either 'hyper' or 'SRM'.  If 'hyper', alignment algorithm will be
        hyperalignment. If 'SRM', alignment algorithm will be shared response
        model.  You can also pass a dictionary for finer control, where the 'model'
        key is a string that specifies the model and the params key is a dictionary
        of parameter values (default : 'hyper').
    cluster : str or dict or False/None
        If cluster is passed, HyperTools will perform clustering using the
        specified clustering model. Supported algorithms are:
        KMeans, MiniBatchKMeans, AgglomerativeClustering, Birch,
        FeatureAgglomeration, SpectralClustering and HDBSCAN (default: None).
        Can be passed as a string, but for finer control of the model
        parameters, pass as a dictionary, e.g.
        reduce={'model' : 'KMeans', 'params' : {'max_iter' : 100}}. See
        scikit-learn specific model docs for details on parameters supported for
        each model. If no parameters are specified in the string a default set
        of parameters will be used.
    n_clusters : int
        If n_clusters is passed, HyperTools will perform k-means clustering
        with the k parameter set to n_clusters. The resulting clusters will
        be plotted in different colors according to the color palette.
    save_path : str
        Path to save the image/movie. Must include the file extension in the
        save path (i.e. save_path='/path/to/file/image.png'). NOTE: If saving
        an animation, FFMPEG must be installed (this is a matplotlib req).
        FFMPEG can be easily installed on a mac via homebrew brew install
        ffmpeg or linux via apt-get apt-get install ffmpeg. If you don't
        have homebrew (mac only), you can install it like this:
        /usr/bin/ruby -e "$(curl -fsSL
        https://raw.githubusercontent.com/Homebrew/install/master/install)".
    animate : bool, 'parallel' or 'spin'
        If True or 'parallel', plots the data as an animated trajectory, with
        each dataset plotted simultaneously. If 'spin', all the data is plotted
        at once but the camera spins around the plot (default: False).
    duration (animation only) : float
        Length of the animation in seconds (default: 30 seconds)
    tail_duration (animation only) : float
        Sets the length of the tail of the data (default: 2 seconds)
    rotations (animation only) : float
        Number of rotations around the box (default: 2)
    zoom (animation only) : float
        How far to zoom into the plot, positive numbers will zoom in (default: 0)
    chemtrails (animation only) : bool
        A low-opacity trail is left behind the trajectory (default: False).
    precog (animation only) : bool
        A low-opacity trail is plotted ahead of the trajectory (default: False).
    bullettime (animation only) : bool
        A low-opacity trail is plotted ahead and behind the trajectory
        (default: False).
    frame_rate (animation only) : int or float
        Frame rate for animation (default: 50)
    explore : bool
        User-defined labels will appear on hover. If no labels are
        passed, the point index and coordinate will be plotted. To use,
        set explore=True. Note: Explore mode is currently only supported
        for 3D static plots, and is an experimental feature (i.e it may not yet
        work properly).
    show : bool
        If set to False, the figure will not be displayed, but the figure,
        axis and data objects will still be returned (default: True).
    transform : list of numpy arrays or None
        The transformed data, bypasses transformations if this is set
        (default : None).
    vectorizer : str, dict, class or class instance
        The vectorizer to use. Built-in options are 'CountVectorizer' or
        'TfidfVectorizer'. To change default parameters, set to a dictionary
        e.g. {'model' : 'CountVectorizer', 'params' : {'max_features' : 10}}. See
        http://scikit-learn.org/stable/modules/classes.html#module-sklearn.feature_extraction.text
        for details. You can also specify your own vectorizer model as a class,
        or class instance.  With either option, the class must have a
        fit_transform method (see here: http://scikit-learn.org/stable/data_transforms.html).
        If a class, pass any parameters as a dictionary to vectorizer_params. If
        a class instance, no parameters can be passed.
    semantic : str, dict, class or class instance
        Text model to use to transform text data. Built-in options are
        'LatentDirichletAllocation' or 'NMF' (default: LDA). To change default
        parameters, set to a dictionary e.g. {'model' : 'NMF', 'params' :
        {'n_components' : 10}}. See
        http://scikit-learn.org/stable/modules/classes.html#module-sklearn.decomposition
        for details on the two model options. You can also specify your own
        text model as a class, or class instance. With either option, the class
        must have a fit_transform method (see here:
        http://scikit-learn.org/stable/data_transforms.html).
        If a class, pass any parameters as a dictionary to text_params. If
        a class instance, no parameters can be passed.
    corpus : list (or list of lists) of text samples or 'wiki', 'nips', 'sotus'.
        Text to use to fit the semantic model (optional). If set to 'wiki', 'nips'
        or 'sotus' and the default semantic and vectorizer models are used, a
        pretrained model will be loaded which can save a lot of time.
    ax : matplotlib.Axes
        Axis handle to plot the figure
    Returns
    ----------
    geo : hypertools.DataGeometry
        A new data geometry object
    """
    # warnings for deprecated API args
    if (model is not None) or (model_params is not None):
        warnings.warn('Model and model_params arguments will be deprecated. '
                      'Please use reduce keyword argument. See docs for details: '
                      'http://hypertools.readthedocs.io/en/latest/hypertools.plot.html#hypertools.plot')
        reduce = {}
        reduce['model'] = model
        reduce['params'] = model_params
    if group is not None:
        warnings.warn('Group will be deprecated. Please use '
                      'hue keyword argument. See docs for details: '
                      'http://hypertools.readthedocs.io/en/latest/hypertools.plot.html#hypertools.plot')
        hue = group
    if ax is not None:
        if ndims > 2:
            if ax.name != '3d':
                raise ValueError('If passing ax and the plot is 3D, ax must '
                                 'also be 3d')
    text_args = {
        'vectorizer' : vectorizer,
        'semantic' : semantic,
        'corpus' : corpus
    }
    # analyze the data
    if transform is None:
        raw = format_data(x, **text_args)
        xform = analyze(raw, ndims=ndims, normalize=normalize, reduce=reduce,
                        align=align, internal=True)
    else:
        xform = transform
    # Return data that has been normalized and possibly reduced and/or aligned
    xform_data = copy.copy(xform)
    # catch all matplotlib kwargs here to pass on
    mpl_kwargs = {}
    # handle color (to be passed onto matplotlib)
    if color is not None:
        mpl_kwargs['color'] = color
        if colors is not None:
            mpl_kwargs['color'] = colors
            warnings.warn('Both color and colors defined: color will be '
                          'ignored in favor of colors.')
    # handle linestyle (to be passed onto matplotlib)
    if linestyle is not None:
        mpl_kwargs['linestyle'] = linestyle
        if linestyles is not None:
            mpl_kwargs['linestyle'] = linestyles
            warnings.warn('Both linestyle and linestyles defined: linestyle '
                          'will be ignored in favor of linestyles.')
    # handle marker (to be passed onto matplotlib)
    if marker is not None:
        mpl_kwargs['marker'] = marker
        if markers is not None:
            mpl_kwargs['marker'] = markers
            warnings.warn('Both marker and markers defined: marker will be '
                          'ignored in favor of markers.')
    # reduce data to 3 dims for plotting, if ndims is None, return this
    if (ndims and ndims < 3):
        xform = reducer(xform, ndims=ndims, reduce=reduce, internal=True)
    else:
        xform = reducer(xform, ndims=3, reduce=reduce, internal=True)
    # find cluster and reshape if n_clusters
    if cluster is not None:
        if hue is not None:
            warnings.warn('cluster overrides hue, ignoring hue.')
        if isinstance(cluster, (six.string_types, six.binary_type)):
            model = cluster
            params = default_params(model)
        elif isinstance(cluster, dict):
            model = cluster['model']
            params = default_params(model, cluster['params'])
        else:
            raise ValueError('Invalid cluster model specified; should be'
                             ' string or dictionary!')
        if n_clusters is not None:
            if cluster in ('HDBSCAN',):
                warnings.warn('n_clusters is not a valid parameter for '
                              'HDBSCAN clustering and will be ignored.')
            else:
                params['n_clusters'] = n_clusters
        cluster_labels = clusterer(xform, cluster={'model': model,
                                                   'params': params})
        xform, labels = reshape_data(xform, cluster_labels, labels)
        hue = cluster_labels
    elif n_clusters is not None:
        # If cluster was None default to KMeans
        cluster_labels = clusterer(xform, cluster='KMeans', n_clusters=n_clusters)
        xform, labels = reshape_data(xform, cluster_labels, labels)
        if hue is not None:
            warnings.warn('n_clusters overrides hue, ignoring hue.')
    # group data if there is a grouping var
    elif hue is not None:
        if color is not None:
            warnings.warn("Using group, color keyword will be ignored.")
        # if list of lists, unpack
        if any(isinstance(el, list) for el in hue):
            hue = list(itertools.chain(*hue))
        # if all of the elements are numbers, map them to colors
        if all(isinstance(el, int) or isinstance(el, float) for el in hue):
            hue = vals2bins(hue)
        elif all(isinstance(el, str) for el in hue):
            hue = group_by_category(hue)
        # reshape the data according to group
        if n_clusters is None:
            xform, labels = reshape_data(xform, hue, labels)
        # interpolate lines if they are grouped
        if is_line(fmt):
            xform = patch_lines(xform)
    # handle legend
    if legend is not None:
        if legend is False:
            legend = None
        elif legend is True and hue is not None:
            legend = [item for item in sorted(set(hue), key=list(hue).index)]
        elif legend is True and hue is None:
            legend = [i + 1 for i in range(len(xform))]
        mpl_kwargs['label'] = legend
    # interpolate if its a line plot
    if fmt is None or isinstance(fmt, six.string_types):
        if is_line(fmt):
            if xform[0].shape[0] > 1:
                xform = interp_array_list(xform, interp_val=frame_rate*duration/(xform[0].shape[0] - 1))
    elif isinstance(fmt, list):
        for idx, xi in enumerate(xform):
            if is_line(fmt[idx]):
                if xi.shape[0] > 1:
                    xform[idx] = interp_array_list(xi, interp_val=frame_rate*duration/(xi.shape[0] - 1))
    # handle explore flag
    if explore:
        # `==` (value), not `is` (identity): comparing ints with `is` is a
        # CPython small-int artifact and a SyntaxWarning on modern Pythons.
        assert xform[0].shape[1] == 3, "Explore mode is currently only supported for 3D plots."
        mpl_kwargs['picker'] = True
    # center
    xform = center(xform)
    # scale
    xform = scale(xform)
    # handle palette with seaborn
    if isinstance(palette, np.bytes_):
        palette = palette.decode("utf-8")
    sns.set_palette(palette=palette, n_colors=len(xform))
    sns.set_style(style='whitegrid')
    # turn kwargs into a list
    kwargs_list = parse_kwargs(xform, mpl_kwargs)
    # handle format strings
    if fmt is not None:
        if not isinstance(fmt, list):
            draw_fmt = [fmt for i in xform]
        else:
            draw_fmt = fmt
    else:
        draw_fmt = ['-']*len(x)
    # convert all nans to zeros
    for i, xi in enumerate(xform):
        xform[i] = np.nan_to_num(xi)
    # draw the plot
    fig, ax, data, line_ani = _draw(xform, fmt=draw_fmt,
                                    kwargs_list=kwargs_list,
                                    labels=labels,
                                    legend=legend,
                                    title=title,
                                    animate=animate,
                                    duration=duration,
                                    tail_duration=tail_duration,
                                    rotations=rotations,
                                    zoom=zoom,
                                    chemtrails=chemtrails,
                                    precog=precog,
                                    bullettime=bullettime,
                                    frame_rate=frame_rate,
                                    elev=elev,
                                    azim=azim,
                                    explore=explore,
                                    show=show,
                                    size=size,
                                    ax=ax)
    # tighten layout
    plt.tight_layout()
    # save
    if save_path is not None:
        if animate:
            Writer = animation.writers['ffmpeg']
            writer = Writer(fps=frame_rate, bitrate=1800)
            line_ani.save(save_path, writer=writer)
        else:
            plt.savefig(save_path)
    # show the plot
    if show:
        plt.show()
    else:
        # safely closes the plot so it doesn't pop up in another call to this function
        plt.close('all')
    # gather reduce params
    if isinstance(reduce, dict):
        reduce_dict = reduce
    else:
        reduce_dict = {
            'model' : reduce,
            'params' : {
                'n_components' : ndims
            },
        }
    # gather align params
    if isinstance(align, dict):
        align_dict = align
    else:
        align_dict = {
            'model' : align,
            'params' : {}
        }
    # gather all other kwargs
    kwargs = {
        'fmt' : fmt,
        'marker': marker,
        'markers' : markers,
        'linestyle' : linestyle,
        'linestyles' : linestyles,
        'color' : color,
        'colors' : colors,
        'palette' : palette,
        'hue' : hue,
        'ndims' : ndims,
        'labels' : labels,
        'legend' : legend,
        'title' : title,
        'animate' : animate,
        'duration' : duration,
        'tail_duration' : tail_duration,
        'rotations' : rotations,
        'zoom' : zoom,
        'chemtrails' : chemtrails,
        'precog' : precog,
        'bullettime' : bullettime,
        'frame_rate' : frame_rate,
        'elev' : elev,
        'azim' : azim,
        'explore' : explore,
        'n_clusters' : n_clusters,
        'size' : size
    }
    # turn lists into np arrays so that they don't turn into pickles when saved
    for kwarg in kwargs:
        if isinstance(kwargs[kwarg], list):
            try:
                kwargs[kwarg] = np.array(kwargs[kwarg])
            except Exception:
                # np.array() can fail on ragged/mixed lists; keep the list as-is.
                warnings.warn('Could not convert all list arguments to numpy '
                              'arrays.  If list is longer than 256 items, it '
                              'will automatically be pickled, which could '
                              'cause Python 2/3 compatibility issues for the '
                              'DataGeometry object.')
    return DataGeometry(fig=fig, ax=ax, data=x, xform_data=xform_data,
                        line_ani=line_ani, reduce=reduce_dict, align=align_dict,
                        normalize=normalize, semantic=semantic,
                        vectorizer=vectorizer, corpus=corpus, kwargs=kwargs)
|
Plots dimensionality reduced data and parses plot arguments
Parameters
----------
x : Numpy array, DataFrame, String, Geo or mixed list
Data for the plot. The form should be samples (rows) by features (cols).
fmt : str or list of strings
A list of format strings. All matplotlib format strings are supported.
linestyle(s) : str or list of str
A list of line styles
marker(s) : str or list of str
A list of marker types
color(s) : str or list of str
A list of marker types
palette : str
A matplotlib or seaborn color palette
group : str/int/float or list
A list of group labels. Length must match the number of rows in your
dataset. If the data type is numerical, the values will be mapped to
rgb values in the specified palette. If the data type is strings,
the points will be labeled categorically. To label a subset of points,
use None (i.e. ['a', None, 'b','a']).
labels : list
A list of labels for each point. Must be dimensionality of data (x).
If no label is wanted for a particular point, input None.
legend : list or bool
If set to True, legend is implicitly computed from data. Passing a
list will add string labels to the legend (one for each list item).
title : str
A title for the plot
size : list
A list of [width, height] in inches to resize the figure
normalize : str or False
If set to 'across', the columns of the input data will be z-scored
across lists (default). If set to 'within', the columns will be
z-scored within each list that is passed. If set to 'row', each row of
the input data will be z-scored. If set to False, the input data will
be returned (default is False).
reduce : str or dict
Decomposition/manifold learning model to use. Models supported: PCA,
IncrementalPCA, SparsePCA, MiniBatchSparsePCA, KernelPCA, FastICA,
FactorAnalysis, TruncatedSVD, DictionaryLearning, MiniBatchDictionaryLearning,
TSNE, Isomap, SpectralEmbedding, LocallyLinearEmbedding, and MDS. Can be
passed as a string, but for finer control of the model parameters, pass
as a dictionary, e.g. reduce={'model' : 'PCA', 'params' : {'whiten' : True}}.
See scikit-learn specific model docs for details on parameters supported
for each model.
ndims : int
An `int` representing the number of dims to reduce the data x
to. If ndims > 3, will plot in 3 dimensions but return the higher
dimensional data. Default is None, which will plot data in 3
dimensions and return the data with the same number of dimensions
possibly normalized and/or aligned according to normalize/align
kwargs.
align : str or dict or False/None
If str, either 'hyper' or 'SRM'. If 'hyper', alignment algorithm will be
hyperalignment. If 'SRM', alignment algorithm will be shared response
model. You can also pass a dictionary for finer control, where the 'model'
key is a string that specifies the model and the params key is a dictionary
of parameter values (default : 'hyper').
cluster : str or dict or False/None
If cluster is passed, HyperTools will perform clustering using the
        specified clustering model. Supported algorithms are:
KMeans, MiniBatchKMeans, AgglomerativeClustering, Birch,
FeatureAgglomeration, SpectralClustering and HDBSCAN (default: None).
Can be passed as a string, but for finer control of the model
parameters, pass as a dictionary, e.g.
reduce={'model' : 'KMeans', 'params' : {'max_iter' : 100}}. See
scikit-learn specific model docs for details on parameters supported for
each model. If no parameters are specified in the string a default set
of parameters will be used.
n_clusters : int
If n_clusters is passed, HyperTools will perform k-means clustering
with the k parameter set to n_clusters. The resulting clusters will
be plotted in different colors according to the color palette.
save_path : str
Path to save the image/movie. Must include the file extension in the
save path (i.e. save_path='/path/to/file/image.png'). NOTE: If saving
an animation, FFMPEG must be installed (this is a matplotlib req).
FFMPEG can be easily installed on a mac via homebrew brew install
ffmpeg or linux via apt-get apt-get install ffmpeg. If you don't
have homebrew (mac only), you can install it like this:
/usr/bin/ruby -e "$(curl -fsSL
https://raw.githubusercontent.com/Homebrew/install/master/install)".
animate : bool, 'parallel' or 'spin'
If True or 'parallel', plots the data as an animated trajectory, with
each dataset plotted simultaneously. If 'spin', all the data is plotted
at once but the camera spins around the plot (default: False).
duration (animation only) : float
Length of the animation in seconds (default: 30 seconds)
tail_duration (animation only) : float
Sets the length of the tail of the data (default: 2 seconds)
rotations (animation only) : float
Number of rotations around the box (default: 2)
zoom (animation only) : float
How far to zoom into the plot, positive numbers will zoom in (default: 0)
chemtrails (animation only) : bool
A low-opacity trail is left behind the trajectory (default: False).
precog (animation only) : bool
A low-opacity trail is plotted ahead of the trajectory (default: False).
bullettime (animation only) : bool
A low-opacity trail is plotted ahead and behind the trajectory
(default: False).
frame_rate (animation only) : int or float
Frame rate for animation (default: 50)
explore : bool
Displays user defined labels will appear on hover. If no labels are
passed, the point index and coordinate will be plotted. To use,
set explore=True. Note: Explore mode is currently only supported
for 3D static plots, and is an experimental feature (i.e it may not yet
work properly).
show : bool
If set to False, the figure will not be displayed, but the figure,
axis and data objects will still be returned (default: True).
transform : list of numpy arrays or None
The transformed data, bypasses transformations if this is set
(default : None).
vectorizer : str, dict, class or class instance
The vectorizer to use. Built-in options are 'CountVectorizer' or
'TfidfVectorizer'. To change default parameters, set to a dictionary
e.g. {'model' : 'CountVectorizer', 'params' : {'max_features' : 10}}. See
http://scikit-learn.org/stable/modules/classes.html#module-sklearn.feature_extraction.text
for details. You can also specify your own vectorizer model as a class,
or class instance. With either option, the class must have a
fit_transform method (see here: http://scikit-learn.org/stable/data_transforms.html).
If a class, pass any parameters as a dictionary to vectorizer_params. If
a class instance, no parameters can be passed.
semantic : str, dict, class or class instance
Text model to use to transform text data. Built-in options are
'LatentDirichletAllocation' or 'NMF' (default: LDA). To change default
parameters, set to a dictionary e.g. {'model' : 'NMF', 'params' :
{'n_components' : 10}}. See
http://scikit-learn.org/stable/modules/classes.html#module-sklearn.decomposition
for details on the two model options. You can also specify your own
text model as a class, or class instance. With either option, the class
must have a fit_transform method (see here:
http://scikit-learn.org/stable/data_transforms.html).
If a class, pass any parameters as a dictionary to text_params. If
a class instance, no parameters can be passed.
corpus : list (or list of lists) of text samples or 'wiki', 'nips', 'sotus'.
Text to use to fit the semantic model (optional). If set to 'wiki', 'nips'
or 'sotus' and the default semantic and vectorizer models are used, a
pretrained model will be loaded which can save a lot of time.
ax : matplotlib.Axes
Axis handle to plot the figure
Returns
----------
geo : hypertools.DataGeometry
A new data geometry object
|
def delete_group(self, group_id):
    """
    Remove a group from your team
    :param group_id: Id of group
    """
    endpoint = self.TEAM_GROUPS_ID_URL % group_id
    conn = Connection(self.token)
    conn.set_url(self.production, endpoint)
    return conn.delete_request()
|
Remove a group from your team
:param group_id: Id of group
|
def ensure_chosen_alternatives_are_in_user_alt_ids(choice_col,
                                                   wide_data,
                                                   availability_vars):
    """
    Ensures that all chosen alternatives in `wide_df` are present in the
    `availability_vars` dict. Raises a helpful ValueError if not.
    Parameters
    ----------
    choice_col : str.
        Denotes the column in `wide_data` that contains a one if the
        alternative pertaining to the given row was the observed outcome for
        the observation pertaining to the given row and a zero otherwise.
    wide_data : pandas dataframe.
        Contains one row for each observation. Should contain the specified
        `choice_col` column.
    availability_vars : dict.
        There should be one key value pair for each alternative that is
        observed in the dataset. Each key should be the alternative id for the
        alternative, and the value should be the column heading in `wide_data`
        that denotes (using ones and zeros) whether an alternative is
        available/unavailable, respectively, for a given observation.
        Alternative id's, i.e. the keys, must be integers.
    Returns
    -------
    None.
    """
    known_ids = set(availability_vars.keys())
    every_choice_is_known = wide_data[choice_col].isin(known_ids).all()
    if not every_choice_is_known:
        raise ValueError("One or more values in wide_data[choice_col] is not"
                         " in the user provided alternative ids in"
                         " availability_vars.keys()")
    return None
|
Ensures that all chosen alternatives in `wide_df` are present in the
`availability_vars` dict. Raises a helpful ValueError if not.
Parameters
----------
choice_col : str.
Denotes the column in `wide_data` that contains a one if the
alternative pertaining to the given row was the observed outcome for
the observation pertaining to the given row and a zero otherwise.
wide_data : pandas dataframe.
Contains one row for each observation. Should contain the specified
`choice_col` column.
availability_vars : dict.
There should be one key value pair for each alternative that is
observed in the dataset. Each key should be the alternative id for the
alternative, and the value should be the column heading in `wide_data`
that denotes (using ones and zeros) whether an alternative is
available/unavailable, respectively, for a given observation.
Alternative id's, i.e. the keys, must be integers.
Returns
-------
None.
|
def labels(self):
    """Tuple of labels."""
    elements = self.root.iter('label')
    return tuple(_Label(el.get('id'), el.get('color'), el.text)
                 for el in elements)
|
Tuple of labels.
|
def show(self):
    '''Output for command sos show'''
    term_width = max(60, shutil.get_terminal_size((80, 20)).columns)
    header = f' {self.step_name() + ":":<21} ' + self.comment
    for line in textwrap.wrap(header,
                              width=term_width,
                              initial_indent='',
                              subsequent_indent=' ' * 24):
        print(line)
    # Options defined by this workflow only, excluding globally shared ones.
    local_parameters = {
        name: spec
        for name, spec in self.parameters.items()
        if name not in self.global_parameters
    }
    if not local_parameters:
        return
    print(' Workflow Options:')
    for name, (value, comment) in local_parameters.items():
        print(f' {format_par(name, value)}')
        if comment:
            for line in textwrap.wrap(comment,
                                      width=term_width,
                                      initial_indent=' ' * 24,
                                      subsequent_indent=' ' * 24):
                print(line)
|
Output for command sos show
|
def get(key, default=-1):
    """Backport support for original codes."""
    if isinstance(key, int):
        return NotifyMessage(key)
    # Enum lookup by name raises KeyError exactly when the name is not in
    # _member_map_, so EAFP here is equivalent to the membership test.
    try:
        return NotifyMessage[key]
    except KeyError:
        extend_enum(NotifyMessage, key, default)
        return NotifyMessage[key]
|
Backport support for original codes.
|
def evaluate(self, node: InstanceNode) -> XPathValue:
    """Evaluate the receiver and return the result.
    Args:
        node: Context node for XPath evaluation.
    Raises:
        XPathTypeError: If a subexpression of the receiver is of a wrong
            type.
    """
    # The context node doubles as the origin; position and size start at 1.
    context = XPathContext(node, node, 1, 1)
    return self._eval(context)
|
Evaluate the receiver and return the result.
Args:
node: Context node for XPath evaluation.
Raises:
XPathTypeError: If a subexpression of the receiver is of a wrong
type.
|
def xor(key, data):
    """
    Perform cyclical exclusive or operations on ``data``.
    The ``key`` can be a an integer *(0 <= key < 256)* or a byte sequence. If
    the key is smaller than the provided ``data``, the ``key`` will be
    repeated.
    Args:
        key(int or bytes): The key to xor ``data`` with.
        data(bytes): The data to perform the xor operation on.
    Returns:
        bytes: The result of the exclusive or operation.
    Examples:
        >>> from pwny import *
        >>> xor(5, b'ABCD')
        b'DGFA'
        >>> xor(5, b'DGFA')
        b'ABCD'
        >>> xor(b'pwny', b'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
        b'15-=51)19=%5=9!)!%=-%!9!)-'
        >>> xor(b'pwny', b'15-=51)19=%5=9!)!%=-%!9!)-')
        b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    """
    # Use isinstance (not `type(...) is`) and native Python 3 bytes
    # operations instead of the third-party six shims: iterating/indexing
    # bytes yields ints, and bytes() accepts an iterable of ints.
    if isinstance(key, int):
        key = bytes((key,))
    key_len = len(key)
    return bytes(
        byte ^ key[i % key_len]
        for i, byte in enumerate(data)
    )
|
Perform cyclical exclusive or operations on ``data``.
The ``key`` can be a an integer *(0 <= key < 256)* or a byte sequence. If
the key is smaller than the provided ``data``, the ``key`` will be
repeated.
Args:
key(int or bytes): The key to xor ``data`` with.
data(bytes): The data to perform the xor operation on.
Returns:
bytes: The result of the exclusive or operation.
Examples:
>>> from pwny import *
>>> xor(5, b'ABCD')
b'DGFA'
>>> xor(5, b'DGFA')
b'ABCD'
>>> xor(b'pwny', b'ABCDEFGHIJKLMNOPQRSTUVWXYZ')
b'15-=51)19=%5=9!)!%=-%!9!)-'
>>> xor(b'pwny', b'15-=51)19=%5=9!)!%=-%!9!)-')
b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
|
def _setattr_url_map(self):
'''
Set an attribute on the local instance for each key/val in url_map
CherryPy uses class attributes to resolve URLs.
'''
if self.apiopts.get('enable_sessions', True) is False:
url_blacklist = ['login', 'logout', 'minions', 'jobs']
else:
url_blacklist = []
urls = ((url, cls) for url, cls in six.iteritems(self.url_map)
if url not in url_blacklist)
for url, cls in urls:
setattr(self, url, cls())
|
Set an attribute on the local instance for each key/val in url_map
CherryPy uses class attributes to resolve URLs.
|
def macro_network():
    """A network of micro elements which has greater integrated information
    after coarse graining to a macro scale.
    """
    # The 16x4 TPM follows a block pattern over the state index:
    # the first two outputs are 1.0 only for states in the last block of
    # four (index // 4 == 3); the last two outputs are 1.0 only for the
    # last state within each block of four (index % 4 == 3). Every other
    # entry is 0.3.
    rows = []
    for state in range(16):
        first_pair = 1.0 if state // 4 == 3 else 0.3
        second_pair = 1.0 if state % 4 == 3 else 0.3
        rows.append([first_pair, first_pair, second_pair, second_pair])
    tpm = np.array(rows)
    return Network(tpm, node_labels=LABELS[:tpm.shape[1]])
|
A network of micro elements which has greater integrated information
after coarse graining to a macro scale.
|
def screen_resolution():
    """
    Returns the current screen's resolution.
    Should be multi-platform.
    :return: A tuple containing the width and height of the screen.
    """
    try:
        # Windows: ctypes.windll only exists on Windows, so other platforms
        # fall through via AttributeError.
        import ctypes
        user32 = ctypes.windll.user32
        return user32.GetSystemMetrics(0), user32.GetSystemMetrics(1)
    except AttributeError:
        pass
    try:
        # Mac OS X
        import AppKit
        size = AppKit.NSScreen.screens()[0].frame().size
        return int(size.width), int(size.height)
    except ImportError:
        pass
    try:
        # Linux
        import Xlib
        import Xlib.display
        display = Xlib.display.Display()
        root = display.screen().root
        geometry = root.get_geometry()
        return geometry.width, geometry.height
    except Exception:
        # Covers Xlib not installed (ImportError) AND Xlib installed but no
        # X display available (e.g. headless session), which previously
        # escaped the ImportError-only handler and crashed the caller.
        pass
    # Sensible default when no platform backend could report a size.
    return 1920, 1080
|
Returns the current screen's resolution.
Should be multi-platform.
:return: A tuple containing the width and height of the screen.
|
def add(self, source, email=None, name=None, username=None, uuid=None,
        matching=None, interactive=False):
    """Add an identity to the registry.

    This method adds a new identity to the registry. By default, a new
    unique identity will also be added and associated to the new identity.

    When <uuid> parameter is set, it only creates a new identity that will be
    associated to a unique identity defined by <uuid>.

    The method will print the uuids associated to the new registered identity.

    Optionally, this method can look for possible identities that match with
    the new one to insert. If a match is found, that means both identities are
    likely the same. Therefore, both identities would be merged into one. The
    algorithm used to search for matches will be defined by <matching> parameter.
    Please take into account that both unique identities will be always merged
    into the one from the registry, not into the new one.

    When <interactive> parameter is set to True, the user will have to confirm
    whether these two identities should be merged into one. By default, the
    method is set to False.

    :param source: data source
    :param email: email of the identity
    :param name: full name of the identity
    :param username: user name used by the identity
    :param uuid: associates the new identity to the unique identity
        identified by this id
    :param matching: type of matching used to merge existing identities
    :param interactive: interactive mode for merging identities, only available
        when <matching> parameter is set

    :returns: CMD_SUCCESS on success; otherwise the error code carried by the
        exception that aborted the operation
    """
    matcher = None

    if matching:
        try:
            # Blacklisted entries are excluded from identity matching.
            blacklist = api.blacklist(self.db)
            matcher = create_identity_matcher(matching, blacklist)
        except MatcherNotSupportedError as e:
            self.error(str(e))
            return e.code

    try:
        new_uuid = api.add_identity(self.db, source, email, name, username, uuid)
        # If no unique identity was given, the new identity's own uuid is used.
        uuid = uuid or new_uuid
        self.display('add.tmpl', id=new_uuid, uuid=uuid)

        if matcher:
            # Merge the new identity into any matching registry entries.
            self.__merge_on_matching(uuid, matcher, interactive)
    except AlreadyExistsError as e:
        msg = "unique identity '%s' already exists in the registry" % e.eid
        self.error(msg)
        return e.code
    except (NotFoundError, InvalidValueError) as e:
        self.error(str(e))
        return e.code

    return CMD_SUCCESS
|
Add an identity to the registry.
This method adds a new identity to the registry. By default, a new
unique identity will also be added and associated to the new identity.
When <uuid> parameter is set, it only creates a new identity that will be
associated to a unique identity defined by <uuid>.
The method will print the uuids associated to the new registered identity.
Optionally, this method can look for possible identities that match with
the new one to insert. If a match is found, that means both identities are
likely the same. Therefore, both identities would be merged into one. The
algorithm used to search for matches will be defined by <matching> parameter.
Please take into account that both unique identities will be always merged
into the one from the registry, not into the new one.
When <interactive> parameter is set to True, the user will have to confirm
whether these two identities should be merged into one. By default, the method
is set to False.
:param source: data source
:param email: email of the identity
:param name: full name of the identity
:param username: user name used by the identity
:param uuid: associates the new identity to the unique identity
identified by this id
:param matching: type of matching used to merge existing identities
:param interactive: interactive mode for merging identities, only available
when <matching> parameter is set
|
def q(self, x, q0):
    """
    Numerically solved trajectory function for initial conditions
    :math:`q(0) = q_0` and :math:`q'(0) = 0`.
    """
    # State vector layout: [q'(0), q(0)] = [0, q0].
    initial_state = [0, q0]
    solution = _sp.integrate.odeint(
        self._func, initial_state, x,
        Dfun=self._gradient, rtol=self.rtol, atol=self.atol)
    # The second column of the solution array holds q(x).
    return solution[:, 1]
|
Numerically solved trajectory function for initial conditions :math:`q(0) = q_0` and :math:`q'(0) = 0`.
|
def has_perm(self, user, perm, obj=None, *args, **kwargs):
    """Test user permissions for a single action and object.

    :param user: The user to test.
    :type user: ``User``
    :param perm: The action to test.
    :type perm: ``str``
    :param obj: The object path to test.
    :type obj: ``tutelary.engine.Object``
    :returns: ``bool`` -- is the action permitted?
    """
    try:
        if not self._obj_ok(obj):
            # The object is not directly usable; ask it for its
            # permissions representation instead.
            getter = getattr(obj, 'get_permissions_object', None)
            if getter is None:
                raise InvalidPermissionObjectException
            obj = getter(perm)
        return user.permset_tree.allow(Action(perm), obj)
    except ObjectDoesNotExist:
        # Missing database objects simply mean "not permitted".
        return False
|
Test user permissions for a single action and object.
:param user: The user to test.
:type user: ``User``
:param perm: The action to test.
:type perm: ``str``
:param obj: The object path to test.
:type obj: ``tutelary.engine.Object``
:returns: ``bool`` -- is the action permitted?
|
def authenticate(self, name, password, mechanism="DEFAULT"):
    """
    Send an authentication command for this database.
    mostly stolen from pymongo

    :param name: user name to authenticate with
    :param password: password for the given user
    :param mechanism: authentication mechanism to use (default: "DEFAULT")
    :raises TypeError: if ``name`` or ``password`` is not a string type
    :returns: the result of the underlying connection's ``authenticate`` call
    """
    # Validate argument types up front so callers get an explicit error.
    if not isinstance(name, (bytes, unicode)):
        raise TypeError("TxMongo: name must be an instance of basestring.")
    if not isinstance(password, (bytes, unicode)):
        raise TypeError("TxMongo: password must be an instance of basestring.")
    # Delegate the actual authentication to the connection.
    # (A stray no-op string literal that previously sat here was removed.)
    return self.connection.authenticate(self, name, password, mechanism)
|
Send an authentication command for this database.
mostly stolen from pymongo
|
def p_new_expr(self, p):
    """new_expr : member_expr
                | NEW new_expr
    """
    # NOTE: the docstring above is the PLY grammar production for this rule;
    # PLY reads it at runtime, so it must not be reworded.
    if len(p) == 2:
        # Plain member expression: pass the node through unchanged.
        p[0] = p[1]
    else:
        # 'new' keyword present: wrap the operand in a NewExpr AST node.
        p[0] = ast.NewExpr(p[2])
|
new_expr : member_expr
| NEW new_expr
|
def parse(query_string, unquote=True, normalized=False, encoding=DEFAULT_ENCODING):
    '''
    Main parse function
    @param query_string: query string to parse into a (possibly nested) dict
    @param unquote: unquote html query string ?
    @param encoding: An optional encoding used to decode the keys and values. Defaults to utf-8, which the W3C declares as a default in the W3C algorithm for encoding.
    @see http://www.w3.org/TR/html5/forms.html#application/x-www-form-urlencoded-encoding-algorithm
    @param normalized: parse number key in dict to proper list ?
    '''
    mydict = {}
    plist = []
    if query_string == "":
        return mydict
    if type(query_string) == bytes:
        # Accept bytes input; decode with the platform default (utf-8).
        query_string = query_string.decode()
    for element in query_string.split("&"):
        try:
            if unquote:
                (var, val) = element.split("=")
                if sys.version_info[0] == 2:
                    var = var.encode('ascii')
                    val = val.encode('ascii')
                    var = urllib.unquote_plus(var)
                    val = urllib.unquote_plus(val)
                # NOTE(review): on Python 3 no unquoting is performed even
                # when ``unquote`` is True — confirm whether this is intended.
            else:
                (var, val) = element.split("=")
        except ValueError:
            # An element without exactly one '=' cannot be parsed.
            raise MalformedQueryStringError
        if encoding:
            # NOTE(review): ``decode`` assumes bytes keys/values here; on
            # Python 3 str input this raises AttributeError — verify callers
            # pass ``encoding=None`` in that case.
            var = var.decode(encoding)
            val = val.decode(encoding)
        plist.append(parser_helper(var, val))
    # Merge each single-pair dict from parser_helper into the result,
    # descending through nested dicts as long as the value is itself a dict.
    for di in plist:
        (k, v) = di.popitem()
        tempdict = mydict
        while k in tempdict and type(v) is dict:
            tempdict = tempdict[k]
            (k, v) = v.popitem()
        if k in tempdict and type(tempdict[k]).__name__ == 'list':
            tempdict[k].append(v)
        elif k in tempdict:
            # Second value seen for the same key: promote to a list.
            tempdict[k] = [tempdict[k], v]
        else:
            tempdict[k] = v
    if normalized == True:
        return _normalize(mydict)
    return mydict
|
Main parse function
@param query_string:
@param unquote: unquote html query string ?
@param encoding: An optional encoding used to decode the keys and values. Defaults to utf-8, which the W3C declares as a default in the W3C algorithm for encoding.
@see http://www.w3.org/TR/html5/forms.html#application/x-www-form-urlencoded-encoding-algorithm
@param normalized: parse number key in dict to proper list ?
|
def tuple_search(t, i, v):
    """
    Search tuple array by index and value
    :param t: tuple array
    :param i: index of the value in each tuple
    :param v: value
    :return: the first tuple in the array with the specific index / value,
        or None if no tuple matches
    """
    # Lazily scan and stop at the first match; None when exhausted.
    return next((candidate for candidate in t if candidate[i] == v), None)
|
Search tuple array by index and value
:param t: tuple array
:param i: index of the value in each tuple
:param v: value
:return: the first tuple in the array with the specific index / value
|
def sites(c):
    """
    Build both doc sites w/ maxed nitpicking.
    """
    # TODO: This is super lolzy but we haven't actually tackled nontrivial
    # in-Python task calling yet, so we clone 'our' context once per
    # subcollection and apply that subcollection's per-collection config.
    docs_c = Context(config=c.config.clone())
    www_c = Context(config=c.config.clone())
    docs_c.update(**docs.configuration())
    www_c.update(**www.configuration())
    # Must build both normally first to ensure good intersphinx inventory
    # files exist =/ circular dependencies ahoy! Run quietly so only
    # super-serious errors bubble up.
    # TODO: wants a 'temporarily tweak context settings' contextmanager
    # TODO: also a spinner, since this is confusing when docs aren't prebuilt
    for ctx in (docs_c, www_c):
        ctx["run"].hide = True
    docs["build"](docs_c)
    www["build"](www_c)
    for ctx in (docs_c, www_c):
        ctx["run"].hide = False
    # Now the real builds, with nitpick=True (nitpicks + tracebacks).
    docs["build"](docs_c, nitpick=True)
    www["build"](www_c, nitpick=True)
|
Build both doc sites w/ maxed nitpicking.
|
def reads(err_log):
    """
    Parse bbmerge's output to extract the total number of reads and the
    number of reads that could be paired.
    :param err_log: bbmerge writes its stats to the error file
    :return: num_reads, the total number of reads; paired_reads, the number
        of paired reads
    """
    total_reads = 0
    joined_reads = 0
    with open(err_log, 'r') as log_handle:
        for entry in log_handle:
            # 'Pairs:' carries the total; the count is the last tab field.
            if 'Pairs:' in entry:
                total_reads = entry.split('\t')[-1].rstrip()
            # 'Joined:' carries the paired count in the second-to-last field.
            elif 'Joined:' in entry:
                joined_reads = entry.split('\t')[-2].rstrip()
    return total_reads, joined_reads
|
Parse the outputs from bbmerge to extract the total number of reads, as well as the number of reads that
could be paired
:param err_log: bbmerge outputs the stats in the error file
:return: num_reads, the total number of reads, paired_reads, number of paired reads
|
def extract_to(self, *, stream=None, fileprefix=''):
    """Attempt to extract the image directly to a usable image file

    If possible, the compressed data is extracted and inserted into
    a compressed image file format without transcoding the compressed
    content. If this is not possible, the data will be decompressed
    and extracted to an appropriate format.

    Because it is not known until attempted what image format will be
    extracted, users should not assume what format they are getting back.
    When saving the image to a file, use a temporary filename, and then
    rename the file to its final name based on the returned file extension.

    Examples:
        >>> im.extract_to(stream=bytes_io)
        '.png'

        >>> im.extract_to(fileprefix='/tmp/image00')
        '/tmp/image00.jpg'

    Args:
        stream: Writable stream to write data to.
        fileprefix (str or Path): The path to write the extracted image to,
            without the file extension.

    Returns:
        str: If *fileprefix* was provided, then the fileprefix with the
            appropriate extension. If no *fileprefix*, then an extension
            indicating the file type.

    Raises:
        ValueError: unless exactly one of *stream* and *fileprefix* is given.
    """
    # Exactly one destination must be supplied (both-or-neither is an error).
    if bool(stream) == bool(fileprefix):
        raise ValueError("Must set exactly one of stream or fileprefix")
    if stream:
        return self._extract_to_stream(stream=stream)

    # Extract to memory first: the file extension is only known after
    # extraction, so the destination file cannot be named up front.
    bio = BytesIO()
    extension = self._extract_to_stream(stream=bio)
    bio.seek(0)
    # BUG FIX: previously ``Path(Path(fileprefix).name + extension)`` was
    # used, which silently dropped any directory component of *fileprefix*
    # and wrote into the current working directory, contradicting the
    # documented '/tmp/image00' -> '/tmp/image00.jpg' behavior.
    filepath = Path(str(fileprefix) + extension)
    with filepath.open('wb') as target:
        copyfileobj(bio, target)
    return str(filepath)
|
Attempt to extract the image directly to a usable image file
If possible, the compressed data is extracted and inserted into
a compressed image file format without transcoding the compressed
content. If this is not possible, the data will be decompressed
and extracted to an appropriate format.
Because it is not known until attempted what image format will be
extracted, users should not assume what format they are getting back.
When saving the image to a file, use a temporary filename, and then
rename the file to its final name based on the returned file extension.
Examples:
>>> im.extract_to(stream=bytes_io)
'.png'
>>> im.extract_to(fileprefix='/tmp/image00')
'/tmp/image00.jpg'
Args:
stream: Writable stream to write data to.
fileprefix (str or Path): The path to write the extracted image to,
without the file extension.
Returns:
str: If *fileprefix* was provided, then the fileprefix with the
appropriate extension. If no *fileprefix*, then an extension
indicating the file type.
|
def DoubleClick(cls):
    ''' Double-click with the left mouse button. '''
    # Resolve the target element, then perform a double-click on it
    # through a Selenium ActionChains sequence.
    element = cls._element()
    action = ActionChains(Web.driver)
    action.double_click(element)
    action.perform()
|
左键点击2次
|
def add_pr_curve(self, tag, labels, predictions, num_thresholds,
                 global_step=None, weights=None):
    """Add a precision-recall curve summary.

    Note: MXNet `NDArray` inputs are converted with ``asnumpy()``, which
    blocks the main thread and may affect async execution of the MXNet
    engine.

    Parameters
    ----------
    tag : str
        Tag attached to the summary; used by TensorBoard for organization.
    labels : MXNet `NDArray` or `numpy.ndarray`
        Ground-truth 0/1 values of arbitrary shape.
    predictions : MXNet `NDArray` or `numpy.ndarray`
        float32 values in `[0, 1]`; dimensions must match `labels`.
    num_thresholds : int
        Number of thresholds, evenly distributed in `[0, 1]`; must be >= 2.
        Thresholds are `[0.0, 1*w, 2*w, ..., 1.0]` with
        `w = 1.0 / (num_thresholds - 1)`.
    global_step : int
        Global step value to record.
    weights : MXNet `NDArray` or `numpy.ndarray`
        Optional float32 per-count multipliers; must be the same shape as,
        or broadcastable to, `labels`.
    """
    if num_thresholds < 2:
        raise ValueError('num_thresholds must be >= 2')
    # Convert possible MXNet NDArrays to numpy before building the summary.
    labels_np = _make_numpy_array(labels)
    predictions_np = _make_numpy_array(predictions)
    summary = pr_curve_summary(tag, labels_np, predictions_np,
                               num_thresholds, weights)
    self._file_writer.add_summary(summary, global_step)
|
Adds precision-recall curve.
Note: This function internally calls `asnumpy()` for MXNet `NDArray` inputs.
Since `asnumpy()` is a blocking function call, this function would block the main
thread till it returns. It may consequently affect the performance of async execution
of the MXNet engine.
Parameters
----------
tag : str
A tag attached to the summary. Used by TensorBoard for organization.
labels : MXNet `NDArray` or `numpy.ndarray`.
The ground truth values. A tensor of 0/1 values with arbitrary shape.
predictions : MXNet `NDArray` or `numpy.ndarray`.
A float32 tensor whose values are in the range `[0, 1]`. Dimensions must match
those of `labels`.
num_thresholds : int
Number of thresholds, evenly distributed in `[0, 1]`, to compute PR metrics for.
Should be `>= 2`. This value should be a constant integer value, not a tensor
that stores an integer.
The thresholds for computing the pr curves are calculated in the following way:
`width = 1.0 / (num_thresholds - 1),
thresholds = [0.0, 1*width, 2*width, 3*width, ..., 1.0]`.
global_step : int
Global step value to record.
weights : MXNet `NDArray` or `numpy.ndarray`.
Optional float32 tensor. Individual counts are multiplied by this value.
This tensor must be either the same shape as or broadcastable to the `labels`
tensor.
|
def delete(self):
    """Delete the bucket if it exists.

    Raises:
        Exception if there was an error deleting the bucket.
    """
    # The previous implementation wrapped this call in
    # ``except Exception as e: raise e``, which only re-raised the very
    # same exception. Calling the API directly is equivalent and keeps
    # the original traceback intact.
    if self.exists():
        self._api.buckets_delete(self._name)
|
Deletes the bucket.
Raises:
Exception if there was an error deleting the bucket.
|
def connections_from_object(self, from_obj):
    """
    Return a ``Connection`` query set matching every connection whose
    source is the given object.
    """
    # Make sure the object's content type is valid for this relation.
    self._validate_ctypes(from_obj, None)
    source_pk = from_obj.pk
    return self.connections.filter(from_pk=source_pk)
|
Returns a ``Connection`` query set matching all connections with
the given object as a source.
|
def throw(self, method, args=None, nowait=False, **kwargs):
    """Call method on one of the agents in round robin.

    See :meth:`call_or_cast` for a full list of supported
    arguments.

    If the keyword argument `nowait` is false (default) it
    will block and return the reply.

    :param method: name of the remote method to call
    :param args: optional mapping of arguments for the method
    :param nowait: when false (default), block and return the reply
    """
    # BUG FIX: ``args={}`` was a mutable default argument shared across
    # calls; use ``None`` as the sentinel and substitute a fresh dict.
    if args is None:
        args = {}
    r = self.call_or_cast(method, args, type=ACTOR_TYPE.RR,
                          nowait=nowait, **kwargs)
    if not nowait:
        return r
|
Call method on one of the agents in round robin.
See :meth:`call_or_cast` for a full list of supported
arguments.
If the keyword argument `nowait` is false (default) it
will block and return the reply.
|
def clear_mask(self):
    """Clear the mask overlay from the displayed image.

    Loaded masks stay in memory; only the display objects are removed.
    """
    # Remove both the mask and the mask-highlight canvas objects, if set;
    # deletion failures are deliberately ignored (best-effort cleanup).
    for tag in (self.masktag, self.maskhltag):
        if not tag:
            continue
        try:
            self.canvas.delete_object_by_tag(tag, redraw=False)
        except Exception:
            pass
    self.treeview.clear()  # Clear table too
    self.fitsimage.redraw()
|
Clear mask from image.
This does not clear loaded masks from memory.
|
def results_tc(self, key, value):
    """Write data to the results.tc file in the TcEX specified directory.

    The TcEx platform supports persistent values between executions of the
    App; this method stores them for TC to read into the database. An
    existing entry for *key* is updated in place, otherwise a new line is
    appended.

    Args:
        key (string): The data key to be stored.
        value (string): The data value to be stored.
    """
    # Prefer the TC output directory when writable; otherwise fall back
    # to the current working directory.
    if os.access(self.default_args.tc_out_path, os.W_OK):
        results_file = '{}/results.tc'.format(self.default_args.tc_out_path)
    else:
        results_file = 'results.tc'

    open(results_file, 'a').close()  # ensure file exists
    updated = False
    with open(results_file, 'r+') as fh:
        kept_lines = []
        for line in fh.read().strip().split('\n'):
            if not line:
                continue
            try:
                k, v = line.split(' = ')
            except ValueError:
                # handle null/empty value (e.g., "name =")
                k, v = line.split(' =')
            if k == key:
                # Replace the existing value for this key.
                v = value
                updated = True
            if v is not None:
                kept_lines.append('{} = {}'.format(k, v))
        if not updated and value is not None:
            # Key/value pair didn't already exist; append it.
            kept_lines.append('{} = {}'.format(key, value))
        fh.seek(0)
        fh.write(''.join(entry + '\n' for entry in kept_lines))
        fh.truncate()
|
Write data to results_tc file in TcEX specified directory.
The TcEx platform support persistent values between executions of the App. This
method will store the values for TC to read and put into the Database.
Args:
key (string): The data key to be stored.
value (string): The data value to be stored.
|
def __get_average_intra_cluster_distance(self, entry):
    """!
    @brief Calculates average intra cluster distance between current and specified clusters.

    @param[in] entry (cfentry): Clustering feature to which distance should be obtained.

    @return (double) Average intra cluster distance.
    """
    combined_linear_sum = list_math_addition(self.linear_sum, entry.linear_sum)
    # Dot product of the combined linear sum with itself.
    linear_part_distance = sum(list_math_multiplication(combined_linear_sum,
                                                        combined_linear_sum))
    total_points = self.number_points + entry.number_points
    general_part_distance = (2.0 * total_points
                             * (self.square_sum + entry.square_sum)
                             - 2.0 * linear_part_distance)
    return (general_part_distance
            / (total_points * (total_points - 1.0))) ** 0.5
|
!
@brief Calculates average intra cluster distance between current and specified clusters.
@param[in] entry (cfentry): Clustering feature to which distance should be obtained.
@return (double) Average intra cluster distance.
|
def read_from_file(self, filename):
    """Read minimum needs from an existing json file.

    :param filename: Path of the file to read.
    :type filename: basestring, str

    :returns: Success status: -1 for unsuccessful, 0 for success.
    :rtype: int
    """
    # Missing file: signal failure without raising.
    if not exists(filename):
        return -1
    with open(filename) as fd:
        raw_json = fd.read()
    try:
        minimum_needs = json.loads(raw_json)
    except (TypeError, ValueError):
        # Unparseable content is treated the same as no content.
        minimum_needs = None
    if not minimum_needs:
        return -1
    return self.update_minimum_needs(minimum_needs)
|
Read from an existing json file.
:param filename: The file to be written to.
:type filename: basestring, str
:returns: Success status. -1 for unsuccessful 0 for success
:rtype: int
|
def Parse(self):
    """Parse program output."""
    start_line, lang = self.ParseDesc()
    # A negative start line means the description could not be parsed.
    if start_line < 0:
        return
    # Dispatch to the language-specific flag parser.
    if lang == 'python':
        self.ParsePythonFlags(start_line)
    elif lang == 'c':
        self.ParseCFlags(start_line)
    elif lang == 'java':
        self.ParseJavaFlags(start_line)
|
Parse program output.
|
def addFeatureToGraph(
        self, add_region=True, region_id=None, feature_as_class=False):
    """
    We make the assumption here that all features are instances.
    The features are located on a region,
    which begins and ends with faldo:Position

    The feature locations leverage the Faldo model,
    which has a general structure like:
    Triples:
    feature_id a feature_type (individual)
    faldo:location region_id
    region_id a faldo:region
    faldo:begin start_position
    faldo:end end_position
    start_position a
    (any of: faldo:(((Both|Plus|Minus)Strand)|Exact)Position)
    faldo:position Integer(numeric position)
    faldo:reference reference_id
    end_position a
    (any of: faldo:(((Both|Plus|Minus)Strand)|Exact)Position)
    faldo:position Integer(numeric position)
    faldo:reference reference_id

    :param add_region: whether to attach a faldo region to the feature
    :param region_id: explicit region identifier; generated when None
    :param feature_as_class: emit the feature as a class instead of an
        individual
    :return: None
    """
    # Emit the feature itself, either as a class or as an individual.
    if feature_as_class:
        self.model.addClassToGraph(
            self.fid, self.label, self.ftype, self.description)
    else:
        self.model.addIndividualToGraph(
            self.fid, self.label, self.ftype, self.description)

    # Without any coordinates there is nothing to place on a region.
    if self.start is None and self.stop is None:
        add_region = False

    if add_region:
        # create a region that has the begin/end positions
        # Strip the curie/prefix portion from the reference id.
        regionchr = re.sub(r'\w+\:_?', '', self.start['reference'])
        if region_id is None:
            # in case the values are undefined
            # if we know only one of the coordinates,
            # then we'll add an "unknown" other.
            st = sp = 'UN'
            strand = None
            if self.start is not None and self.start['coordinate'] is not None:
                st = str(self.start['coordinate'])
                strand = self._getStrandStringFromPositionTypes(self.start['type'])
            if self.stop is not None and self.stop['coordinate'] is not None:
                sp = str(self.stop['coordinate'])
                if strand is not None:
                    strand = self._getStrandStringFromPositionTypes(
                        self.stop['type'])
            # assume that the strand is the same for both start and stop.
            # this will need to be fixed in the future
            region_items = [regionchr, st, sp]
            if strand is not None:
                region_items += [strand]
            region_id = '-'.join(region_items)
            rid = region_id
            rid = re.sub(r'\w+\:', '', rid, 1)  # replace the id prefix
            # Mint a blank-node style id for the region.
            rid = '_:'+rid+"-Region"
            region_id = rid
            self.graph.addTriple(self.fid, self.globaltt['location'], region_id)
            self.model.addIndividualToGraph(region_id, None, self.globaltt['Region'])
    else:
        # No region: the feature id itself is typed as the region.
        region_id = self.fid
        self.model.addType(region_id, self.globaltt['region'])

    # add the start/end positions to the region
    beginp = endp = None
    if self.start is not None:
        beginp = self._makePositionId(
            self.start['reference'], self.start['coordinate'], self.start['type'])
        self.addPositionToGraph(
            self.start['reference'], self.start['coordinate'], self.start['type'])
    if self.stop is not None:
        endp = self._makePositionId(
            self.stop['reference'], self.stop['coordinate'], self.stop['type'])
        self.addPositionToGraph(
            self.stop['reference'], self.stop['coordinate'], self.stop['type'])

    self.addRegionPositionToGraph(region_id, beginp, endp)
    # {coordinate : integer, reference : reference_id, types = []}
    return
|
We make the assumption here that all features are instances.
The features are located on a region,
which begins and ends with faldo:Position
The feature locations leverage the Faldo model,
which has a general structure like:
Triples:
feature_id a feature_type (individual)
faldo:location region_id
region_id a faldo:region
faldo:begin start_position
faldo:end end_position
start_position a
(any of: faldo:(((Both|Plus|Minus)Strand)|Exact)Position)
faldo:position Integer(numeric position)
faldo:reference reference_id
end_position a
(any of: faldo:(((Both|Plus|Minus)Strand)|Exact)Position)
faldo:position Integer(numeric position)
faldo:reference reference_id
:param graph:
:return:
|
def getMibSymbol(self):
    """Return MIB variable symbolic identification.

    Returns
    -------
    str
        MIB module name
    str
        MIB variable symbolic name
    : :py:class:`~pysnmp.proto.rfc1902.ObjectName`
        class instance representing MIB variable instance index.

    Raises
    ------
    SmiError
        If MIB variable conversion has not been performed.

    Examples
    --------
    >>> objectIdentity = ObjectIdentity('1.3.6.1.2.1.1.1.0')
    >>> objectIdentity.resolveWithMib(mibViewController)
    >>> objectIdentity.getMibSymbol()
    ('SNMPv2-MIB', 'sysDescr', (0,))
    >>>
    """
    # Guard clause: refuse to report symbols until MIB resolution has
    # completed and the ST_CLEAN state bit is set.
    if not (self._state & self.ST_CLEAN):
        raise SmiError(
            '%s object not fully initialized' % self.__class__.__name__)
    return self._modName, self._symName, self._indices
|
Returns MIB variable symbolic identification.
Returns
-------
str
MIB module name
str
MIB variable symbolic name
: :py:class:`~pysnmp.proto.rfc1902.ObjectName`
class instance representing MIB variable instance index.
Raises
------
SmiError
If MIB variable conversion has not been performed.
Examples
--------
>>> objectIdentity = ObjectIdentity('1.3.6.1.2.1.1.1.0')
>>> objectIdentity.resolveWithMib(mibViewController)
>>> objectIdentity.getMibSymbol()
('SNMPv2-MIB', 'sysDescr', (0,))
>>>
|
def verify_system_status(self):
    """Verify that the system can take this installation.

    Raises InstallError when the platform or configuration is unsupported,
    and InstallSkipError when the binding is already present so there is
    nothing to do.
    """
    # Only Linux hosts are supported.
    if not sys.platform.startswith('linux'):
        raise InstallError('Supported platform is Linux only.')

    if self.python.is_system_python():
        if self.python.is_python_binding_installed():
            message = '''
RPM Python binding already installed on system Python.
Nothing to do.
'''
            Log.info(message)
            # Not an error: installation is simply unnecessary.
            raise InstallSkipError(message)
        elif self.sys_installed:
            # Explicitly allowed via RPM_PY_SYS=true; proceed.
            pass
        else:
            message = '''
RPM Python binding on system Python should be installed manually.
Install the proper RPM package of python{,2,3}-rpm,
or set a environment variable RPM_PY_SYS=true
'''
            raise InstallError(message)

    if self.rpm.is_system_rpm():
        # System RPM requires an extra package-level check.
        self.verify_package_status()
|
Verify system status.
|
def subnet_range(ip_net, cidr):
    """
    Function to return a subnet range value from a IP address and CIDR pair
    Args:
        ip_net: Unicast or Multicast IP address or subnet in the following format 192.168.1.1, 239.1.1.1
        cidr: CIDR value of 1 to 32
    Returns: returns a dictionary of info with keys IP, NET, CIDR, RANGE,
        BCAST, MASK, INVMASK and CIDRVAL
    """
    subnets_dict = dict()
    # Normalize the input address down to its network address.
    subnet = whole_subnet_maker(ip_net, cidr)
    subnets_dict['IP'] = ip_net
    subnets_dict['NET'] = subnet
    subnets_dict['CIDR'] = '%s/%s' % (whole_subnet_maker(ip_net, cidr), cidr)
    # Each branch handles the octet in which the mask boundary falls.
    if int(cidr) >= 24:
        subnet_split = subnet.split('.')
        first_ip = int(subnet_split[3]) + 1
        last_ip = (int(subnet_split[3]) + 1) + (253 - int(__mask_conversion[int(cidr)]['OCT4']))
        bcast_ip = (int(subnet_split[3]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT4']))
        temp = '%s.%s.%s.' % (subnet_split[0], subnet_split[1], subnet_split[2])
        subnets_dict['RANGE'] = '%s%i to %s%i' % (temp, first_ip, temp, last_ip)
        subnets_dict['BCAST'] = '%s%i' % (temp, bcast_ip)
        subnets_dict['MASK'] = __mask_conversion[int(cidr)]['MASK']
        subnets_dict['INVMASK'] = __mask_conversion[int(cidr)]['INVMASK']
        subnets_dict['CIDRVAL'] = __mask_conversion[int(cidr)]['CIDR']
    elif int(cidr) >= 16:
        subnet_split = subnet.split('.')
        first_ip = int(subnet_split[2])
        # NOTE(review): last_ip and bcast_ip use identical expressions here,
        # unlike the /24 branch (253 vs 254) — confirm this is intentional.
        last_ip = (int(subnet_split[2]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT3']))
        bcast_ip = (int(subnet_split[2]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT3']))
        temp = '%s.%s.' % (subnet_split[0], subnet_split[1])
        subnets_dict['RANGE'] = '%s%i.1 to %s%i.254' % (temp, first_ip, temp, last_ip)
        subnets_dict['BCAST'] = '%s%i.255' % (temp, bcast_ip)
        subnets_dict['MASK'] = __mask_conversion[int(cidr)]['MASK']
        subnets_dict['INVMASK'] = __mask_conversion[int(cidr)]['INVMASK']
        subnets_dict['CIDRVAL'] = __mask_conversion[int(cidr)]['CIDR']
    elif int(cidr) >= 8:
        subnet_split = subnet.split('.')
        first_ip = int(subnet_split[1])
        # NOTE(review): same last_ip == bcast_ip pattern as the /16 branch.
        last_ip = (int(subnet_split[1]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT2']))
        bcast_ip = (int(subnet_split[1]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT2']))
        temp = '%s.' % (subnet_split[0],)
        subnets_dict['RANGE'] = '%s%i.0.1 to %s%i.255.254' % (temp, first_ip, temp, last_ip)
        subnets_dict['BCAST'] = '%s%i.255.255' % (temp, bcast_ip)
        subnets_dict['MASK'] = __mask_conversion[int(cidr)]['MASK']
        subnets_dict['INVMASK'] = __mask_conversion[int(cidr)]['INVMASK']
        subnets_dict['CIDRVAL'] = __mask_conversion[int(cidr)]['CIDR']
    elif int(cidr) >= 1:
        subnet_split = subnet.split('.')
        first_ip = int(subnet_split[0])
        last_ip = (int(subnet_split[0]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT1']))
        bcast_ip = (int(subnet_split[0]) + 1) + (254 - int(__mask_conversion[int(cidr)]['OCT1']))
        subnets_dict['RANGE'] = '%i.0.0.1 to %i.255.255.254' % (first_ip, last_ip)
        subnets_dict['BCAST'] = '%i.255.255.255' % (bcast_ip,)
        subnets_dict['MASK'] = __mask_conversion[int(cidr)]['MASK']
        subnets_dict['INVMASK'] = __mask_conversion[int(cidr)]['INVMASK']
        subnets_dict['CIDRVAL'] = __mask_conversion[int(cidr)]['CIDR']
    return subnets_dict
|
Function to return a subnet range value from a IP address and CIDR pair
Args:
ip_net: Unicast or Multicast IP address or subnet in the following format 192.168.1.1, 239.1.1.1
cidr: CIDR value of 1 to 32
Returns: returns a dictionary of info
|
def parse_name(cls, name: str, default: T = None) -> T:
    """Parse specified name for IntEnum; return default if not found."""
    if not name:
        return default
    # Case-insensitive comparison against each member's name.
    wanted = name.lower()
    for member in cls:
        if member.name.lower() == wanted:
            return member
    return default
|
Parse specified name for IntEnum; return default if not found.
|
def m2i(self, pkt, s):
    """
    The good thing about safedec is that it may still decode ASN1
    even if there is a mismatch between the expected tag (self.ASN1_tag)
    and the actual tag; the decoded ASN1 object will simply be put
    into an ASN1_BADTAG object. However, safedec prevents the raising of
    exceptions needed for ASN1F_optional processing.
    Thus we use 'flexible_tag', which should be False with ASN1F_optional.

    Regarding other fields, we might need to know whether encoding went
    as expected or not. Noticeably, input methods from cert.py expect
    certain exceptions to be raised. Hence default flexible_tag is False.
    """
    # Strip the (possibly implicit/explicit) tag; with flexible_tag the
    # decoder tolerates a tag mismatch and reports it via diff_tag.
    diff_tag, s = BER_tagging_dec(s, hidden_tag=self.ASN1_tag,
                                  implicit_tag=self.implicit_tag,
                                  explicit_tag=self.explicit_tag,
                                  safe=self.flexible_tag)
    if diff_tag is not None:
        # this implies that flexible_tag was True
        # Remember the tag actually seen so later re-encoding matches input.
        if self.implicit_tag is not None:
            self.implicit_tag = diff_tag
        elif self.explicit_tag is not None:
            self.explicit_tag = diff_tag
    codec = self.ASN1_tag.get_codec(pkt.ASN1_codec)
    if self.flexible_tag:
        # Tolerant decode: bad content becomes an ASN1_BADTAG-style object.
        return codec.safedec(s, context=self.context)
    else:
        # Strict decode: raise on malformed content (expected by cert.py).
        return codec.dec(s, context=self.context)
|
The good thing about safedec is that it may still decode ASN1
even if there is a mismatch between the expected tag (self.ASN1_tag)
and the actual tag; the decoded ASN1 object will simply be put
into an ASN1_BADTAG object. However, safedec prevents the raising of
exceptions needed for ASN1F_optional processing.
Thus we use 'flexible_tag', which should be False with ASN1F_optional.
Regarding other fields, we might need to know whether encoding went
as expected or not. Noticeably, input methods from cert.py expect
certain exceptions to be raised. Hence default flexible_tag is False.
|
def ProfileRunValidationOutputFromOptions(feed, options):
    """Run RunValidationOutputFromOptions, print profile and return exit code."""
    import cProfile
    import pstats
    # runctx will modify a dict, but not locals(). We need a way to get rv back.
    locals_for_exec = locals()
    cProfile.runctx('rv = RunValidationOutputFromOptions(feed, options)',
                    globals(), locals_for_exec, 'validate-stats')

    # Only available on Unix, http://docs.python.org/lib/module-resource.html
    import resource
    print("Time: %d seconds" % (
        resource.getrusage(resource.RUSAGE_SELF).ru_utime +
        resource.getrusage(resource.RUSAGE_SELF).ru_stime))

    # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/286222
    # http://aspn.activestate.com/ASPN/Cookbook/ "The recipes are freely
    # available for review and use."
    def _VmB(VmKey):
        """Return size from proc status in bytes."""
        _proc_status = '/proc/%d/status' % os.getpid()
        _scale = {'kB': 1024.0, 'mB': 1024.0*1024.0,
                  'KB': 1024.0, 'MB': 1024.0*1024.0}
        # get pseudo file /proc/<pid>/status
        try:
            t = open(_proc_status)
            v = t.read()
            t.close()
        except:
            raise Exception("no proc file %s" % _proc_status)
            # NOTE(review): this return is unreachable after the raise above —
            # the comment suggests returning 0 on non-Linux may have been the
            # original intent; confirm before changing behavior.
            return 0  # non-Linux?
        # get VmKey line e.g. 'VmRSS: 9999 kB\n ...'
        try:
            i = v.index(VmKey)
            v = v[i:].split(None, 3)  # whitespace
        except:
            return 0  # v is empty
        if len(v) < 3:
            raise Exception("%s" % v)
            # NOTE(review): unreachable return, same pattern as above.
            return 0  # invalid format?
        # convert Vm value to bytes
        return int(float(v[1]) * _scale[v[2]])

    # I ran this on over a hundred GTFS files, comparing VmSize to VmRSS
    # (resident set size). The difference was always under 2% or 3MB.
    print("Virtual Memory Size: %d bytes" % _VmB('VmSize:'))

    # Output report of where CPU time was spent.
    p = pstats.Stats('validate-stats')
    p.strip_dirs()
    p.sort_stats('cumulative').print_stats(30)
    p.sort_stats('cumulative').print_callers(30)
    return locals_for_exec['rv']
|
Run RunValidationOutputFromOptions, print profile and return exit code.
|
def set_bind(self):
    """
    Install key bindings; called more than once, so existing bindings
    for Page Down / Page Up are cleared before being re-mapped to jump
    straight to the minimum / maximum value.
    """
    RangedInt.set_bind(self)
    for sequence in ('<Next>', '<Prior>'):
        self.unbind(sequence)
    self.bind('<Next>', lambda event: self.set(self._min()))
    self.bind('<Prior>', lambda event: self.set(self._max()))
|
Sets key bindings -- we need this more than once
|
def GET(self, courseid, taskid, path):  # pylint: disable=arguments-differ
    """ GET request: serve a public file belonging to a course/task.

    Looks up the course, checks the user may access it, resolves `path`
    inside the task's (or course-wide ``$common``) public folder, and
    either returns the file content, redirects to a URL, or 404s.
    """
    try:
        course = self.course_factory.get_course(courseid)
        if not self.user_manager.course_is_open_to_user(course):
            return self.template_helper.get_renderer().course_unavailable()
        # Normalize to defeat '..' traversal and URL-encoding tricks.
        path_norm = posixpath.normpath(urllib.parse.unquote(path))
        if taskid == "$common":
            public_folder = course.get_fs().from_subfolder("$common").from_subfolder("public")
        else:
            task = course.get_task(taskid)
            if not self.user_manager.task_is_visible_by_user(task):  # ignore LTI check here
                return self.template_helper.get_renderer().task_unavailable()
            public_folder = task.get_fs().from_subfolder("public")
        # distribute() decides how the file is served: inline ("local"),
        # via redirect ("url"), or not at all.
        (method, mimetype_or_none, file_or_url) = public_folder.distribute(path_norm, False)
        if method == "local":
            web.header('Content-Type', mimetype_or_none)
            return file_or_url
        elif method == "url":
            raise web.redirect(file_or_url)
        else:
            raise web.notfound()
    except web.HTTPError as error_or_redirect:
        # web.py signals redirects/404s via exceptions; pass them through.
        raise error_or_redirect
    except:
        # Any other failure becomes a 404, unless debugging is on.
        if web.config.debug:
            raise
        else:
            raise web.notfound()
|
GET request
|
def parser(self):
    """Return the argument parser for the current name.

    When the module exposes subcommands, a parser with one subparser per
    subcommand is built (underscores in names become dashes); otherwise a
    plain parser wrapping the module-level callback is returned.
    """
    mod = self.module
    subs = self.subcommands
    if not subs:
        return Parser(callback=self.callbacks[self.function_name], module=mod)
    main_parser = Parser(description=inspect.getdoc(mod), module=mod)
    subparsers = main_parser.add_subparsers()
    for raw_name, cb in subs.items():
        subparsers.add_parser(
            raw_name.replace("_", "-"),
            callback=cb,
            help=inspect.getdoc(cb),
        )
    return main_parser
|
return the parser for the current name
|
def flock(path):
    """Attempt to acquire a POSIX file lock.

    Yields True when the exclusive, non-blocking lock on `path` was
    obtained and False when another process already holds it; the lock
    is released when the generator is closed.
    """
    with open(path, "w+") as lock_file:
        got_lock = False
        try:
            fcntl.flock(lock_file, fcntl.LOCK_EX | fcntl.LOCK_NB)
            got_lock = True
            yield got_lock
        except OSError:
            # Lock is held elsewhere: report failure instead of raising.
            yield got_lock
        finally:
            if got_lock:
                fcntl.flock(lock_file, fcntl.LOCK_UN)
|
Attempt to acquire a POSIX file lock.
|
def assets(self, asset_code=None, asset_issuer=None, cursor=None, order='asc', limit=10):
    """Return every asset known to Horizon along with per-asset statistics.

    `GET /assets{?asset_code,asset_issuer,cursor,limit,order}
    <https://www.stellar.org/developers/horizon/reference/endpoints/assets-all.html>`_

    :param str asset_code: Code of the Asset to filter by.
    :param str asset_issuer: Issuer of the Asset to filter by.
    :param int cursor: A paging token, specifying where to start returning records from.
    :param str order: The order in which to return rows, "asc" or "desc",
        ordered by asset_code then by asset_issuer.
    :param int limit: Maximum number of records to return.
    :return: the parsed response for the assets endpoint
    :rtype: dict
    """
    query = self.__query_params(
        asset_code=asset_code,
        asset_issuer=asset_issuer,
        cursor=cursor,
        order=order,
        limit=limit,
    )
    return self.query('/assets', query)
|
This endpoint represents all assets. It will give you all the assets
in the system along with various statistics about each.
See the documentation below for details on query parameters that are
available.
`GET /assets{?asset_code,asset_issuer,cursor,limit,order}
<https://www.stellar.org/developers/horizon/reference/endpoints/assets-all.html>`_
:param str asset_code: Code of the Asset to filter by.
:param str asset_issuer: Issuer of the Asset to filter by.
:param int cursor: A paging token, specifying where to start returning records from.
:param str order: The order in which to return rows, "asc" or "desc",
ordered by asset_code then by asset_issuer.
:param int limit: Maximum number of records to return.
:return: A list of all valid payment operations
:rtype: dict
|
def update(self):
    """Push the declared `show` state to the popup, but only when it is
    already showing -- this avoids doing work during initialization.
    """
    if self.showing:
        self.set_show(self.declaration.show)
|
Update the PopupWindow if it is currently showing. This avoids
calling update during initialization.
|
def read_excel(file_name, offset=1, sheet_index=0):
    """
    Read an Excel worksheet into a list of row dicts.

    :param file_name: path of the workbook to open
    :param offset: index of the first data row; defaults to 1 because
        row 0 is treated as the header row
    :param sheet_index: index of the worksheet to read
    :return: list of dicts keyed by the lower-cased header labels;
        ``None`` when the workbook cannot be opened, ``[]`` when it has
        no sheets
    """
    try:
        workbook = xlrd.open_workbook(file_name)
    except Exception:
        return None
    sheets = workbook.sheets()
    if not sheets:
        return []
    sheet = sheets[sheet_index]
    # Normalize header labels. Cells are not guaranteed to be strings
    # (a numeric header would otherwise crash on .strip()).
    header = [str(cell).strip().lower() for cell in sheet.row_values(0)]
    raw_data = []
    for row_idx in range(offset, sheet.nrows):
        try:
            row = sheet.row_values(row_idx)
            raw_data.append({name: row[col] for col, name in enumerate(header)})
        except Exception:
            # Skip rows that cannot be mapped onto the header
            # (e.g. rows with fewer cells than header columns).
            continue
    return raw_data
|
读取 Excel
:param sheet_index:
:param file_name:
:param offset: 偏移,一般第一行是表头,不需要读取数据
:return:
|
def main():
    """
    Entry point: build a millipede from command-line arguments merged with
    the user's rc-file settings, optionally POST it to an HTTP API, and
    print it to stdout.
    """
    rc_settings = read_rcfile()
    parser = ArgumentParser(description='Millipede generator')
    parser.add_argument('-s', '--size',
                        type=int,
                        nargs="?",
                        help='the size of the millipede')
    parser.add_argument('-c', '--comment',
                        type=str,
                        help='the comment')
    parser.add_argument('-v', '--version',
                        action='version',
                        version=__version__)
    parser.add_argument('-r', '--reverse',
                        action='store_true',
                        help='reverse the millipede')
    parser.add_argument('-t', '--template',
                        help='customize your millipede')
    parser.add_argument('-p', '--position',
                        type=int,
                        help='move your millipede')
    parser.add_argument('-o', '--opposite',
                        action='store_true',
                        help='go the opposite direction')
    parser.add_argument(
        '--http-host',
        metavar="The http server to send the data",
        help='Send the millipede via an http post request'
    )
    parser.add_argument(
        '--http-auth',
        metavar='user:pass',
        help='Used to authenticate to the API ',
        # Fall back to the HTTP_AUTH environment variable.
        default=os.environ.get('HTTP_AUTH')
    )
    parser.add_argument(
        '--http-data',
        metavar='key=value',
        nargs='*',
        help='Add additional HTTP POST data'
    )
    parser.add_argument(
        '--http-name',
        metavar='name',
        help='The json variable name that will contain the millipede'
    )
    args = parser.parse_args()
    # Command-line values take precedence over rc-file values.
    settings = compute_settings(vars(args), rc_settings)
    out = millipede(
        settings['size'],
        comment=settings['comment'],
        reverse=settings['reverse'],
        template=settings['template'],
        position=settings['position'],
        opposite=settings['opposite']
    )
    if args.http_host:
        if args.http_auth:
            try:
                login, passwd = args.http_auth.split(':')
            except ValueError:
                # parser.error() exits the program with a usage message.
                parser.error(
                    "Credentials should be a string like "
                    "`user:pass'"
                )
        else:
            login = None
            passwd = None
        api_post(
            out,
            args.http_host,
            args.http_name,
            http_data=args.http_data,
            auth=(login, passwd)
        )
    print(out, end='')
|
Entry point
|
def get_requests(self):
    """
    Creates the product folder structure and collects the files to download.

    :return: a tuple ``(download_list, folder_list)`` -- the sorted download
        requests and the folders that must be created for them
    :rtype: (list(download.DownloadRequest), list)
    """
    safe = self.get_safe_struct()
    # structure_recursion populates self.download_list / self.folder_list
    # as a side effect while walking the SAFE structure.
    self.download_list = []
    self.structure_recursion(safe, self.parent_folder)
    self.sort_download_list()
    return self.download_list, self.folder_list
|
Creates product structure and returns list of files for download
:return: list of download requests
:rtype: list(download.DownloadRequest)
|
def text(self):
    """Formatted Command declaration.

    Returns the C declaration for the command: the prototype text followed
    by the parenthesized, comma-separated parameter texts.
    """
    arg_list = ', '.join(param.text for param in self.params)
    return '%s (%s)' % (self.proto_text, arg_list)
|
Formatted Command declaration.
This is the C declaration for the command.
|
def _combine_out_files(chr_files, work_dir, data):
    """Concatenate per-chromosome CNV calls into a single BED file.

    Files whose first line marks them as empty placeholders
    ("track name=empty") are skipped. Returns the combined file path;
    if it already exists, nothing is rewritten.
    """
    out_file = "%s.bed" % sshared.outname_from_inputs(chr_files)
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            with open(tx_out_file, "w") as out_handle:
                for chr_file in chr_files:
                    # Peek at the first line to detect placeholder files.
                    with open(chr_file) as check_handle:
                        empty = check_handle.readline().startswith("track name=empty")
                    if not empty:
                        with open(chr_file) as src_handle:
                            shutil.copyfileobj(src_handle, out_handle)
    return out_file
|
Concatenate all CNV calls into a single file.
|
def status_unreblog(self, id):
    """
    Un-reblog a status.

    Returns a `toot dict`_ with the status that used to be reblogged.
    """
    status_id = self.__unpack_id(id)
    endpoint = '/api/v1/statuses/{0}/unreblog'.format(str(status_id))
    return self.__api_request('POST', endpoint)
|
Un-reblog a status.
Returns a `toot dict`_ with the status that used to be reblogged.
|
def create(sub_array_id):
    """Create / register a Scheduling Block instance with SDP."""
    block_config = request.data
    # Tag the config with the canonical sub-array id, e.g. 'subarray-03'.
    block_config['sub_array_id'] = 'subarray-{:02d}'.format(sub_array_id)
    return add_scheduling_block(block_config)
|
Create / register a Scheduling Block instance with SDP.
|
def depends_on_helper(obj):
    """Resolve `obj` into a value usable for DependsOn.

    Troposphere resources are replaced by their `.title` attribute; lists
    are resolved element-wise; anything else (e.g. a plain string) is
    returned untouched. This allows more pythonic use of DependsOn.
    """
    if isinstance(obj, AWSObject):
        return obj.title
    if isinstance(obj, list):
        return [depends_on_helper(item) for item in obj]
    return obj
|
Handles using .title if the given object is a troposphere resource.
If the given object is a troposphere resource, use the `.title` attribute
of that resource. If it's a string, just use the string. This should allow
more pythonic use of DependsOn.
|
def p_contextualize_item(self, t):
    """contextualize_item : SYSTEM VAR CONFIGURE VAR
                          | SYSTEM VAR CONFIGURE VAR STEP NUMBER
                          | SYSTEM VAR CONFIGURE VAR WITH VAR"""
    # NOTE: the docstring above IS the PLY grammar for this rule; it is
    # parsed by the lexer/parser generator and must not be reworded.
    if len(t) == 5:
        # Short form: SYSTEM VAR CONFIGURE VAR
        t[0] = contextualize_item(t[2], t[4], line=t.lineno(1))
    elif t[5] == "with":
        # ... WITH VAR: an explicit contextualization tool was named.
        t[0] = contextualize_item(t[2], t[4], ctxt_tool=t[6], line=t.lineno(1))
    else:
        # ... STEP NUMBER: a numbered configuration step.
        t[0] = contextualize_item(t[2], t[4], num=t[6], line=t.lineno(1))
|
contextualize_item : SYSTEM VAR CONFIGURE VAR
| SYSTEM VAR CONFIGURE VAR STEP NUMBER
| SYSTEM VAR CONFIGURE VAR WITH VAR
|
def main(http_port, peer_name, node_name, app_id):
    """
    Runs the framework: creates a Pelix framework with the Herald bundles,
    instantiates the HTTP transport components, and blocks until stopped.

    :param http_port: HTTP port to listen to
    :param peer_name: Name of the peer
    :param node_name: Name (also, UID) of the node hosting the peer
    :param app_id: Application ID
    """
    # Create the framework with shell, HTTP transport, Herald core and
    # XML-RPC remote-services bundles pre-installed.
    framework = pelix.framework.create_framework(
        ('pelix.ipopo.core',
         'pelix.ipopo.waiting',
         'pelix.shell.core',
         'pelix.shell.ipopo',
         'pelix.shell.console',
         'pelix.http.basic',
         # Herald core
         'herald.core',
         'herald.directory',
         'herald.shell',
         # Herald HTTP
         'herald.transports.http.directory',
         'herald.transports.http.discovery_multicast',
         'herald.transports.http.servlet',
         'herald.transports.http.transport',
         # RPC
         'pelix.remote.dispatcher',
         'pelix.remote.registry',
         'herald.remote.discovery',
         'herald.remote.herald_xmlrpc',),
        {herald.FWPROP_NODE_UID: node_name,
         herald.FWPROP_NODE_NAME: node_name,
         herald.FWPROP_PEER_NAME: peer_name,
         herald.FWPROP_APPLICATION_ID: app_id})
    # Start everything
    framework.start()
    context = framework.get_bundle_context()
    # Instantiate components: the waiting list defers instantiation until
    # each component's factory becomes available.
    with use_waiting_list(context) as ipopo:
        # ... HTTP server
        ipopo.add(pelix.http.FACTORY_HTTP_BASIC, "http-server",
                  {pelix.http.HTTP_SERVICE_PORT: http_port})
        # ... HTTP reception servlet
        ipopo.add(herald.transports.http.FACTORY_SERVLET,
                  "herald-http-servlet")
        # ... HTTP multicast discovery
        ipopo.add(herald.transports.http.FACTORY_DISCOVERY_MULTICAST,
                  "herald-http-discovery-multicast")
    # Block the calling thread until the framework stops.
    framework.wait_for_stop()
|
Runs the framework
:param http_port: HTTP port to listen to
:param peer_name: Name of the peer
:param node_name: Name (also, UID) of the node hosting the peer
:param app_id: Application ID
|
def gen(self, text, start=0):
    """Return the source code in text, filled with autogenerated code
    starting at start.

    Scans comment chunks from `start`; on the first chunk whose content
    matches an autogen marker, replaces the text between the chunk and the
    next blank line with freshly generated output, then recurses on the
    updated text from just past the insertion point.
    """
    for cc in self.chunkComment(text, start):
        c = self.extractChunkContent(cc)
        cc = ''.join(cc)
        m = self.matchComment(c)
        # Locate this chunk in the text; e is the offset just past it.
        idx = text.index(cc, start)
        e = idx + len(cc)
        if m:
            assert text[idx:e] == cc
            # Old generated output runs until the next blank line
            # (or to the end of the text when there is none).
            try:
                end = text.index('\n\n', e - 1) + 1
            except ValueError:
                end = len(text)
            # Drop the stale output, regenerate, and splice the new
            # output in right after the comment chunk.
            text = text[:e] + text[end:]
            new = self.genOutputs(self.code(text), m)
            new = ''.join(new)
            text = text[:e] + new + text[e:]
            # Recurse past the inserted block to handle later chunks.
            return self.gen(text, e + len(new))
    return text
|
Return the source code in text, filled with autogenerated code
starting at start.
|
def patch_ast(node, source, sorted_children=False):
    """Patches the given node.

    After calling, each node under `node` carries a new `region` field: a
    tuple of the start and end offsets of the code that generated it.
    If `sorted_children` is true, a `sorted_children` field is also added
    to each node, listing child nodes interleaved with the whitespace and
    comments between them. Already-patched trees are returned unchanged.
    """
    already_patched = hasattr(node, 'region')
    if not already_patched:
        patcher = _PatchingASTWalker(source, children=sorted_children)
        ast.call_for_nodes(node, patcher)
    return node
|
Patches the given node
After calling, each node in `node` will have a new field named
`region` that is a tuple containing the start and end offsets
of the code that generated it.
If `sorted_children` is true, a `sorted_children` field will
be created for each node, too. It is a list containing child
nodes as well as whitespaces and comments that occur between
them.
|
def set(self, x):
    """
    Set variable values via a dictionary mapping name to value.

    Plain scalars are written into the stored array in place with
    ``itemset``; array-like values either replace the stored array, or --
    when they have more dimensions than the stored array -- are squeezed
    and written in place.
    """
    for key, val in x.items():
        if not hasattr(val, "ndim"):
            # Plain scalar: write into the existing array in place.
            self[key].value.itemset(val)
        elif self[key].value.ndim < val.ndim:
            # Higher-dimensional input: collapse singleton axes first.
            self[key].value.itemset(val.squeeze())
        else:
            self[key].value = val
|
Set variable values via a dictionary mapping name to value.
|
def set_decade_lims(axis=None, direction=None):
    r'''
    Round the axis limits outwards to whole decades (powers of ten).

    :options:

      **axis** ([``plt.gca()``] | ...)
        Axis to modify; defaults to the current axis.

      **direction** ([``None``] | ``'x'`` | ``'y'``)
        Restrict the change to one direction (default: both).
    '''
    if axis is None:
        axis = plt.gca()

    def _decades(lo, hi):
        # Floor/ceil in log10 space, back to linear space.
        return 10 ** np.floor(np.log10(lo)), 10 ** np.ceil(np.log10(hi))

    if direction in (None, 'x'):
        axis.set_xlim(list(_decades(*axis.get_xlim())))
    if direction in (None, 'y'):
        axis.set_ylim(list(_decades(*axis.get_ylim())))
|
r'''
Set limits the the floor/ceil values in terms of decades.
:options:
**axis** ([``plt.gca()``] | ...)
Specify the axis to which to apply the limits.
**direction** ([``None``] | ``'x'`` | ``'y'``)
Limit the application to a certain direction (default: both).
|
def best_match(desired_language: {str, Language}, supported_languages: list,
               min_score: int=75) -> (str, int):
    """
    Choose the best-supported language for a desired one, even when there
    is no exact match.

    Returns:

    - The best-matching language code, which will be one of the
      `supported_languages` or 'und'
    - The score of the match, from 0 to 100

    `min_score` sets the minimum match score: if every language scores
    lower, the result is ('und', 0). When several languages tie for the
    best score, the first one in `supported_languages` wins. Lowering
    `min_score` lets more things match, at the cost of possibly
    mis-handling data or upsetting users; see :func:`tag_match_score`
    for what the numbers mean.

    >>> best_match('fr', ['de', 'en', 'fr'])
    ('fr', 100)
    >>> best_match('sh', ['hr', 'bs', 'sr-Latn', 'sr-Cyrl'])
    ('sr-Latn', 100)
    >>> best_match('zh-CN', ['zh-Hant', 'zh-Hans', 'gan', 'nan'])
    ('zh-Hans', 100)
    >>> best_match('zh-CN', ['cmn-Hant', 'cmn-Hans', 'gan', 'nan'])
    ('cmn-Hans', 100)
    >>> best_match('pt', ['pt-BR', 'pt-PT'])
    ('pt-BR', 100)
    >>> best_match('en-AU', ['en-GB', 'en-US'])
    ('en-GB', 96)
    >>> best_match('es-MX', ['es-ES', 'es-419', 'en-US'])
    ('es-419', 96)
    >>> best_match('es-MX', ['es-PU', 'es-AR', 'es-PY'])
    ('es-PU', 95)
    >>> best_match('es-MX', ['es-AR', 'es-PU', 'es-PY'])
    ('es-AR', 95)
    >>> best_match('zsm', ['id', 'mhp'])
    ('id', 86)
    >>> best_match('eu', ['el', 'en', 'es'])
    ('es', 90)
    >>> best_match('eu', ['el', 'en', 'es'], min_score=92)
    ('und', 0)
    """
    # Fast path: the requested tag is supported verbatim.
    if desired_language in supported_languages:
        return desired_language, 100
    # Normalize the tag and try again before doing fuzzy matching.
    desired_language = standardize_tag(desired_language)
    if desired_language in supported_languages:
        return desired_language, 100
    # Score every candidate, keep those above the threshold, and fall
    # back to 'und'. `max` returns the FIRST maximal item, preserving
    # the original first-in-list tie-breaking behavior.
    scored = []
    for supported in supported_languages:
        score = tag_match_score(desired_language, supported)
        if score >= min_score:
            scored.append((supported, score))
    scored.append(('und', 0))
    return max(scored, key=lambda pair: pair[1])
|
You have software that supports any of the `supported_languages`. You want
to use `desired_language`. This function lets you choose the right language,
even if there isn't an exact match.
Returns:
- The best-matching language code, which will be one of the
`supported_languages` or 'und'
- The score of the match, from 0 to 100
`min_score` sets the minimum match score. If all languages match with a lower
score than that, the result will be 'und' with a score of 0.
When there is a tie for the best matching language, the first one in the
tie will be used.
Setting `min_score` lower will enable more things to match, at the cost
of possibly mis-handling data or upsetting users. Read the documentation
for :func:`tag_match_score` to understand what the numbers mean.
>>> best_match('fr', ['de', 'en', 'fr'])
('fr', 100)
>>> best_match('sh', ['hr', 'bs', 'sr-Latn', 'sr-Cyrl'])
('sr-Latn', 100)
>>> best_match('zh-CN', ['zh-Hant', 'zh-Hans', 'gan', 'nan'])
('zh-Hans', 100)
>>> best_match('zh-CN', ['cmn-Hant', 'cmn-Hans', 'gan', 'nan'])
('cmn-Hans', 100)
>>> best_match('pt', ['pt-BR', 'pt-PT'])
('pt-BR', 100)
>>> best_match('en-AU', ['en-GB', 'en-US'])
('en-GB', 96)
>>> best_match('es-MX', ['es-ES', 'es-419', 'en-US'])
('es-419', 96)
>>> best_match('es-MX', ['es-PU', 'es-AR', 'es-PY'])
('es-PU', 95)
>>> best_match('es-MX', ['es-AR', 'es-PU', 'es-PY'])
('es-AR', 95)
>>> best_match('zsm', ['id', 'mhp'])
('id', 86)
>>> best_match('eu', ['el', 'en', 'es'])
('es', 90)
>>> best_match('eu', ['el', 'en', 'es'], min_score=92)
('und', 0)
|
def loads(cls, json_data):
    """Deserialize `json_data` into an instance of `cls` via its
    marshmallow schema.

    Raises ValidationError (with the schema messages attached as `extra`)
    when the payload does not validate.
    """
    try:
        parsed = cls.MARSHMALLOW_SCHEMA.loads(json_data)
        return cls(**parsed)
    except marshmallow.exceptions.ValidationError as err:
        raise ValidationError("Failed to load message", extra=err.args[0])
|
description of load
|
def join(*args, **kwargs):
    """Join parts of a path together.

    A single list argument is expanded into its components; otherwise the
    positional arguments are joined directly.
    """
    import os.path
    first = args[0]
    if _is_list(first):
        return os.path.join(*first)
    return os.path.join(*args, **kwargs)
|
Join parts of a path together
|
def send(data, channels=None, push_time=None, expiration_time=None, expiration_interval=None, where=None, cql=None):
    """
    Send a push notification. The return value corresponds to this push's
    row in the _Notification table; call fetch() on it if you need its
    data synchronized locally.

    :param channels: channels to push to
    :type channels: list or tuple
    :param push_time: time at which to deliver the push
    :type push_time: datetime
    :param expiration_time: absolute datetime at which the message expires
    :type expiration_time: datetime
    :param expiration_interval: relative expiry in seconds, counted from
        the time of the API call
    :type expiration_interval: int
    :param where: a leancloud.Query against the _Installation table
        selecting the target installations
    :type where: leancloud.Query
    :param cql: a CQL statement querying the _Installation table
    :type cql: string_types
    :param data: payload delivered to the devices, see
        https://leancloud.cn/docs/push_guide.html#消息内容_Data
    :rtype: Notification
    """
    # The two expiry forms are mutually exclusive.
    if expiration_interval and expiration_time:
        raise TypeError('Both expiration_time and expiration_interval can\'t be set')
    params = {
        'data': data,
    }
    if client.USE_PRODUCTION == '0':
        params['prod'] = 'dev'
    if channels:
        params['channels'] = channels
    if push_time:
        # Naive datetimes are interpreted in the local timezone, then
        # converted to UTC in the ISO-8601 'Z' form the API expects.
        tzinfo = push_time.tzinfo
        if tzinfo is None:
            tzinfo = tz.tzlocal()
        params['push_time'] = arrow.get(push_time, tzinfo).to('utc').format('YYYY-MM-DDTHH:mm:ss.SSS') + 'Z'
    if expiration_time:
        params['expiration_time'] = expiration_time.isoformat()
    if expiration_interval:
        params['expiration_interval'] = expiration_interval
    if where:
        params['where'] = where.dump().get('where', {})
    if cql:
        params['cql'] = cql
    result = client.post('/push', params=params).json()
    # Only the objectId is returned; the Notification is created lazily.
    notification = Notification.create_without_data(result['objectId'])
    return notification
|
发送推送消息。返回结果为此条推送对应的 _Notification 表中的对象,但是如果需要使用其中的数据,需要调用 fetch() 方法将数据同步至本地。
:param channels: 需要推送的频道
:type channels: list or tuple
:param push_time: 推送的时间
:type push_time: datetime
:param expiration_time: 消息过期的绝对日期时间
:type expiration_time: datetime
:param expiration_interval: 消息过期的相对时间,从调用 API 的时间开始算起,单位是秒
:type expiration_interval: int
:param where: 一个查询 _Installation 表的查询条件 leancloud.Query 对象
:type where: leancloud.Query
:param cql: 一个查询 _Installation 表的查询条件 CQL 语句
:type cql: string_types
:param data: 推送给设备的具体信息,详情查看 https://leancloud.cn/docs/push_guide.html#消息内容_Data
:rtype: Notification
|
def markup_description(description):
    """
    Apply HTML markup to the given description.

    Uses markdown when available; otherwise escapes the text and converts
    newlines to ``<br />`` inside a paragraph.
    """
    if apply_markdown:
        html = apply_markdown(description)
    else:
        html = '<p>' + escape(description).replace('\n', '<br />') + '</p>'
    return mark_safe(html)
|
Apply HTML markup to the given description.
|
def _augment_file(self, f):
    """
    Augment a FileRecord with methods to get the data URL and to download,
    returning the updated file for use in generator functions

    :internal:
    """
    def get_url(target):
        # No recorded size means there is no content to point at.
        if target.file_size is None:
            return None
        if target.file_name is not None:
            return self.base_url + '/files/content/{0}/{1}'.format(target.file_id.hex, target.file_name)
        else:
            return self.base_url + '/files/content/{0}'.format(target.file_id.hex, )
    # Bind as an instance method so the record can compute its own URL.
    f.get_url = types.MethodType(get_url, f)

    def download_to(target, file_name):
        # Stream the content to disk in 1 KiB chunks.
        url = target.get_url()
        r = requests.get(url, stream=True)
        with open(file_name, 'wb') as file_to_write:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:  # filter out keep-alive new chunks
                    file_to_write.write(chunk)
                    file_to_write.flush()
        return file_name
    f.download_to = types.MethodType(download_to, f)
    return f
|
Augment a FileRecord with methods to get the data URL and to download, returning the updated file for use
in generator functions
:internal:
|
def add_state_errors(self, errors):
    """
    Add state errors.

    Accepts a list of errors (or a single Error) produced by validators
    applied to the entity as a whole; they accumulate under the
    '__state__' key of the errors dict.

    :param errors: list or Error, entity state validation errors
    :return: shiftschema.result.Result
    """
    if not self.errors:
        self.errors = dict()
    state_errors = self.errors.setdefault('__state__', [])
    if type(errors) is not list:
        errors = [errors]
    for item in errors:
        if not isinstance(item, Error):
            err = 'Error must be of type {}'
            raise x.InvalidErrorType(err.format(Error))
        state_errors.append(item)
    return self
|
Add state errors
Accepts a list of errors (or a single Error) coming from validators
applied to entity as whole that are used for entity state validation
The errors will exist on a __state__ property of the errors object.
:param errors: list or Error, list of entity state validation errors
:return: shiftschema.result.Result
|
def to_serializable_dict(self, attrs_to_serialize=None,
                         rels_to_expand=None,
                         rels_to_serialize=None,
                         key_modifications=None):
    """
    An alias for `todict`: forwards every keyword argument unchanged.
    """
    forwarded = dict(
        attrs_to_serialize=attrs_to_serialize,
        rels_to_expand=rels_to_expand,
        rels_to_serialize=rels_to_serialize,
        key_modifications=key_modifications,
    )
    return self.todict(**forwarded)
|
An alias for `todict`
|
def render_mail_template(subject_template, body_template, context):
    """
    Renders both the subject and body templates in the given context.

    The subject is collapsed to a single line (mail headers must not
    contain newlines). Returns a tuple (subject, body) of the result.
    """
    # The original wrapped these calls in a no-op `try/finally: pass`;
    # it had no effect, so it has been removed.
    subject = strip_spaces(render_to_string(subject_template, context))
    body = render_to_string(body_template, context)
    return subject, body
|
Renders both the subject and body templates in the given context.
Returns a tuple (subject, body) of the result.
|
def tplot_save(names, filename=None):
    """
    This function will save tplot variables into a single file by using the
    python "pickle" function. The file can then be "restored" using
    tplot_restore, which reads all variables and plot options back into
    tplot. Useful to end a pytplot session without losing data/options.

    Parameters:
        names : int/str/list
            A string or list of strings naming the tplot variables to save;
            an int is treated as a 1-based index into the stored variables.
        filename : str, optional
            The filename where you want to save the file.

    Returns:
        None

    Examples:
        >>> # Save a single tplot variable
        >>> import pytplot
        >>> x_data = [1,2,3,4,5]
        >>> y_data = [1,2,3,4,5]
        >>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data})
        >>> pytplot.ylim('Variable1', 2, 4)
        >>> pytplot.save('Variable1', filename='C:/temp/variable1.pytplot')
    """
    if isinstance(names, int):
        names = list(data_quants.keys())[names - 1]
    if not isinstance(names, list):
        names = [names]
    # Pseudo-variables store the list of names they combine; those must be
    # saved too. Appending while iterating is deliberate: newly appended
    # names are themselves visited, pulling in transitive dependencies.
    for name in names:
        if isinstance(data_quants[name].data, list):
            for data_name in data_quants[name].data:
                if data_name not in names:
                    names.append(data_name)
    # Pickle it up: [count, variable..., global plot options]
    to_pickle = []
    for name in names:
        if name not in data_quants.keys():
            print("That name is currently not in pytplot")
            return
        to_pickle.append(data_quants[name])
    num_quants = len(to_pickle)
    to_pickle = [num_quants] + to_pickle
    to_pickle.append(tplot_opt_glob)
    if filename is None:  # was `filename==None`; identity test is idiomatic
        filename = 'var_' + '-'.join(names) + '.pytplot'
    # Use a context manager so the file handle is closed (the original
    # leaked the handle from pickle.dump(..., open(...))).
    with open(filename, "wb") as out_file:
        pickle.dump(to_pickle, out_file)
    return
|
This function will save tplot variables into a single file by using the python "pickle" function.
This file can then be "restored" using tplot_restore. This is useful if you want to end the pytplot session,
but save all of your data/options. All variables and plot options can be read back into tplot with the
"tplot_restore" command.
Parameters:
names : str/list
A string or a list of strings of the tplot variables you would like saved.
filename : str, optional
The filename where you want to save the file.
Returns:
None
Examples:
>>> # Save a single tplot variable
>>> import pytplot
>>> x_data = [1,2,3,4,5]
>>> y_data = [1,2,3,4,5]
>>> pytplot.store_data("Variable1", data={'x':x_data, 'y':y_data})
>>> pytplot.ylim('Variable1', 2, 4)
>>> pytplot.save('Variable1', filename='C:/temp/variable1.pytplot')
|
def send_select_and_operate_command_set(self, command_set, callback=asiodnp3.PrintingCommandCallback.Get(),
                                        config=opendnp3.TaskConfig().Default()):
    """
    Select and operate a set of commands

    :param command_set: set of command headers
    :param callback: callback that will be invoked upon completion or failure
    :param config: optional configuration that controls normal callbacks and allows the user to be specified for SA

    NOTE(review): both default arguments are evaluated once at definition
    time and shared across calls -- presumably intentional for these
    library singletons, but verify against the opendnp3 bindings.
    """
    self.master.SelectAndOperate(command_set, callback, config)
|
Select and operate a set of commands
:param command_set: set of command headers
:param callback: callback that will be invoked upon completion or failure
:param config: optional configuration that controls normal callbacks and allows the user to be specified for SA
|
def get_ref_dict(self, schema):
    """Build a dictionary containing a JSON reference to `schema` in the
    spec.

    Schemas flagged with `many` are wrapped in an array schema whose items
    are the reference.
    """
    schema_key = make_schema_key(schema)
    reference = build_reference(
        "schema", self.openapi_version.major, self.refs[schema_key]
    )
    if getattr(schema, "many", False):
        return {"type": "array", "items": reference}
    return reference
|
Method to create a dictionary containing a JSON reference to the
schema in the spec
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.