code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def getitem(self, index, context=None):
    """Look up a single item of this container node.

    :param index: The node to use as a subscript index.
    :type index: Const or Slice
    """
    # Delegate to the shared container-subscript helper over our elements.
    elements = self.elts
    return _container_getitem(self, elements, index, context=context)
|
Get an item from this node.
:param index: The node to use as a subscript index.
:type index: Const or Slice
|
def mlp(feature, hparams, name="mlp"):
    """Multi-layer perceptron with dropout and relu activation.

    Applies `hparams.num_mlp_layers` blocks of
    dense -> layer-norm -> relu -> dropout to `feature`.
    """
    with tf.variable_scope(name, "mlp", values=[feature]):
        for _ in range(hparams.num_mlp_layers):
            feature = common_layers.dense(feature, hparams.mlp_size,
                                          activation=None)
            # Log the per-position feature norm for debugging/monitoring.
            utils.collect_named_outputs("norms", "mlp_feature",
                                        tf.norm(feature, axis=-1))
            feature = common_layers.layer_norm(feature)
            feature = tf.nn.relu(feature)
            feature = tf.nn.dropout(feature, keep_prob=1. - hparams.dropout)
        return feature
|
Multi layer perceptron with dropout and relu activation.
|
def colorize(occurence, maxoccurence, minoccurence):
    '''A formula for determining colors.

    Maps an occurrence count to an RGB triple: the maximum count is pure
    red, the minimum is pure blue, and everything in between gets red
    and blue channels scaled by its position in the range.
    '''
    if occurence == maxoccurence:
        return (255, 0, 0)
    if occurence == minoccurence:
        return (0, 0, 255)
    red = int(float(occurence) / maxoccurence * 255)
    blue = int(float(minoccurence) / occurence * 255)
    return (red, 0, blue)
|
A formula for determining colors.
|
def find_usage(self):
    """
    Determine the current usage for each limit of this service,
    and update corresponding Limit via
    :py:meth:`~.AwsLimit._add_current_usage`.
    """
    logger.debug("Checking usage for service %s", self.service_name)
    self.connect()
    # Drop any stale usage data before re-querying the service.
    for limit in self.limits.values():
        limit._reset_usage()
    self._find_usage_applications()
    self._find_usage_application_versions()
    self._find_usage_environments()
    self._have_usage = True
    logger.debug("Done checking usage.")
|
Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`.
|
def to_table(self, sort_key="wall_time", stop=None):
    """Return a table (list of lists) with timer data.

    The first row holds the field names; each subsequent row is one
    timer section (ordered by `sort_key`, truncated at `stop` rows).
    """
    sections = self.order_sections(sort_key)
    if stop is not None:
        sections = sections[:stop]
    header = [list(AbinitTimerSection.FIELDS)]
    rows = [[str(item) for item in sect.to_tuple()] for sect in sections]
    return header + rows
|
Return a table (list of lists) with timer data
|
def _on_set_auth(self, sock, token):
    """Handle a set-auth request received from the websocket."""
    # Log first, then store the token on the socket.
    self.log.info(f"Token received: {token}")
    sock.setAuthtoken(token)
|
Set Auth request received from websocket
|
def scale(self, new_max_value=1):
    """
    Scale R, G and B parameters
    :param new_max_value: how much to scale
    :return: a new ColorRGB instance
    """
    # Rescale every channel so the colour keeps its hue at the new maximum.
    factor = new_max_value / self.max
    channels = (self.r * factor, self.g * factor, self.b * factor)
    return ColorRGB(*channels, max_value=new_max_value)
|
Scale R, G and B parameters
:param new_max_value: how much to scale
:return: a new ColorRGB instance
|
def _front_delta(self):
    """Return the offset of the colored part.

    :return: a ``Separator(dx, dy)`` with the same value on both axes.
    """
    if self.flags & self.NO_MOVE:
        # Movement disabled for this widget: never offset the front.
        return Separator(0, 0)
    if self.clicked and self.hovered:  # the mouse is over the button
        delta = 2
    elif self.hovered and not self.flags & self.NO_HOVER:
        # NOTE(review): this branch produces the same delta as the plain
        # `else` below, so hovering currently has no visible effect; it
        # looks like a distinct value (e.g. 1) may have been intended —
        # confirm against the widget's visual design before changing.
        delta = 0
    else:
        delta = 0
    return Separator(delta, delta)
|
Return the offset of the colored part.
|
def bulkCmd(snmpDispatcher, authData, transportTarget,
            nonRepeaters, maxRepetitions, *varBinds, **options):
    r"""Initiate SNMP GETBULK query over SNMPv2c.
    Based on passed parameters, prepares SNMP GETBULK packet
    (:RFC:`1905#section-4.2.3`) and schedules its transmission by
    I/O framework at a later point of time.
    Parameters
    ----------
    snmpDispatcher: :py:class:`~pysnmp.hlapi.v1arch.asyncore.SnmpDispatcher`
        Class instance representing SNMP dispatcher.
    authData: :py:class:`~pysnmp.hlapi.v1arch.CommunityData` or :py:class:`~pysnmp.hlapi.v1arch.UsmUserData`
        Class instance representing SNMP credentials.
    transportTarget: :py:class:`~pysnmp.hlapi.v1arch.asyncore.UdpTransportTarget` or :py:class:`~pysnmp.hlapi.v1arch.asyncore.Udp6TransportTarget`
        Class instance representing transport type along with SNMP peer
        address.
    nonRepeaters: int
        One MIB variable is requested in response for the first
        `nonRepeaters` MIB variables in request.
    maxRepetitions: int
        `maxRepetitions` MIB variables are requested in response for each
        of the remaining MIB variables in the request (e.g. excluding
        `nonRepeaters`). Remote SNMP dispatcher may choose lesser value than
        requested.
    \*varBinds: :py:class:`~pysnmp.smi.rfc1902.ObjectType`
        One or more class instances representing MIB variables to place
        into SNMP request.
    Other Parameters
    ----------------
    \*\*options :
        Request options:
        * `lookupMib` - load MIB and resolve response MIB variables at
          the cost of slightly reduced performance. Default is `True`.
        * `cbFun` (callable) - user-supplied callable that is invoked
          to pass SNMP response data or error to user at a later point
          of time. Default is `None`.
        * `cbCtx` (object) - user-supplied object passing additional
          parameters to/from `cbFun`. Default is `None`.
    Notes
    -----
    User-supplied `cbFun` callable must have the following call
    signature:
    * snmpDispatcher (:py:class:`~pysnmp.hlapi.v1arch.snmpDispatcher`):
      Class instance representing SNMP dispatcher.
    * stateHandle (int): Unique request identifier. Can be used
      for matching multiple ongoing requests with received responses.
    * errorIndication (str): True value indicates SNMP dispatcher error.
    * errorStatus (str): True value indicates SNMP PDU error.
    * errorIndex (int): Non-zero value refers to `varBinds[errorIndex-1]`
    * varBindTable (tuple): A sequence of sequences (e.g. 2-D array) of
      variable-bindings represented as :class:`tuple` or
      :py:class:`~pysnmp.smi.rfc1902.ObjectType` class instances
      representing a table of MIB variables returned in SNMP response, with
      up to ``maxRepetitions`` rows, i.e. ``len(varBindTable) <= maxRepetitions``.
      For ``0 <= i < len(varBindTable)`` and ``0 <= j < len(varBinds)``,
      ``varBindTable[i][j]`` represents:
      - For non-repeaters (``j < nonRepeaters``), the first lexicographic
        successor of ``varBinds[j]``, regardless the value of ``i``, or an
        :py:class:`~pysnmp.smi.rfc1902.ObjectType` instance with the
        :py:obj:`~pysnmp.proto.rfc1905.endOfMibView` value if no such successor
        exists;
      - For repeaters (``j >= nonRepeaters``), the ``i``-th lexicographic
        successor of ``varBinds[j]``, or an
        :py:class:`~pysnmp.smi.rfc1902.ObjectType` instance with the
        :py:obj:`~pysnmp.proto.rfc1905.endOfMibView` value if no such successor
        exists.
      See :rfc:`3416#section-4.2.3` for details on the underlying
      ``GetBulkRequest-PDU`` and the associated ``GetResponse-PDU``, such as
      specific conditions under which the server may truncate the response,
      causing ``varBindTable`` to have less than ``maxRepetitions`` rows.
    * `cbCtx` (object): Original user-supplied object.
    Returns
    -------
    stateHandle : int
        Unique request identifier. Can be used for matching received
        responses with ongoing requests.
    Raises
    ------
    PySnmpError
        Or its derivative indicating that an error occurred while
        performing SNMP operation.
    Examples
    --------
    >>> from pysnmp.hlapi.v1arch.asyncore import *
    >>>
    >>> def cbFun(snmpDispatcher, stateHandle, errorIndication,
    >>>           errorStatus, errorIndex, varBinds, cbCtx):
    >>>     print(errorIndication, errorStatus, errorIndex, varBinds)
    >>>
    >>> snmpDispatcher = snmpDispatcher()
    >>>
    >>> stateHandle = bulkCmd(
    >>>     snmpDispatcher,
    >>>     CommunityData('public'),
    >>>     UdpTransportTarget(('demo.snmplabs.com', 161)),
    >>>     0, 2,
    >>>     ('1.3.6.1.2.1.1', None),
    >>>     cbFun=cbFun
    >>> )
    >>>
    >>> snmpDispatcher.transportDispatcher.runDispatcher()
    """
    # Low-level dispatcher callback: unpacks the response PDU, relays it to
    # the user's cbFun, and (if cbFun returns new var-binds) re-sends the
    # request to continue walking the MIB.
    def _cbFun(snmpDispatcher, stateHandle, errorIndication, rspPdu, _cbCtx):
        if not cbFun:
            # No user callback registered: nothing to report.
            return
        if errorIndication:
            # Transport/dispatcher-level failure: report with zeroed
            # status/index and no var-binds.
            cbFun(errorIndication, pMod.Integer(0), pMod.Integer(0), None,
                  cbCtx=cbCtx, snmpDispatcher=snmpDispatcher, stateHandle=stateHandle)
            return
        errorStatus = pMod.apiBulkPDU.getErrorStatus(rspPdu)
        errorIndex = pMod.apiBulkPDU.getErrorIndex(rspPdu)
        varBindTable = pMod.apiBulkPDU.getVarBindTable(reqPdu, rspPdu)
        # Derive the var-binds to use for a potential continuation request
        # from the last row of the response table.
        errorIndication, nextVarBinds = pMod.apiBulkPDU.getNextVarBinds(
            varBindTable[-1], errorIndex=errorIndex
        )
        if options.get('lookupMib'):
            # Resolve raw var-binds against loaded MIBs for the user.
            varBindTable = [
                VB_PROCESSOR.unmakeVarBinds(snmpDispatcher.cache, vbs) for vbs in varBindTable
            ]
        nextStateHandle = pMod.getNextRequestID()
        # The user callback may return replacement var-binds to continue
        # the walk; a falsy return stops it.
        nextVarBinds = cbFun(errorIndication, errorStatus, errorIndex, varBindTable,
                             cbCtx=cbCtx,
                             snmpDispatcher=snmpDispatcher,
                             stateHandle=stateHandle,
                             nextStateHandle=nextStateHandle,
                             nextVarBinds=nextVarBinds)
        if not nextVarBinds:
            return
        # Reuse the request PDU with a fresh request-id and the new binds.
        pMod.apiBulkPDU.setRequestID(reqPdu, nextStateHandle)
        pMod.apiBulkPDU.setVarBinds(reqPdu, nextVarBinds)
        return snmpDispatcher.sendPdu(authData, transportTarget, reqPdu, cbFun=_cbFun)

    if authData.mpModel < 1:
        # GETBULK does not exist in SNMPv1 (mpModel 0).
        raise error.PySnmpError('GETBULK PDU is only supported in SNMPv2c and SNMPv3')
    # NOTE(review): the docstring says `lookupMib` defaults to True, but a
    # missing key yields None (falsy) here and in _cbFun above — confirm
    # whether callers pre-populate options or the default is wrong.
    lookupMib, cbFun, cbCtx = [options.get(x) for x in ('lookupMib', 'cbFun', 'cbCtx')]
    if lookupMib:
        varBinds = VB_PROCESSOR.makeVarBinds(snmpDispatcher.cache, varBinds)
    pMod = api.PROTOCOL_MODULES[authData.mpModel]
    # Build the GETBULK request PDU.
    reqPdu = pMod.GetBulkRequestPDU()
    pMod.apiBulkPDU.setDefaults(reqPdu)
    pMod.apiBulkPDU.setNonRepeaters(reqPdu, nonRepeaters)
    pMod.apiBulkPDU.setMaxRepetitions(reqPdu, maxRepetitions)
    pMod.apiBulkPDU.setVarBinds(reqPdu, varBinds)
    return snmpDispatcher.sendPdu(authData, transportTarget, reqPdu, cbFun=_cbFun)
|
Initiate SNMP GETBULK query over SNMPv2c.
Based on passed parameters, prepares SNMP GETBULK packet
(:RFC:`1905#section-4.2.3`) and schedules its transmission by
I/O framework at a later point of time.
Parameters
----------
snmpDispatcher: :py:class:`~pysnmp.hlapi.v1arch.asyncore.SnmpDispatcher`
Class instance representing SNMP dispatcher.
authData: :py:class:`~pysnmp.hlapi.v1arch.CommunityData` or :py:class:`~pysnmp.hlapi.v1arch.UsmUserData`
Class instance representing SNMP credentials.
transportTarget: :py:class:`~pysnmp.hlapi.v1arch.asyncore.UdpTransportTarget` or :py:class:`~pysnmp.hlapi.v1arch.asyncore.Udp6TransportTarget`
Class instance representing transport type along with SNMP peer
address.
nonRepeaters: int
One MIB variable is requested in response for the first
`nonRepeaters` MIB variables in request.
maxRepetitions: int
`maxRepetitions` MIB variables are requested in response for each
of the remaining MIB variables in the request (e.g. excluding
`nonRepeaters`). Remote SNMP dispatcher may choose lesser value than
requested.
\*varBinds: :py:class:`~pysnmp.smi.rfc1902.ObjectType`
One or more class instances representing MIB variables to place
into SNMP request.
Other Parameters
----------------
\*\*options :
Request options:
* `lookupMib` - load MIB and resolve response MIB variables at
the cost of slightly reduced performance. Default is `True`.
* `cbFun` (callable) - user-supplied callable that is invoked
to pass SNMP response data or error to user at a later point
of time. Default is `None`.
* `cbCtx` (object) - user-supplied object passing additional
parameters to/from `cbFun`. Default is `None`.
Notes
-----
User-supplied `cbFun` callable must have the following call
signature:
* snmpDispatcher (:py:class:`~pysnmp.hlapi.v1arch.snmpDispatcher`):
Class instance representing SNMP dispatcher.
* stateHandle (int): Unique request identifier. Can be used
for matching multiple ongoing requests with received responses.
* errorIndication (str): True value indicates SNMP dispatcher error.
* errorStatus (str): True value indicates SNMP PDU error.
* errorIndex (int): Non-zero value refers to `varBinds[errorIndex-1]`
* varBindTable (tuple): A sequence of sequences (e.g. 2-D array) of
variable-bindings represented as :class:`tuple` or
:py:class:`~pysnmp.smi.rfc1902.ObjectType` class instances
representing a table of MIB variables returned in SNMP response, with
up to ``maxRepetitions`` rows, i.e. ``len(varBindTable) <= maxRepetitions``.
For ``0 <= i < len(varBindTable)`` and ``0 <= j < len(varBinds)``,
``varBindTable[i][j]`` represents:
- For non-repeaters (``j < nonRepeaters``), the first lexicographic
successor of ``varBinds[j]``, regardless the value of ``i``, or an
:py:class:`~pysnmp.smi.rfc1902.ObjectType` instance with the
:py:obj:`~pysnmp.proto.rfc1905.endOfMibView` value if no such successor
exists;
- For repeaters (``j >= nonRepeaters``), the ``i``-th lexicographic
successor of ``varBinds[j]``, or an
:py:class:`~pysnmp.smi.rfc1902.ObjectType` instance with the
:py:obj:`~pysnmp.proto.rfc1905.endOfMibView` value if no such successor
exists.
See :rfc:`3416#section-4.2.3` for details on the underlying
``GetBulkRequest-PDU`` and the associated ``GetResponse-PDU``, such as
specific conditions under which the server may truncate the response,
causing ``varBindTable`` to have less than ``maxRepetitions`` rows.
* `cbCtx` (object): Original user-supplied object.
Returns
-------
stateHandle : int
Unique request identifier. Can be used for matching received
responses with ongoing requests.
Raises
------
PySnmpError
Or its derivative indicating that an error occurred while
performing SNMP operation.
Examples
--------
>>> from pysnmp.hlapi.v1arch.asyncore import *
>>>
>>> def cbFun(snmpDispatcher, stateHandle, errorIndication,
>>> errorStatus, errorIndex, varBinds, cbCtx):
>>> print(errorIndication, errorStatus, errorIndex, varBinds)
>>>
>>> snmpDispatcher = snmpDispatcher()
>>>
>>> stateHandle = bulkCmd(
>>> snmpDispatcher,
>>> CommunityData('public'),
>>> UdpTransportTarget(('demo.snmplabs.com', 161)),
>>> 0, 2,
>>> ('1.3.6.1.2.1.1', None),
>>> cbFun=cbFun
>>> )
>>>
>>> snmpDispatcher.transportDispatcher.runDispatcher()
|
def append(self, symbol, metadata, start_time=None):
    """
    Update metadata entry for `symbol`

    Parameters
    ----------
    symbol : `str`
        symbol name for the item
    metadata : `dict`
        to be persisted
    start_time : `datetime.datetime`
        when metadata becomes effective
        Default: datetime.datetime.utcnow()
    """
    if start_time is None:
        start_time = dt.utcnow()
    # Fetch the most recent existing entry for this symbol, if any.
    last = self.find_one({'symbol': symbol}, sort=[('start_time', pymongo.DESCENDING)])
    if last is not None:
        if last['start_time'] >= start_time:
            raise ValueError('start_time={} is earlier than the last metadata @{}'.format(start_time,
                                                                                          last['start_time']))
        if last['metadata'] == metadata:
            # Identical metadata: keep the existing entry untouched.
            return last
    elif metadata is None:
        # Nothing stored and nothing to store.
        return
    # Close out the previous (still-open) entry at the new start time.
    self.find_one_and_update({'symbol': symbol}, {'$set': {'end_time': start_time}},
                             sort=[('start_time', pymongo.DESCENDING)])
    document = {'_id': bson.ObjectId(), 'symbol': symbol,
                'metadata': metadata, 'start_time': start_time}
    mongo_retry(self.insert_one)(document)
    logger.debug('Finished writing metadata for %s', symbol)
    return document
|
Update metadata entry for `symbol`
Parameters
----------
symbol : `str`
symbol name for the item
metadata : `dict`
to be persisted
start_time : `datetime.datetime`
when metadata becomes effective
Default: datetime.datetime.utcnow()
|
def loop_position(self):
    """Return the current sort in the loop."""
    current = glances_processes.sort_key
    for position, candidate in enumerate(self._sort_loop):
        if candidate == current:
            return position
    # Fall back to the first entry when the current key is not in the loop.
    return 0
|
Return the current sort in the loop
|
def adj_nodes_gcp(gcp_nodes):
    """Adjust details specific to GCP."""
    for node in gcp_nodes:
        # Normalize cloud labels and flatten IP lists to strings.
        node.cloud = "gcp"
        node.cloud_disp = "GCP"
        node.private_ips = ip_to_str(node.private_ips)
        node.public_ips = ip_to_str(node.public_ips)
        # The zone object lives in the provider-specific `extra` payload.
        node.zone = node.extra['zone'].name
    return gcp_nodes
|
Adjust details specific to GCP.
|
def add_job(self, task, inputdata, debug=False):
    """
    Add a job in the queue and returns a submission id.
    :param task: Task instance
    :type task: inginious.frontend.tasks.WebAppTask
    :param inputdata: the input as a dictionary
    :type inputdata: dict
    :param debug: If debug is true, more debug data will be saved
    :type debug: bool or string
    :returns: the new submission id and the removed submission id
    :raises Exception: if no user is logged in, or if a submission is
        already waiting for this task
    """
    if not self._user_manager.session_logged_in():
        raise Exception("A user must be logged in to submit an object")
    username = self._user_manager.session_username()
    # Prevent student from submitting several submissions together
    waiting_submission = self._database.submissions.find_one({
        "courseid": task.get_course_id(),
        "taskid": task.get_id(),
        "username": username,
        "status": "waiting"})
    if waiting_submission is not None:
        raise Exception("A submission is already pending for this task!")
    # Skeleton of the submission document stored in MongoDB.
    obj = {
        "courseid": task.get_course_id(),
        "taskid": task.get_id(),
        "status": "waiting",
        "submitted_on": datetime.now(),
        "username": [username],
        "response_type": task.get_response_type()
    }
    # Send additional data to the client in inputdata. For now, the username and the language. New fields can be added with the
    # new_submission hook
    inputdata["@username"] = username
    inputdata["@lang"] = self._user_manager.session_language()
    # Retrieve input random
    states = self._database.user_tasks.find_one({"courseid": task.get_course_id(), "taskid": task.get_id(), "username": username}, {"random": 1, "state": 1})
    inputdata["@random"] = states["random"] if "random" in states else []
    inputdata["@state"] = states["state"] if "state" in states else ""
    self._hook_manager.call_hook("new_submission", submission=obj, inputdata=inputdata)
    # Persist the raw input in GridFS as BSON; only the id goes in `obj`.
    obj["input"] = self._gridfs.put(bson.BSON.encode(inputdata))
    self._before_submission_insertion(task, inputdata, debug, obj)
    submissionid = self._database.submissions.insert(obj)
    to_remove = self._after_submission_insertion(task, inputdata, debug, obj, submissionid)
    # Forwards remote-debug (SSH) connection details for this submission.
    ssh_callback = lambda host, port, password: self._handle_ssh_callback(submissionid, host, port, password)
    jobid = self._client.new_job(task, inputdata,
                                 (lambda result, grade, problems, tests, custom, state, archive, stdout, stderr:
                                  self._job_done_callback(submissionid, task, result, grade, problems, tests, custom, state, archive, stdout, stderr, True)),
                                 "Frontend - {}".format(username), debug, ssh_callback)
    # Record the backend job id while the submission is still "waiting";
    # the status filter avoids clobbering an already-finished submission.
    self._database.submissions.update(
        {"_id": submissionid, "status": "waiting"},
        {"$set": {"jobid": jobid}}
    )
    self._logger.info("New submission from %s - %s - %s/%s - %s", self._user_manager.session_username(),
                      self._user_manager.session_email(), task.get_course_id(), task.get_id(),
                      web.ctx['ip'])
    return submissionid, to_remove
|
Add a job in the queue and returns a submission id.
:param task: Task instance
:type task: inginious.frontend.tasks.WebAppTask
:param inputdata: the input as a dictionary
:type inputdata: dict
:param debug: If debug is true, more debug data will be saved
:type debug: bool or string
:returns: the new submission id and the removed submission id
|
def switch_off(self):
    """Turn the switch off."""
    ok = self.set_status(CONST.STATUS_OFF_INT)
    if ok:
        # Mirror the new state locally so reads don't need a refresh.
        self._json_state['status'] = CONST.STATUS_OFF
    return ok
|
Turn the switch off.
|
def alter_old_distutils_request(request: WSGIRequest):
    r"""Alter the request body for compatibility with older distutils clients
    Due to a bug in the Python distutils library, the request post is sent
    using \n as a separator instead of the \r\n that the HTTP spec demands.
    This breaks the Django form parser and therefore we have to write a
    custom parser.
    This bug was fixed in the Python 2.7.4 and 3.4:
    http://bugs.python.org/issue10510
    """
    # We first need to retrieve the body before accessing POST or FILES since
    # it can only be read once.
    body = request.body
    # If Django already parsed the body successfully, there is nothing to fix.
    if request.POST or request.FILES:
        return
    new_body = BytesIO()
    # Split the response in the various parts based on the boundary string.
    # NOTE(review): `content_type` is unpacked but never used below —
    # confirm it is only kept for tuple unpacking before removing.
    content_type, opts = parse_header(request.META['CONTENT_TYPE'].encode('ascii'))
    parts = body.split(b'\n--' + opts['boundary'] + b'\n')
    for part in parts:
        if b'\n\n' not in part:
            continue
        headers, content = part.split(b'\n\n', 1)
        if not headers:
            continue
        # Re-emit this part with CRLF line endings, as the HTTP spec expects.
        new_body.write(b'--' + opts['boundary'] + b'\r\n')
        new_body.write(headers.replace(b'\n', b'\r\n'))
        new_body.write(b'\r\n\r\n')
        new_body.write(content)
        new_body.write(b'\r\n')
    # Closing multipart boundary.
    new_body.write(b'--' + opts['boundary'] + b'--\r\n')
    request._body = new_body.getvalue()
    request.META['CONTENT_LENGTH'] = len(request._body)
    # Clear out _files and _post so that the request object re-parses the body
    if hasattr(request, '_files'):
        delattr(request, '_files')
    if hasattr(request, '_post'):
        delattr(request, '_post')
|
Alter the request body for compatibility with older distutils clients
Due to a bug in the Python distutils library, the request post is sent
using \n as a separator instead of the \r\n that the HTTP spec demands.
This breaks the Django form parser and therefore we have to write a
custom parser.
This bug was fixed in the Python 2.7.4 and 3.4:
http://bugs.python.org/issue10510
|
def get_collection(self, collection, database_name=None, username=None, password=None):
    """
    Get a pymongo collection handle.

    :param collection: Name of collection
    :param database_name: (optional) Name of database
    :param username: (optional) Username to login with
    :param password: (optional) Password to login with
    :return: Pymongo collection object
    """
    database = self.get_database(database_name, username, password)
    return database[collection]
|
Get a pymongo collection handle.
:param collection: Name of collection
:param database_name: (optional) Name of database
:param username: (optional) Username to login with
:param password: (optional) Password to login with
:return: Pymongo collection object
|
def add(self, *args):
    """Add a path template and handler.

    :param name: Optional. If specified, allows reverse path lookup with
        :meth:`reverse`.
    :param template: A string or :class:`~potpy.template.Template`
        instance used to match paths against. Strings will be wrapped in a
        Template instance.
    :param handler: A callable or :class:`~potpy.router.Route` instance
        which will handle calls for the given path. See
        :meth:`potpy.router.Router.add` for details.
    """
    # The leading `name` argument is optional; detect it by arity.
    if len(args) > 2:
        name, template = args[0], args[1]
        remainder = args[2:]
    else:
        name, template = None, args[0]
        remainder = args[1:]
    # Normalize the template argument into a Template instance.
    if isinstance(template, tuple):
        pattern, type_converters = template
        template = Template(pattern, **type_converters)
    elif not isinstance(template, Template):
        template = Template(template)
    if name:
        # Remember named templates for reverse lookup.
        self._templates[name] = template
    super(PathRouter, self).add(template, *remainder)
|
Add a path template and handler.
:param name: Optional. If specified, allows reverse path lookup with
:meth:`reverse`.
:param template: A string or :class:`~potpy.template.Template`
instance used to match paths against. Strings will be wrapped in a
Template instance.
:param handler: A callable or :class:`~potpy.router.Route` instance
which will handle calls for the given path. See
:meth:`potpy.router.Router.add` for details.
|
def rewrap_bytes(data):
    '''Rewrap characters to 70 character width.
    Intended to rewrap base64 content.
    '''
    chunks = (data[start:start + 70] for start in range(0, len(data), 70))
    return b'\n'.join(chunks)
|
Rewrap characters to 70 character width.
Intended to rewrap base64 content.
|
def default_is_local(hadoop_conf=None, hadoop_home=None):
    """\
    Is Hadoop configured to use the local file system?
    By default, it is. A DFS must be explicitly configured.
    """
    params = pydoop.hadoop_params(hadoop_conf, hadoop_home)
    # Local iff both the new-style and legacy keys point at file: URIs
    # (a missing key counts as local).
    return all(
        params.get(key, 'file:').startswith('file:')
        for key in ('fs.defaultFS', 'fs.default.name')
    )
|
\
Is Hadoop configured to use the local file system?
By default, it is. A DFS must be explicitly configured.
|
def get_type_data(name):
    """Return dictionary representation of type.
    Can be used to initialize primordium.type.primitives.Type
    """
    name = name.upper()
    try:
        display = JEFFS_COORDINATE_FORMAT_TYPES[name]
    except KeyError:
        raise NotFound('CoordinateFormat Type: ' + name)
    return {
        'authority': 'birdland.mit.edu',
        'namespace': 'coordinate format',
        'identifier': name,
        'domain': 'Coordinate Format Types',
        'display_name': display + ' Coordinate Format Type',
        'display_label': display,
        'description': ('The type for the ' + display +
                        ' Geographic coordinate format.'),
    }
|
Return dictionary representation of type.
Can be used to initialize primordium.type.primitives.Type
|
def sync_scheduler(self):
    """Download the scheduler.info file and perform a smart comparison
    with what we currently have so that we don't overwrite the
    last_run timestamp

    To do a smart comparison, we go over each entry in the
    server's scheduler file. If a scheduler entry is not present
    in the server copy, we delete it in the client copy and if the
    scheduler entry is present in the server copy, then we
    overwrite the frequency count in the client copy

    :raises Exception: re-raises any download or JSON-parsing error.
    """
    # get the server scheduler.info file
    url = "%s/%s/%s" % (self.config['server']['server_url'],
                        "experiments", "scheduler.info")
    try:
        req = requests.get(url, proxies=self.config['proxy']['proxy'],
                           auth=self.auth,
                           verify=self.verify)
        req.raise_for_status()
    except Exception as exp:
        logging.exception("Error trying to download scheduler.info: %s" % exp)
        raise exp
    try:
        server_sched = json.loads(req.content)
    except Exception as exp:
        logging.exception("Error parsing server scheduler: %s" % exp)
        raise exp
    sched_filename = os.path.join(self.config['dirs']['experiments_dir'],
                                  'scheduler.info')
    # No local copy yet: persist the server copy verbatim and stop.
    if not os.path.exists(sched_filename):
        with open(sched_filename, 'w') as file_p:
            json.dump(server_sched, file_p, indent=2,
                      separators=(',', ': '))
        return
    client_sched = {}
    try:
        with open(sched_filename, 'r') as file_p:
            client_sched = json.load(file_p)
    except Exception as exp:
        # Corrupt/unreadable local file: start from an empty scheduler
        # rather than failing the sync.
        client_sched = {}
        logging.exception("Error loading scheduler file: %s" % exp)
        logging.info("Making an empty scheduler")
    # delete any scheduled tasks as necessary
    #
    # Note: we can't modify a dictionary while iterating over it. Bug fix:
    # in Python 3, dict.keys() returns a *live view*, so deleting entries
    # while iterating it raises RuntimeError — materialize the keys first.
    client_exp_keys = list(client_sched.keys())
    for exp in client_exp_keys:
        if exp not in server_sched:
            del client_sched[exp]
    # and update all the other frequencies
    for exp in server_sched:
        if exp in client_sched:
            client_sched[exp]['frequency'] = server_sched[exp]['frequency']
        else:
            client_sched[exp] = server_sched[exp]
    # write out the results
    with open(sched_filename, 'w') as file_p:
        json.dump(client_sched, file_p, indent=2,
                  separators=(',', ': '))
|
Download the scheduler.info file and perform a smart comparison
with what we currently have so that we don't overwrite the
last_run timestamp
To do a smart comparison, we go over each entry in the
server's scheduler file. If a scheduler entry is not present
in the server copy, we delete it in the client copy and if the
scheduler entry is present in the server copy, then we
overwrite the frequency count in the client copy
|
def append(self, event):
    """Add an event to the list."""
    # Index by base class as well, for fast filtered lookups.
    self._events_by_baseclass[event.baseclass].append(event)
    self._events.append(event)
|
Add an event to the list.
|
def _purge(self):
    """
    Trim the cache down to max_size by evicting the
    least-recently-used entries.
    """
    if len(self.cache) <= self.max_size:
        # Already within budget: nothing to evict.
        return
    # Namespace shortcuts for speed in the eviction loop below.
    cache = self.cache
    refcount = self.refcount
    queue = self.queue
    max_size = self.max_size
    # purge least recently used entries, using refcount to count entries
    # that appear multiple times in the queue
    while len(cache) > max_size:
        refc = 1
        # Pop queue entries until we hit a key whose refcount drops to
        # zero — i.e. a key with no more-recent access still queued.
        # That key is the true least-recently-used entry.
        while refc:
            k = queue.popleft()
            refc = refcount[k] = refcount[k] - 1
        del cache[k]
        del refcount[k]
|
Trim the cache down to max_size by evicting the
least-recently-used entries.
|
def eye_plot(x, L, S=0):
    """
    Eye pattern plot of a baseband digital communications waveform.
    The signal must be real, but can be multivalued in terms of the underlying
    modulation scheme. Used for BPSK eye plots in the Case Study article.
    Parameters
    ----------
    x : ndarray of the real input data vector/array
    L : display length in samples (usually two symbols)
    S : start index
    Returns
    -------
    None : A plot window opens containing the eye plot
    Notes
    -----
    Increase S to eliminate filter transients.
    Examples
    --------
    1000 bits at 10 samples per bit with 'rc' shaping.
    >>> import matplotlib.pyplot as plt
    >>> from sk_dsp_comm import digitalcom as dc
    >>> x,b, data = dc.NRZ_bits(1000,10,'rc')
    >>> dc.eye_plot(x,20,60)
    >>> plt.show()
    """
    plt.figure(figsize=(6, 4))
    idx = np.arange(0, L + 1)
    # First trace starting at sample S.
    plt.plot(idx, x[S:S + L + 1], 'b')
    # Overlay the remaining L-sample windows on the same axis.
    k_max = int((len(x) - S) / L) - 1
    for k in range(1, k_max):
        start = S + k * L
        plt.plot(idx, x[start:start + L + 1], 'b')
    plt.grid()
    plt.xlabel('Time Index - n')
    plt.ylabel('Amplitude')
    plt.title('Eye Plot')
    return 0
|
Eye pattern plot of a baseband digital communications waveform.
The signal must be real, but can be multivalued in terms of the underlying
modulation scheme. Used for BPSK eye plots in the Case Study article.
Parameters
----------
x : ndarray of the real input data vector/array
L : display length in samples (usually two symbols)
S : start index
Returns
-------
None : A plot window opens containing the eye plot
Notes
-----
Increase S to eliminate filter transients.
Examples
--------
1000 bits at 10 samples per bit with 'rc' shaping.
>>> import matplotlib.pyplot as plt
>>> from sk_dsp_comm import digitalcom as dc
>>> x,b, data = dc.NRZ_bits(1000,10,'rc')
>>> dc.eye_plot(x,20,60)
>>> plt.show()
|
def remove_edge_fun(graph):
    """
    Returns a function that removes an edge from the `graph`.
    ..note:: The out node is removed if this is isolate.
    :param graph:
        A directed graph.
    :type graph: networkx.classes.digraph.DiGraph
    :return:
        A function that remove an edge from the `graph`.
    :rtype: callable
    """
    from networkx import is_isolate

    # Bind the bound methods once so the closure avoids repeated
    # attribute lookups.
    rm_edge, rm_node = graph.remove_edge, graph.remove_node

    def remove_edge(u, v):
        rm_edge(u, v)
        # Drop the destination node if the edge removal left it isolated.
        if is_isolate(graph, v):
            rm_node(v)

    return remove_edge
|
Returns a function that removes an edge from the `graph`.
..note:: The out node is removed if this is isolate.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:return:
A function that remove an edge from the `graph`.
:rtype: callable
|
def get_branches(aliases):
    """Get unique branch names from an alias dictionary.

    :param aliases: mapping of alias name -> expression string.
    :return: list of unique identifier tokens found in the expressions,
        excluding known function names, tokens of 3 characters or fewer,
        and tokens starting with a digit. Order is unspecified (set-based).
    """
    ignore = ['pow', 'log10', 'sqrt', 'max']
    branches = []
    for k, v in aliases.items():
        # Replace operators/punctuation with spaces, then tokenize.
        # Bug fix: the pattern is now a raw string — the old non-raw
        # '[\(\)...]' relied on invalid escape sequences, which raise
        # SyntaxWarning/DeprecationWarning on modern Python.
        tokens = re.sub(r'[()+*/,=<>&!\-|]', ' ', v).split()
        for t in tokens:
            # Skip numeric literals and very short tokens.
            if bool(re.search(r'^\d', t)) or len(t) <= 3:
                continue
            if bool(re.search(r'[a-zA-Z]', t)) and t not in ignore:
                branches += [t]
    return list(set(branches))
|
Get unique branch names from an alias dictionary.
|
def read_index(self, fh, indexed_fh, rec_iterator=None,
               rec_hash_func=None, parse_hash=str, flush=True,
               no_reindex=True, verbose=False):
    """
    Populate this index from a file. Input format is just a tab-separated file,
    one record per line. The last column is the file location for the record
    and all columns before that are collectively considered to be the hash key
    for that record (which is probably only 1 column, but this allows us to
    permit tabs in hash keys). Lines consisting only of whitespace are skipped.
    :param fh: filename or stream-like object to read from.
    :param indexed_fh: either the filename of the indexed file or handle to
                       it.
    :param rec_iterator: a function that will return an iterator for the
                         indexed file type (not the iterator for the file
                         itself). This function must take a single argument
                         which is the name the file to iterate over, or a
                         stream like object similar to a filestream.
    :param rec_hash_func: a function that accepts the record type produced by
                          the iterator and produces a unique hash for each
                          record.
    :param parse_hash: a function to convert the string representation of
                       the hash into whatever type is needed. By default,
                       we just leave these as strings.
    :param flush: remove everything currently in the index and discard
                  any details about a file that is already
                  fully/partially indexed by this object. This is the
                  default behavior. If False, then data from <fh> is
                  just added to the existing index data (potentially
                  overwriting some of it) and the existing index can
                  continue to be used as before.
    :param no_reindex: if True, after loading the index, a missing key will
                       cause an exception, rather than trigger re-scanning
                       the indexed file for the associated record. The only
                       reason to set this to False would be if your index
                       was incomplete.
    :param verbose: output status message to STDERR about progress
                    reading the index (if possible).
    :raise IndexError: on malformed line in input file/stream
    """
    # set the record iterator and hash functions, if they were given
    if rec_iterator is not None:
        self.record_iterator = rec_iterator
    if rec_hash_func is not None:
        self.record_hash_function = rec_hash_func
    # disable re-indexing?
    self._no_reindex = no_reindex
    # figure out what kind of index identifier we got: handle or filename?
    handle = fh
    try:
        # NOTE(review): when fh is a filename, the handle opened here is
        # never explicitly closed — confirm whether relying on GC is
        # acceptable here.
        handle = open(fh)
    except TypeError:
        # okay, not a filename, we'll try treating it as a stream to read from.
        pass
    # clear this index?
    if flush:
        self._index = {}
        self._indexed_file_handle = None
        self._indexed_file_name = None
    # replace the name/handle for the indexed file
    indexed_fn = None
    try:
        # try treating this as a filename
        self.indexed_file = (indexed_fh, None)
        indexed_fn = indexed_fh
    except TypeError:
        try:
            # try treating this as a file handle
            self.indexed_file = (None, indexed_fh)
        except TypeError:
            fn = " from " + str(fh) if indexed_fn is not None else ""
            raise IndexError("failed to read index" + fn + "; "
                             "reason: expected indexed filename or stream-like "
                             "object, got " + str(type(indexed_fh)))
    # try to get an idea of how much data we have...
    if verbose:
        try:
            total = os.path.getsize(handle.name)
            pind = ProgressIndicator(totalToDo=total, messagePrefix="completed",
                                     messageSuffix="of loading " + handle.name)
        except AttributeError as e:
            # Stream has no file name/size; silently fall back to quiet mode.
            sys.stderr.write(str(e))
            sys.stderr.write("completed [unknown] of loading index")
            verbose = False
    # read the index file and populate this object
    for line in handle:
        line = line.rstrip()
        if verbose:
            pind.done = handle.tell()
            pind.showProgress()
        # Bug fix: after rstrip(), a whitespace-only line becomes the empty
        # string, and "".isspace() is False — so the previous check
        # (`line.isspace()`) never fired and blank lines crashed the parser
        # below instead of being skipped as the docstring promises.
        if not line:
            continue
        parts = line.split("\t")
        if len(parts) < 2:
            raise IndexError("failed to parse line: '" + line + "'")
        # Everything before the last column is the (possibly tab-containing)
        # key; the last column is the file offset of the record.
        key = parse_hash("\t".join(parts[:-1]))
        value = parts[-1]
        self._index[key] = int(value)
|
Populate this index from a file. Input format is just a tab-separated file,
one record per line. The last column is the file location for the record
and all columns before that are collectively considered to be the hash key
for that record (which is probably only 1 column, but this allows us to
permit tabs in hash keys). Lines consisting only of whitespace are skipped.
:param fh: filename or stream-like object to read from.
:param indexed_fh: either the filename of the indexed file or handle to
it.
:param rec_iterator: a function that will return an interator for the
indexed file type (not the iterator for the file
itself). This function must take a single argument
which is the name the file to iterate over, or a
stream like object similar to a filestream.
:param rec_hash_func: a function that accepts the record type produced by
the iterator and produces a unique hash for each
record.
:param parse_hash: a function to convert the string representation of
the hash into whatever type is needed. By default,
we just leave these as strings.
:param flush: remove everything currently in the index and discard
any details about a file that is already
fully/partially indexed by this object. This is the
default behavior. If False, then data from <fh> is
just added to the existing index data (potentially
overwriting some of it) and the existing index can
continue to be used as before.
:param no_reindex: if True, after loading the index, a missing key will
cause an exception, rather than trigger re-scanning
the indexed file for the associated record. The only
reason to set this to False would be if your index
was incomplete.
:param verbose: output status message to STDERR about progress
reading the index (if possible).
:raise IndexError: on malformed line in input file/stream
|
def filter_search(self, code=None, name=None, abilities=None,
                  attributes=None, info=None):
    """Search the card database and return matching (code, name) pairs.

    Any combination of filters may be supplied: an integer ``code``, a
    ``name`` string, an ``abilities`` mapping of {phase: ability list or
    "*"}, an ``attributes`` list, and an ``info`` mapping of
    {key: value list or "*"}.  Passing "*" as a dict value matches any
    entry that stores the corresponding key.
    """
    query = "SELECT code, name FROM CARDS "
    # Where_filter_gen builds the WHERE clause from the given filters.
    query += Where_filter_gen(("code", code), ("name", name),
                              ("abilities", abilities),
                              ("attributes", attributes),
                              ("info", info))
    with sqlite3.connect(self.dbname) as carddb:
        return carddb.execute(query).fetchall()
|
Return a list of codes and names pertaining to cards that have the
given information values stored.
Can take a code integer, name string, abilities dict {phase: ability
list/"*"}, attributes list, info dict {key, value list/"*"}.
In the above argument examples "*" is a string that may be passed
instead of a list as the dict value to match anything that stores that
key.
|
def perform_oauth(email, master_token, android_id, service, app, client_sig,
                  device_country='us', operatorCountry='us', lang='en',
                  sdk_version=17):
    """
    Use a master token from master_login to perform OAuth to a specific Google
    service.

    Return a dict, eg::

        {
            'Auth': '...',
            'LSID': '...',
            'SID': '..',
            'issueAdvice': 'auto',
            'services': 'hist,mail,googleme,...'
        }

    To authenticate requests to this service, include a header
    ``Authorization: GoogleLogin auth=res['Auth']``.
    """
    data = {
        'accountType': 'HOSTED_OR_GOOGLE',
        'Email': email,
        'has_permission': 1,
        'EncryptedPasswd': master_token,
        'service': service,
        'source': 'android',
        'androidId': android_id,
        'app': app,
        'client_sig': client_sig,
        'device_country': device_country,
        # Bug fix: the operatorCountry argument was accepted but ignored;
        # device_country was previously sent in its place.
        'operatorCountry': operatorCountry,
        'lang': lang,
        'sdk_version': sdk_version
    }
    return _perform_auth_request(data)
|
Use a master token from master_login to perform OAuth to a specific Google
service.
Return a dict, eg::
{
'Auth': '...',
'LSID': '...',
'SID': '..',
'issueAdvice': 'auto',
'services': 'hist,mail,googleme,...'
}
To authenticate requests to this service, include a header
``Authorization: GoogleLogin auth=res['Auth']``.
|
def MGMT_ACTIVE_SET(self, sAddr='', xCommissioningSessionId=None, listActiveTimestamp=None, listChannelMask=None, xExtendedPanId=None,
                    sNetworkName=None, sPSKc=None, listSecurityPolicy=None, xChannel=None, sMeshLocalPrefix=None, xMasterKey=None,
                    xPanId=None, xTmfPort=None, xSteeringData=None, xBorderRouterLocator=None, BogusTLV=None, xDelayTimer=None):
    """send MGMT_ACTIVE_SET command

    Builds a ``dataset mgmtsetcommand active`` CLI command from the
    supplied Active Operational Dataset fields and sends it to the device.
    Fields with named CLI arguments are appended as text; the remaining
    fields (PSKc, security policy, session id, border router locator,
    steering data, bogus TLV) are appended as raw hex TLVs after the
    ``binary`` keyword.  Note: sAddr, xTmfPort and xDelayTimer are
    accepted but not used by this implementation.

    Returns:
        True: successful to send MGMT_ACTIVE_SET
        False: fail to send MGMT_ACTIVE_SET
        (returns None if an exception occurs while building/sending)
    """
    print '%s call MGMT_ACTIVE_SET' % self.port
    try:
        cmd = 'dataset mgmtsetcommand active'
        if listActiveTimestamp != None:
            cmd += ' activetimestamp '
            cmd += str(listActiveTimestamp[0])
        if xExtendedPanId != None:
            cmd += ' extpanid '
            xpanid = self.__convertLongToString(xExtendedPanId)
            # zero-pad to the full 8-byte (16 hex digit) field width
            if len(xpanid) < 16:
                xpanid = xpanid.zfill(16)
            cmd += xpanid
        if sNetworkName != None:
            cmd += ' networkname '
            cmd += str(sNetworkName)
        if xChannel != None:
            cmd += ' channel '
            cmd += str(xChannel)
        if sMeshLocalPrefix != None:
            cmd += ' localprefix '
            cmd += str(sMeshLocalPrefix)
        if xMasterKey != None:
            cmd += ' masterkey '
            key = self.__convertLongToString(xMasterKey)
            # zero-pad to the full 16-byte (32 hex digit) key width
            if len(key) < 32:
                key = key.zfill(32)
            cmd += key
        if xPanId != None:
            cmd += ' panid '
            cmd += str(xPanId)
        if listChannelMask != None:
            cmd += ' channelmask '
            cmd += '0x' + self.__convertLongToString(self.__convertChannelMask(listChannelMask))
        # The remaining fields have no named CLI argument and must be
        # encoded as raw hex TLVs (type byte + length byte + value).
        if sPSKc != None or listSecurityPolicy != None or \
           xCommissioningSessionId != None or xTmfPort != None or xSteeringData != None or xBorderRouterLocator != None or \
           BogusTLV != None:
            cmd += ' binary '
        if sPSKc != None:
            # TLV type 0x04, length 0x10 (16 bytes)
            cmd += '0410'
            stretchedPskc = Thread_PBKDF2.get(sPSKc,ModuleHelper.Default_XpanId,ModuleHelper.Default_NwkName)
            pskc = hex(stretchedPskc).rstrip('L').lstrip('0x')
            if len(pskc) < 32:
                pskc = pskc.zfill(32)
            cmd += pskc
        if listSecurityPolicy != None:
            # TLV type 0x0c, length 0x03 (rotation time + flags byte)
            cmd += '0c03'
            rotationTime = 0
            policyBits = 0
            # previous passing way listSecurityPolicy=[True, True, 3600, False, False, True]
            if (len(listSecurityPolicy) == 6):
                rotationTime = listSecurityPolicy[2]
                # the last three reserved bits must be 1
                policyBits = 0b00000111
                if listSecurityPolicy[0]:
                    policyBits = policyBits | 0b10000000
                if listSecurityPolicy[1]:
                    policyBits = policyBits | 0b01000000
                if listSecurityPolicy[3]:
                    policyBits = policyBits | 0b00100000
                if listSecurityPolicy[4]:
                    policyBits = policyBits | 0b00010000
                if listSecurityPolicy[5]:
                    policyBits = policyBits | 0b00001000
            else:
                # new passing way listSecurityPolicy=[3600, 0b11001111]
                rotationTime = listSecurityPolicy[0]
                policyBits = listSecurityPolicy[1]
            policy = str(hex(rotationTime))[2:]
            if len(policy) < 4:
                policy = policy.zfill(4)
            cmd += policy
            cmd += str(hex(policyBits))[2:]
        if xCommissioningSessionId != None:
            # TLV type 0x0b, length 0x02
            cmd += '0b02'
            sessionid = str(hex(xCommissioningSessionId))[2:]
            if len(sessionid) < 4:
                sessionid = sessionid.zfill(4)
            cmd += sessionid
        if xBorderRouterLocator != None:
            # TLV type 0x09, length 0x02
            cmd += '0902'
            locator = str(hex(xBorderRouterLocator))[2:]
            if len(locator) < 4:
                locator = locator.zfill(4)
            cmd += locator
        if xSteeringData != None:
            # TLV type 0x08; length derived from the hex string (2 digits/byte)
            steeringData = self.__convertLongToString(xSteeringData)
            cmd += '08' + str(len(steeringData)/2).zfill(2)
            cmd += steeringData
        if BogusTLV != None:
            # deliberately malformed TLV, presumably for negative testing
            cmd += "8202aa55"
        print cmd
        return self.__sendCommand(cmd)[0] == 'Done'
    except Exception, e:
        ModuleHelper.WriteIntoDebugLogger("MGMT_ACTIVE_SET() Error: " + str(e))
|
send MGMT_ACTIVE_SET command
Returns:
True: successful to send MGMT_ACTIVE_SET
False: fail to send MGMT_ACTIVE_SET
|
def daemon_start(self):
    """Launch the SUN daemon in the background unless it is already running."""
    not_running = daemon_status() == "SUN not running"
    if not_running:
        subprocess.call("{0} &".format(self.cmd), shell=True)
|
Start daemon when gtk loaded
|
def geturl(urllib2_resp):
    """Return the response URL, restoring a dropped '//' after the scheme.

    Use instead of urllib.addinfourl.geturl(), which appears to drop the
    double slash for certain schemes (e.g. file://).  This implementation
    is probably over-eager: it always restores '://' when missing, even
    though some url schemata aren't always followed by '//' after the
    colon, but as far as I know pip doesn't need any of those.

    The URI RFC can be found at: http://tools.ietf.org/html/rfc1630

    This function assumes that ``scheme:/foo/bar`` is the same as
    ``scheme:///foo/bar``.
    """
    url = urllib2_resp.geturl()
    scheme, remainder = url.split(':', 1)
    if remainder.startswith('//'):
        return url
    # FIXME: write a good test to cover it
    return '%s://%s' % (scheme, remainder)
|
Use instead of urllib.addinfourl.geturl(), which appears to have
some issues with dropping the double slash for certain schemes
(e.g. file://). This implementation is probably over-eager, as it
always restores '://' if it is missing, and it appears some url
schemata aren't always followed by '//' after the colon, but as
far as I know pip doesn't need any of those.
The URI RFC can be found at: http://tools.ietf.org/html/rfc1630
This function assumes that
scheme:/foo/bar
is the same as
scheme:///foo/bar
|
def _parse_attribute(
        self,
        element,  # type: ET.Element
        attribute,  # type: Text
        state  # type: _ProcessorState
):
    # type: (...) -> Any
    """Parse the primitive value within the XML element's attribute.

    Returns the value produced by ``self._parser_func`` when the
    attribute is present, otherwise ``self._default``.  When the
    attribute is missing and the processor is marked required, reports
    a MissingValue error via ``state.raise_error``.
    """
    parsed_value = self._default
    attribute_value = element.get(attribute, None)
    if attribute_value is not None:
        parsed_value = self._parser_func(attribute_value, state)
    elif self.required:
        # NOTE(review): the lookup uses the `attribute` parameter, but the
        # error message interpolates `self._attribute` -- presumably these
        # are always the same value; confirm with callers.
        state.raise_error(
            MissingValue, 'Missing required attribute "{}" on element "{}"'.format(
                self._attribute, element.tag
            )
        )
    return parsed_value
|
Parse the primitive value within the XML element's attribute.
|
def encode_sid(cls, secret, sid):
    """Return the session id prefixed with its HMAC-SHA512 signature."""
    digest = hmac.new(secret.encode("utf-8"),
                      sid.encode("utf-8"),
                      hashlib.sha512).hexdigest()
    return digest + sid
|
Computes the HMAC for the given session id.
|
def unbind(self, callback):
    """Detach *callback* from the handler list.

    Returns the number of handlers removed (0 when the callback was not
    registered or there are no handlers at all).
    """
    current = self._handlers
    if not current:
        return 0
    remaining = [cb for cb in current if cb != callback]
    removed = len(current) - len(remaining)
    if removed:
        self._handlers = remaining
        return removed
    return 0
|
Remove a callback from the list
|
def keep(self, diff):
    """Mark this diff (or volume) to be kept in path."""
    to_uuid, from_uuid = self.toArg.diff(diff)
    self._client.keep(to_uuid, from_uuid)
    logger.debug("Kept %s", diff)
|
Mark this diff (or volume) to be kept in path.
|
def _split_refextract_authors_str(authors_str):
    """Extract author names out of refextract authors output.

    Splits the string on the module-level RE_SPLIT_AUTH pattern, cleans
    each token, merges initials-only tokens into the preceding name, and
    filters out obvious non-names.  Returns a list of author strings.
    """
    author_seq = (x.strip() for x in RE_SPLIT_AUTH.split(authors_str) if x)
    res = []
    current = ''
    for author in author_seq:
        # Normalize to unicode text (Python 2 bytes are decoded as UTF-8).
        if not isinstance(author, six.text_type):
            author = six.text_type(author.decode('utf8', 'ignore'))
        # First clean the token.
        author = re.sub(r'\(|\)', '', author, re.U)
        # Names usually start with characters.
        author = re.sub(r'^[\W\d]+', '', author, re.U)
        # Names should end with characters or dot.
        author = re.sub(r'[^.\w]+$', '', author, re.U)
        # If we have initials join them with the previous token.
        if RE_INITIALS_ONLY.match(author):
            current += ', ' + author.strip().replace('. ', '.')
        else:
            # A new surname starts a new author; flush the previous one.
            if current:
                res.append(current)
            current = author
    # Add last element.
    if current:
        res.append(current)
    # Manual filterings that we don't want to add in regular expressions since
    # it would make them more complex.
    #  * ed might sneak in
    #  * many legacy refs look like 'X. and Somebody E.'
    #  * might miss lowercase initials
    filters = [
        lambda a: a == 'ed',
        lambda a: a.startswith(','),
        lambda a: len(a) == 1
    ]
    res = [r for r in res if all(not f(r) for f in filters)]
    return res
|
Extract author names out of refextract authors output.
|
def get_create_security_group_commands(self, sg_id, sg_rules):
    """Build the EOS CLI command list that creates ingress/egress ACLs."""
    ingress_rules, egress_rules = self._format_rules_for_eos(sg_rules)
    cmds = []
    # Emit one dynamic access-list per direction, each closed with "exit".
    for direction, rules in ((n_const.INGRESS_DIRECTION, ingress_rules),
                             (n_const.EGRESS_DIRECTION, egress_rules)):
        cmds.append("ip access-list %s dynamic" %
                    self._acl_name(sg_id, direction))
        cmds.extend(rules)
        cmds.append("exit")
    return cmds
|
Commands for creating ACL
|
async def update_server_data(server):
    """
    Updates the server info for the given server

    Args:
        server: The Discord server to update info for
    """
    data = datatools.get_data()
    # Add the server to server data if it doesn't yet exist
    send_welcome_message = False
    if server.id not in data["discord"]["servers"]:
        logger.debug("Adding new server to serverdata")
        data["discord"]["servers"][server.id] = {"prefix": "!"}
        # NOTE(review): "mute_intro" is looked up on the top-level data
        # dict, not on the per-server entry -- confirm that is intended.
        if "mute_intro" not in data or not data["mute_intro"]:
            send_welcome_message = True
    # Make sure all modules are in the server
    _dir = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
    _dir_modules = "{}/../".format(_dir)
    for module_name in os.listdir(_dir_modules):
        # Skip disabled/private module directories.
        if module_name.startswith("_") or module_name.startswith("!"):
            continue
        if not os.path.isfile("{}/{}/_data.py".format(_dir_modules, module_name)):
            logger.warning("No _data.py file found for module {}".format(module_name))
            continue
        try:
            # Seed the server entry with each module's default data structure.
            import_name = ".discord_modis.modules.{}.{}".format(module_name, "_data")
            _data = importlib.import_module(import_name, "modis")
            if _data.modulename not in data["discord"]["servers"][server.id]:
                data["discord"]["servers"][server.id][_data.modulename] = _data.sd_structure
                datatools.write_data(data)
        except Exception as e:
            logger.error("Could not initialise module {}".format(module_name))
            logger.exception(e)
    datatools.write_data(data)
    # Send a welcome message now
    if send_welcome_message:
        # Fall back from the server default channel, to a channel named
        # "general", to any channel containing "general", to the first
        # text channel.
        default_channel = server.default_channel
        if not default_channel:
            for channel in server.channels:
                if channel.name == "general":
                    default_channel = channel
                    break
        if not default_channel:
            for channel in server.channels:
                if "general" in channel.name:
                    default_channel = channel
                    break
        if not default_channel:
            for channel in server.channels:
                if channel.type == discord.ChannelType.text:
                    default_channel = channel
                    break
        # Display a welcome message
        if default_channel:
            hello_message = "Hello! I'm Modis.\n\n" + \
                            "The prefix is currently `!`, and can be changed at any time using `!prefix`\n\n" + \
                            "You can use `!help` to get help commands for all modules, " + \
                            "or {} me to get the server prefix and help commands.".format(server.me.mention)
            await client.send_message(default_channel, hello_message)
|
Updates the server info for the given server
Args:
server: The Discord server to update info for
|
def ParseCall(self, parser_mediator, query, row, **unused_kwargs):
    """Parses a call.

    Produces up to three events per row: WAITING (try_call), ACCEPTED
    (accept_call, if set) and FINISHED (accept_call + call_duration,
    if both are available).

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      query (str): query that created the row.
      row (sqlite3.Row): row resulting from query.
    """
    query_hash = hash(query)
    guid = self._GetRowValue(query_hash, row, 'guid')
    is_incoming = self._GetRowValue(query_hash, row, 'is_incoming')
    videostatus = self._GetRowValue(query_hash, row, 'videostatus')
    try:
      # The GUID is expected to be "<source>-<destination>[-...]".
      aux = guid
      if aux:
        aux_list = aux.split('-')
        src_aux = aux_list[0]
        dst_aux = aux_list[1]
      else:
        src_aux = 'Unknown [no GUID]'
        dst_aux = 'Unknown [no GUID]'
    except IndexError:
      src_aux = 'Unknown [{0:s}]'.format(guid)
      dst_aux = 'Unknown [{0:s}]'.format(guid)
    if is_incoming == '0':
      # Outgoing call: the local user initiated it; annotate the
      # destination with the remote IP address when available.
      user_start_call = True
      source = src_aux
      ip_address = self._GetRowValue(query_hash, row, 'ip_address')
      if ip_address:
        destination = '{0:s} <{1:s}>'.format(dst_aux, ip_address)
      else:
        destination = dst_aux
    else:
      user_start_call = False
      source = src_aux
      destination = dst_aux
    call_identifier = self._GetRowValue(query_hash, row, 'id')
    event_data = SkypeCallEventData()
    event_data.dst_call = destination
    event_data.offset = call_identifier
    event_data.query = query
    event_data.src_call = source
    event_data.user_start_call = user_start_call
    event_data.video_conference = videostatus == '3'
    timestamp = self._GetRowValue(query_hash, row, 'try_call')
    event_data.call_type = 'WAITING'
    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(date_time, 'Call from Skype')
    parser_mediator.ProduceEventWithEventData(event, event_data)
    try:
      timestamp = self._GetRowValue(query_hash, row, 'accept_call')
      timestamp = int(timestamp)
    except (ValueError, TypeError):
      timestamp = None
    if timestamp:
      event_data.call_type = 'ACCEPTED'
      date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
      event = time_events.DateTimeValuesEvent(date_time, 'Call from Skype')
      parser_mediator.ProduceEventWithEventData(event, event_data)
    try:
      call_duration = self._GetRowValue(query_hash, row, 'call_duration')
      call_duration = int(call_duration)
    except (ValueError, TypeError):
      parser_mediator.ProduceExtractionWarning(
          'unable to determine when call: {0:s} was finished.'.format(
              call_identifier))
      call_duration = None
    if call_duration:
      # NOTE(review): if accept_call was missing, timestamp is None here
      # and this addition raises TypeError -- confirm rows always carry
      # accept_call when call_duration is set.
      timestamp += call_duration
      event_data.call_type = 'FINISHED'
      date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
      event = time_events.DateTimeValuesEvent(date_time, 'Call from Skype')
      parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses a call.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row resulting from query.
query (Optional[str]): query.
|
def multigrid(bounds, points_count):
    """Generate a multidimensional lattice over a box.

    :param bounds: list of (lower, upper) pairs, one per dimension.
    :param points_count: number of points per dimension.
    :return: array of shape (points_count**d, d) listing every lattice
        point; for d == 1 the shape is (points_count, 1).
    """
    if len(bounds) == 1:
        low, high = bounds[0]
        return np.linspace(low, high, points_count).reshape(points_count, 1)
    axes = [np.linspace(low, high, points_count) for (low, high) in bounds]
    mesh = np.meshgrid(*axes)
    # Flatten each coordinate grid in Fortran order and stack as columns.
    return np.vstack([axis.flatten(order='F') for axis in mesh]).T
|
Generates a multidimensional lattice
:param bounds: box constraints
:param points_count: number of points per dimension.
|
def _get_ukko_report():
    '''Get Ukko's report from the fixed URL.

    Fetches URL_UKKO_REPORT and returns the response body passed through
    ``str()``.  NOTE(review): ``response.read()`` returns bytes on
    Python 3, so this yields a "b'...'" repr string rather than decoded
    text -- presumably downstream parsing copes with that; confirm.
    '''
    with urllib.request.urlopen(URL_UKKO_REPORT) as response:
        ret = str(response.read())
    return ret
|
Get Ukko's report from the fixed URL.
|
def optimal_partitions(sizes, counts, num_part):
    """Compute the optimal partitions given a distribution of set sizes.

    Args:
        sizes (numpy.array): The complete domain of set sizes in ascending
            order.
        counts (numpy.array): The frequencies of all set sizes in the same
            order as `sizes`.
        num_part (int): The number of partitions to create.

    Returns:
        list: Partitions as `(lower, upper)` tuples, where `lower` and
        `upper` are the inclusive lower and upper bound set sizes of each
        partition.
    """
    # Fewer than two partitions: everything falls into a single range.
    if num_part < 2:
        return [(sizes[0], sizes[-1])]
    # More partitions than distinct sizes: one partition per size.
    if num_part >= len(sizes):
        return [(size, size) for size in sizes]
    false_positives = _compute_nfps_real(counts, sizes)
    best_partitions, _, _ = _compute_best_partitions(num_part, sizes,
                                                     false_positives)
    return best_partitions
|
Compute the optimal partitions given a distribution of set sizes.
Args:
sizes (numpy.array): The complete domain of set sizes in ascending
order.
counts (numpy.array): The frequencies of all set sizes in the same
order as `sizes`.
num_part (int): The number of partitions to create.
Returns:
list: A list of partitions in the form of `(lower, upper)` tuples,
where `lower` and `upper` are lower and upper bound (inclusive)
set sizes of each partition.
|
def _concatenate_shape(input_shape, axis=-1): # pylint: disable=invalid-name
"""Helper to determine the shape of Concatenate output."""
ax = axis % len(input_shape[0])
concat_size = sum(shape[ax] for shape in input_shape)
out_shape = input_shape[0][:ax] + (concat_size,) + input_shape[0][ax+1:]
return out_shape
|
Helper to determine the shape of Concatenate output.
|
def check_num_slices(num_slices, img_shape=None, num_dims=3):
    """Ensures requested number of slices is valid.

    The result is clipped to at least 1 per dimension and, when
    ``img_shape`` is given, to at most the image size per dimension.

    :param num_slices: scalar or per-dimension iterable of slice counts.
    :param img_shape: optional image shape used as the upper bound.
    :param num_dims: number of dimensions a scalar request is expanded to.
    :return: numpy array of validated slice counts, one per dimension.
    :raises ValueError: if ``num_slices`` has a length matching neither 1
        nor the number of image dimensions.
    """
    # Expand a scalar or single-element request to all dimensions.
    if not isinstance(num_slices, Iterable) or len(num_slices) == 1:
        num_slices = np.repeat(num_slices, num_dims)
    if img_shape is not None:
        if len(num_slices) != len(img_shape):
            # Bug fix: the message previously reported len(img_shape) + 1
            # as the expected dimensionality.
            raise ValueError('The number of dimensions requested is different from image.'
                             ' Must be either 1 or equal to {}'.format(len(img_shape)))
        # upper bounding them to image shape
        num_slices = np.minimum(img_shape, num_slices)
    # lower bounding it to 1
    return np.maximum(1, num_slices)
|
Ensures requested number of slices is valid.
At least 1 and at most the image size, if available
|
def loads(cls, s):
    """Load an instance of this class from the YAML string *s*."""
    stream = StringIO(s)
    # closing() guarantees the buffer is released after cls.load runs.
    with closing(stream):
        return cls.load(stream)
|
Load an instance of this class from YAML.
|
def show_exception_only(self, etype, evalue):
    """Print only the exception type and message, with no traceback.

    Parameters
    ----------
    etype : exception type
    evalue : exception value
    """
    # Format via *this* class's helper, not a subclass override whose
    # signature or behavior may differ.
    out = self.ostream
    out.flush()
    lines = self.get_exception_only(etype, evalue)
    out.write('\n'.join(lines))
    out.flush()
|
Only print the exception type and message, without a traceback.
Parameters
----------
etype : exception type
value : exception value
|
def sparse_or_dense_matvecmul(sparse_or_dense_matrix,
                              dense_vector,
                              validate_args=False,
                              name=None,
                              **kwargs):
  """Returns (batched) matmul of a (sparse) matrix with a column vector.

  The vector is expanded to a single-column matrix, multiplied via
  `sparse_or_dense_matmul`, and the trailing unit dimension is squeezed
  away again.

  Args:
    sparse_or_dense_matrix: `SparseTensor` or `Tensor` representing a (batch
      of) matrices.
    dense_vector: `Tensor` representing a (batch of) vectors, with the same
      batch shape as `sparse_or_dense_matrix`. The shape must be compatible
      with the shape of `sparse_or_dense_matrix` and kwargs.
    validate_args: When `True`, additional assertions might be embedded in
      the graph.
      Default value: `False` (i.e., no graph assertions are added).
    name: Python `str` prefixed to ops created by this function.
      Default value: "sparse_or_dense_matvecmul".
    **kwargs: Keyword arguments to `tf.sparse_tensor_dense_matmul` or
      `tf.matmul`.

  Returns:
    product: A dense (batch of) vector-shaped Tensor of the same batch shape
      and dtype as `sparse_or_dense_matrix` and `dense_vector`.
  """
  with tf.compat.v1.name_scope(name, 'sparse_or_dense_matvecmul',
                               [sparse_or_dense_matrix, dense_vector]):
    vector = tf.convert_to_tensor(
        value=dense_vector, dtype_hint=tf.float32, name='dense_vector')
    # Treat the vector as an N x 1 matrix for the matmul.
    column = vector[..., tf.newaxis]
    product = sparse_or_dense_matmul(
        sparse_or_dense_matrix,
        column,
        validate_args=validate_args,
        **kwargs)
    # Drop the trailing singleton dimension to recover a vector.
    return tf.squeeze(product, axis=[-1])
|
Returns (batched) matmul of a (sparse) matrix with a column vector.
Args:
sparse_or_dense_matrix: `SparseTensor` or `Tensor` representing a (batch of)
matrices.
dense_vector: `Tensor` representing a (batch of) vectors, with the same
batch shape as `sparse_or_dense_matrix`. The shape must be compatible with
the shape of `sparse_or_dense_matrix` and kwargs.
validate_args: When `True`, additional assertions might be embedded in the
graph.
Default value: `False` (i.e., no graph assertions are added).
name: Python `str` prefixed to ops created by this function.
Default value: "sparse_or_dense_matvecmul".
**kwargs: Keyword arguments to `tf.sparse_tensor_dense_matmul` or
`tf.matmul`.
Returns:
product: A dense (batch of) vector-shaped Tensor of the same batch shape and
dtype as `sparse_or_dense_matrix` and `dense_vector`.
|
def iteritems(self):
    """Iterate over all header lines, including duplicate ones.

    Yields (canonical_name, value) pairs, one per stored value.
    """
    for key in self:
        stored = self._container[key.lower()]
        canonical_name = stored[0]
        for value in stored[1:]:
            yield canonical_name, value
|
Iterate over all header lines, including duplicate ones.
|
def static_get_pdb_object(pdb_id, bio_cache = None, cache_dir = None):
    '''Return a PDB object for pdb_id, preferring cached copies.

    This method does not necessarily use a BioCache but it seems to fit
    here.  Lookup order: the BioCache (if given), then a .pdb file in
    cache_dir, then a fresh download from the RCSB (written back to
    cache_dir when possible).
    '''
    pdb_id = pdb_id.upper()
    if bio_cache:
        return bio_cache.get_pdb_object(pdb_id)
    if cache_dir:
        # Serve a previously cached copy when one exists.
        cached_path = os.path.join(cache_dir, '{0}.pdb'.format(pdb_id))
        if os.path.exists(cached_path):
            return PDB.from_filepath(cached_path)
    # Fetch from the RCSB and cache the download when possible.
    pdb_contents = retrieve_pdb(pdb_id)
    if cache_dir:
        write_file(os.path.join(cache_dir, "%s.pdb" % pdb_id), pdb_contents)
    return PDB(pdb_contents)
|
This method does not necessarily use a BioCache but it seems to fit here.
|
def call_api(self,
             action,
             params=None,
             method=('API', 'POST', 'application/x-www-form-urlencoded'),
             **kwargs):
    """
    Dispatch a request to the remote API.

    :param action: MethodUrl, appended to the selected host.
    :param params: Dictionary, form params for api.
    :param method: (url type, HTTP method name, content type) triple.
    :param kwargs: extra options (e.g. ``timeout``) forwarded to the
        underlying HTTP call.
    :return: whatever ``_http_call`` returns.
    """
    urltype, methodname, content_type = method
    # SMS traffic goes to a dedicated host; everything else to the API host.
    host = self.sms_host if urltype == 'SMS' else self.api_host
    if content_type == 'application/json':
        payload = json.dumps(params)
    else:
        payload = self._filter_params(params)
    return self._http_call(
        url=host + action,
        method=methodname,
        data=payload,
        headers=self._headers(content_type),
        **kwargs)
|
:param method: methodName
:param action: MethodUrl,
:param params: Dictionary,form params for api.
:param timeout: (optional) Float describing the timeout of the request.
:return:
|
def scrape_metrics(self, scraper_config):
    """
    Poll the data from prometheus and return the metrics as a generator.

    The first scrape runs in "dry run" mode when label joins are
    configured; afterwards the dry-run flag is cleared and stale label
    mappings are garbage collected.  The HTTP response is closed even if
    the consumer abandons the generator early.
    """
    response = self.poll(scraper_config)
    try:
        # no dry run if no label joins
        if not scraper_config['label_joins']:
            scraper_config['_dry_run'] = False
        elif not scraper_config['_watched_labels']:
            # build the _watched_labels set
            for val in itervalues(scraper_config['label_joins']):
                scraper_config['_watched_labels'].add(val['label_to_match'])
        for metric in self.parse_metric_family(response, scraper_config):
            yield metric
        # Set dry run off
        scraper_config['_dry_run'] = False
        # Garbage collect unused mapping and reset active labels
        for metric, mapping in list(iteritems(scraper_config['_label_mapping'])):
            for key in list(mapping):
                if key not in scraper_config['_active_label_mapping'][metric]:
                    del scraper_config['_label_mapping'][metric][key]
        scraper_config['_active_label_mapping'] = {}
    finally:
        # Runs when the generator is exhausted or closed early.
        response.close()
|
Poll the data from prometheus and return the metrics as a generator.
|
def extract_public_key(args):
    """ Load an ECDSA private key and extract the embedded public key as raw binary data. """
    signing_key = _load_ecdsa_signing_key(args)
    verifying_key = signing_key.get_verifying_key()
    args.public_keyfile.write(verifying_key.to_string())
    print("%s public key extracted to %s" % (args.keyfile.name, args.public_keyfile.name))
|
Load an ECDSA private key and extract the embedded public key as raw binary data.
|
def read(self, entity=None, attrs=None, ignore=None, params=None):
    """Do not read certain fields.

    Do not expect the server to return the ``content_view_filter``
    attribute. This has no practical impact, as the attribute must be
    provided when a :class:`nailgun.entities.ContentViewFilterRule` is
    instantiated.

    Also, ignore any field that is not returned by the server. For more
    information, see `Bugzilla #1238408
    <https://bugzilla.redhat.com/show_bug.cgi?id=1238408>`_.
    """
    if entity is None:
        # pylint:disable=no-member
        entity = type(self)(
            self._server_config,
            content_view_filter=self.content_view_filter,
        )
    if attrs is None:
        attrs = self.read_json()
    if ignore is None:
        ignore = set()
    ignore.add('content_view_filter')
    # Skip every field the server response does not include.
    missing_fields = {
        field_name
        for field_name in entity.get_fields()
        if field_name not in attrs
    }
    ignore.update(missing_fields)
    return super(ContentViewFilterRule, self).read(
        entity, attrs, ignore, params)
|
Do not read certain fields.
Do not expect the server to return the ``content_view_filter``
attribute. This has no practical impact, as the attribute must be
provided when a :class:`nailgun.entities.ContentViewFilterRule` is
instantiated.
Also, ignore any field that is not returned by the server. For more
information, see `Bugzilla #1238408
<https://bugzilla.redhat.com/show_bug.cgi?id=1238408>`_.
|
def run(self):
    "Pull jobs off the queue and execute them until told to stop."
    while True:
        # Block until a job arrives.
        job = self.jobs.get()
        try:
            job.run()
        except TerminationNotice:
            # Acknowledge the poison-pill job, then exit the loop.
            self.jobs.task_done()
            break
        else:
            self.jobs.task_done()
|
Get jobs from the queue and perform them as they arrive.
|
def register_token(platform, user_id, token, on_error=None, on_success=None):
    """ Register a device token for a user.

    :param str platform: the platform to register the token on; one of
        either Google Cloud Messaging (outbound.GCM) or Apple Push
        Notification Service (outbound.APNS).
    :param str | number user_id: the id you use to identify a user; this
        should be static for the lifetime of a user.
    :param str token: the token to register.
    :param func on_error: optional callback invoked on error with two
        parameters, `code` (one of outbound.ERROR_XXXXXX) and `error`
        (the corresponding message).
    :param func on_success: optional callback invoked without parameters
        if/when the API call succeeds.
    """
    callbacks = dict(on_error=on_error, on_success=on_success)
    # The second positional argument flags this as a registration.
    __device_token(platform, True, user_id, token=token, **callbacks)
|
Register a device token for a user.
:param str platform: The platform which to register the token on. One of either
Google Cloud Messaging (outbound.GCM) or Apple Push Notification Service
(outbound.APNS).
:param str | number user_id: the id you use to identify a user. this should
be static for the lifetime of a user.
:param str token: the token to register.
:param func on_error: An optional function to call in the event of an error.
on_error callback should take 2 parameters: `code` and `error`. `code` will be
one of outbound.ERROR_XXXXXX. `error` will be the corresponding message.
:param func on_success: An optional function to call if/when the API call succeeds.
on_success callback takes no parameters.
|
def _scrape_song_lyrics_from_url(self, url):
    """ Use BeautifulSoup to scrape song info off of a Genius song URL

    Returns the lyrics text, or None when the page is missing (404) or
    has no "lyrics" div.

    :param url: URL for the web page to scrape lyrics from
    """
    page = requests.get(url)
    if page.status_code == 404:
        return None
    # Scrape the song lyrics from the HTML
    html = BeautifulSoup(page.text, "html.parser")
    div = html.find("div", class_="lyrics")
    if not div:
        return None  # Sometimes the lyrics section isn't found
    # Scrape lyrics if proper section was found on page
    lyrics = div.get_text()
    if self.remove_section_headers:  # Remove [Verse], [Bridge], etc.
        # Strips every bracketed header; the trailing '*' lets the
        # pattern also match (and no-op on) empty positions.
        lyrics = re.sub('(\[.*?\])*', '', lyrics)
        # Collapse exactly two consecutive newlines (gaps between verses).
        lyrics = re.sub('\n{2}', '\n', lyrics)  # Gaps between verses
    return lyrics.strip("\n")
|
Use BeautifulSoup to scrape song info off of a Genius song URL
:param url: URL for the web page to scrape lyrics from
|
def _send_unsigned_long(self,value):
"""
Convert a numerical value into an integer, then to a bytes object.
Check bounds for unsigned long.
"""
# Coerce to int. This will throw a ValueError if the value can't
# actually be converted.
if type(value) != int:
new_value = int(value)
if self.give_warnings:
w = "Coercing {} into int ({})".format(value,new_value)
warnings.warn(w,Warning)
value = new_value
# Range check
if value > self.board.unsigned_long_max or value < self.board.unsigned_long_min:
err = "Value {} exceeds the size of the board's unsigned long.".format(value)
raise OverflowError(err)
return struct.pack(self.board.unsigned_long_type,value)
|
Convert a numerical value into an integer, then to a bytes object.
Check bounds for unsigned long.
|
def create_bot(self, name, avatar_url=None, callback_url=None, dm_notification=None,
               **kwargs):
    """Create a new bot in this particular group.

    :param str name: bot name
    :param str avatar_url: the URL of an image to use as an avatar
    :param str callback_url: a POST-back URL for each new message
    :param bool dm_notification: whether to POST-back for direct messages?
    :return: the new bot
    :rtype: :class:`~groupy.api.bots.Bot`
    """
    bot_fields = dict(name=name,
                      group_id=self.group_id,
                      avatar_url=avatar_url,
                      callback_url=callback_url,
                      dm_notification=dm_notification)
    return self._bots.create(**bot_fields)
|
Create a new bot in a particular group.
:param str name: bot name
:param str avatar_url: the URL of an image to use as an avatar
:param str callback_url: a POST-back URL for each new message
:param bool dm_notification: whether to POST-back for direct messages?
:return: the new bot
:rtype: :class:`~groupy.api.bots.Bot`
|
def dumps_tabledata(value, format_name="rst_grid_table", **kwargs):
    """Render a ``tabledata.TableData`` instance as a table-formatted string.

    :param tabledata.TableData value: Tabular data to dump.
    :param str format_name:
        Dumped format name of tabular data.
        Available formats are described in
        :py:meth:`~pytablewriter.TableWriterFactory.create_from_format_name`
    :raises TypeError: if ``value`` is empty/falsy.

    :Example:
        .. code:: python

            >>> dumps_tabledata(value)
            .. table:: sample_data

                ====== ====== ======
                attr_a attr_b attr_c
                ====== ====== ======
                1      4.0    a
                2      2.1    bb
                3      120.9  ccc
                ====== ====== ======
    """
    from ._factory import TableWriterFactory

    if not value:
        raise TypeError("value must be a tabledata.TableData instance")

    writer = TableWriterFactory.create_from_format_name(format_name)
    # Any extra keyword arguments are applied as writer attribute overrides.
    for writer_attr, writer_value in kwargs.items():
        setattr(writer, writer_attr, writer_value)
    writer.from_tabledata(value)

    return writer.dumps()
|
:param tabledata.TableData value: Tabular data to dump.
:param str format_name:
Dumped format name of tabular data.
Available formats are described in
:py:meth:`~pytablewriter.TableWriterFactory.create_from_format_name`
:Example:
.. code:: python
>>> dumps_tabledata(value)
.. table:: sample_data
====== ====== ======
attr_a attr_b attr_c
====== ====== ======
1 4.0 a
2 2.1 bb
3 120.9 ccc
====== ====== ======
|
def radius(self):
    '''
    Radius of the ellipse, Point class.

    Lazily initialized to Point(1, 1, 0) on first access and cached on
    the instance afterwards.
    '''
    if not hasattr(self, '_radius'):
        self._radius = Point(1, 1, 0)
    return self._radius
|
Radius of the ellipse, Point class.
|
def location_based_search(self, lng, lat, distance, unit="miles", attribute_map=None, page=0, limit=50):
    """Search based on location and other attribute filters.

    :param long lng: Longitude parameter
    :param long lat: Latitude parameter
    :param int distance: The radius of the query
    :param str unit: The unit of measure for the query, defaults to miles
    :param dict attribute_map: Additional attributes to apply to the location based query
    :param int page: The page to return
    :param int limit: Number of results per page
    :returns: List of objects
    :rtype: list
    """
    # Convert the radius to degrees: ~69 miles or ~111.045 km per degree.
    # Divide by a float literal so integer inputs are not truncated --
    # the original float(distance/69) performed integer division first
    # under Python 2, losing all sub-degree precision.
    if unit == "miles":
        distance = distance / 69.0
    else:
        distance = distance / 111.045

    # Geospatial $within/$center query around the given point.
    query = {
        "loc": {
            "$within": {
                "$center": [[lng, lat], distance]}
        }
    }

    # Allow querying additional attributes.
    if attribute_map:
        query = dict(query.items() + attribute_map.items())

    results = yield self.find(query, page=page, limit=limit)
    raise Return(self._list_cursor_to_json(results))
|
Search based on location and other attribute filters
:param long lng: Longitude parameter
:param long lat: Latitude parameter
:param int distance: The radius of the query
:param str unit: The unit of measure for the query, defaults to miles
:param dict attribute_map: Additional attributes to apply to the location bases query
:param int page: The page to return
:param int limit: Number of results per page
:returns: List of objects
:rtype: list
|
def _add_helpingmaterials(config, helping_file, helping_type):
    """Add helping materials to a project.

    Loads entries from ``helping_file`` (format given by ``helping_type``),
    creates one helping material per entry via the pbclient API, and
    returns a human-readable status string.

    :param config: CLI configuration object exposing ``project``,
        ``pbclient``, ``all`` and ``server``.
    :param helping_file: path to the file containing the materials.
    :param helping_type: format of that file (json, csv, po, properties).
    :returns: status message string.
    :raises ProjectNotFound, TaskNotFound: re-raised unchanged.
    """
    try:
        project = find_project_by_short_name(config.project['short_name'],
                                             config.pbclient,
                                             config.all)
        data = _load_data(helping_file, helping_type)
        if len(data) == 0:
            return ("Unknown format for the tasks file. Use json, csv, po or "
                    "properties.")
        # Show progress bar
        with click.progressbar(data, label="Adding Helping Materials") as pgbar:
            for d in pgbar:
                helping_info, file_path = create_helping_material_info(d)
                if file_path:
                    # Create first the media object
                    hm = config.pbclient.create_helpingmaterial(project_id=project.id,
                                                                info=helping_info,
                                                                file_path=file_path)
                    check_api_error(hm)
                    # Merge the original info on top of the server-assigned
                    # info, then push the merged record back.
                    z = hm.info.copy()
                    z.update(helping_info)
                    hm.info = z
                    response = config.pbclient.update_helping_material(hm)
                    check_api_error(response)
                else:
                    response = config.pbclient.create_helpingmaterial(project_id=project.id,
                                                                      info=helping_info)
                    check_api_error(response)
                # Check if for the data we have to auto-throttle task creation
                # NOTE(review): endpoint spelling '/api/helpinmaterial' looks
                # like a typo for 'helpingmaterial' -- confirm against the
                # server's rate-limit endpoints before changing it.
                sleep, msg = enable_auto_throttling(config, data,
                                                    endpoint='/api/helpinmaterial')
                # If true, warn user
                if sleep:  # pragma: no cover
                    click.secho(msg, fg='yellow')
                # If auto-throttling enabled, sleep for sleep seconds
                if sleep:  # pragma: no cover
                    time.sleep(sleep)
        return ("%s helping materials added to project: %s" % (len(data),
                config.project['short_name']))
    except exceptions.ConnectionError:
        return ("Connection Error! The server %s is not responding" % config.server)
    except (ProjectNotFound, TaskNotFound):
        raise
|
Add helping materials to a project.
|
def ending_long_process(self, message=""):
    """
    Clear main window's status bar and restore mouse cursor.

    :param message: optional status-bar text shown for 2 seconds after
        the long-running process finishes.
    """
    # Undo the override (busy) cursor installed when the process started.
    QApplication.restoreOverrideCursor()
    self.show_message(message, timeout=2000)
    # Flush pending Qt events so the cursor/status change appears at once.
    QApplication.processEvents()
|
Clear main window's status bar and restore mouse cursor.
|
def clone(name_a, name_b, **kwargs):
    '''
    Creates a clone of the given snapshot.

    name_a : string
        name of snapshot
    name_b : string
        name of filesystem or volume
    create_parent : boolean
        creates all the non-existing parent datasets. any property specified on the
        command line using the -o option is ignored.
    properties : dict
        additional zfs properties (-o)

    .. note::

        ZFS properties can be specified at the time of creation of the filesystem by
        passing an additional argument called "properties" and specifying the properties
        with their respective values in the form of a python dictionary::

            properties="{'property1': 'value1', 'property2': 'value2'}"

    .. versionadded:: 2016.3.0

    CLI Example:

    .. code-block:: bash

        salt '*' zfs.clone myzpool/mydataset@yesterday myzpool/mydataset_yesterday
    '''
    ## Configure command
    # Build flags, dataset properties and targets for `zfs clone`.
    flags = ['-p'] if kwargs.get('create_parent', False) else []
    filesystem_properties = kwargs.get('properties', {})
    target = [name_a, name_b]

    ## Clone filesystem/volume
    res = __salt__['cmd.run_all'](
        __utils__['zfs.zfs_command'](
            command='clone',
            flags=flags,
            filesystem_properties=filesystem_properties,
            target=target,
        ),
        python_shell=False,
    )

    return __utils__['zfs.parse_command_result'](res, 'cloned')
|
Creates a clone of the given snapshot.
name_a : string
name of snapshot
name_b : string
name of filesystem or volume
create_parent : boolean
creates all the non-existing parent datasets. any property specified on the
command line using the -o option is ignored.
properties : dict
additional zfs properties (-o)
.. note::
ZFS properties can be specified at the time of creation of the filesystem by
passing an additional argument called "properties" and specifying the properties
with their respective values in the form of a python dictionary::
properties="{'property1': 'value1', 'property2': 'value2'}"
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' zfs.clone myzpool/mydataset@yesterday myzpool/mydataset_yesterday
|
async def keep_alive(self, period=1, margin=.3):
    """
    Periodically send a keep alive message to the Arduino.

    Frequency of keep alive transmission is calculated as follows:
    keep_alive_sent = period - (period * margin)

    :param period: Time period between keepalives. Range is 0-10 seconds.
                   0 disables the keepalive mechanism.
    :param margin: Safety margin to assure keepalives are sent before
                   period expires. Range is 0.1 to 0.9
    :returns: No return value
    """
    # Clamp period into the supported 0-10 second range.
    if period < 0:
        period = 0
    if period > 10:
        period = 10
    self.period = period
    # Clamp margin into the supported 0.1-0.9 range.
    if margin < .1:
        margin = .1
    if margin > .9:
        margin = .9
    self.margin = margin
    # Encode the period as two 7-bit Firmata data bytes (LSB, MSB).
    self.keep_alive_interval = [period & 0x7f, (period >> 7) & 0x7f]
    # NOTE(review): the interval is first announced with the
    # SAMPLING_INTERVAL sysex command rather than KEEP_ALIVE -- confirm
    # this is intentional on the firmware side.
    await self._send_sysex(PrivateConstants.SAMPLING_INTERVAL,
                           self.keep_alive_interval)
    while True:
        if self.period:
            # NOTE(review): period - (period - (period * margin))
            # simplifies to period * margin, i.e. the sleep is the
            # margin fraction of the period, not the docstring's
            # period - period*margin -- confirm the intended formula.
            await asyncio.sleep(period - (period - (period * margin)))
            await self._send_sysex(PrivateConstants.KEEP_ALIVE,
                                   self.keep_alive_interval)
        else:
            # period == 0 disables keepalives entirely.
            break
|
Periodically send a keep alive message to the Arduino.
Frequency of keep alive transmission is calculated as follows:
keep_alive_sent = period - (period * margin)
:param period: Time period between keepalives. Range is 0-10 seconds.
0 disables the keepalive mechanism.
:param margin: Safety margin to assure keepalives are sent before
period expires. Range is 0.1 to 0.9
:returns: No return value
|
def run(self, debug=False, reload=None):
    """
    Convenience method for running bots in getUpdates mode.

    :param bool debug: Enable debug logging and automatic reloading
    :param bool reload: Automatically reload bot on code change

    :Example:

    >>> if __name__ == '__main__':
    >>>     bot.run()
    """
    loop = asyncio.get_event_loop()
    logging.basicConfig(level=logging.DEBUG if debug else logging.INFO)

    # Auto-reload defaults to following the debug flag.
    if reload is None:
        reload = debug

    bot_loop = asyncio.ensure_future(self.loop())

    try:
        if reload:
            loop.run_until_complete(run_with_reloader(loop, bot_loop, self.stop))
        else:
            loop.run_until_complete(bot_loop)

    # User cancels
    except KeyboardInterrupt:
        logger.debug("User cancelled")
        bot_loop.cancel()
        self.stop()

    # Stop loop
    finally:
        # aiohttp >= 2.3 requires the client session to be closed
        # explicitly before the loop is torn down.
        if AIOHTTP_23:
            loop.run_until_complete(self.session.close())
        logger.debug("Closing loop")
        loop.stop()
        loop.close()
|
Convenience method for running bots in getUpdates mode
:param bool debug: Enable debug logging and automatic reloading
:param bool reload: Automatically reload bot on code change
:Example:
>>> if __name__ == '__main__':
>>> bot.run()
|
def update(self, friendly_name=None, description=None, expiry=None, schema=None):
    """Selectively updates Table information.

    Any parameters that are omitted or None are not updated.

    Args:
      friendly_name: if not None, the new friendly name.
      description: if not None, the new description.
      expiry: if not None, the new expiry time, either as a DateTime or
          milliseconds since epoch.
      schema: if not None, the new schema: either a list of dictionaries
          or a Schema.
    """
    self._load_info()
    if friendly_name is not None:
        self._info['friendlyName'] = friendly_name
    if description is not None:
        self._info['description'] = description
    if expiry is not None:
        if isinstance(expiry, datetime.datetime):
            # The API expects milliseconds since the epoch (UTC).
            expiry = calendar.timegm(expiry.utctimetuple()) * 1000
        self._info['expirationTime'] = expiry
    if schema is not None:
        if isinstance(schema, _schema.Schema):
            schema = schema._bq_schema
        self._info['schema'] = {'fields': schema}
    try:
        self._api.table_update(self._name_parts, self._info)
    except datalab.utils.RequestException:
        # The cached metadata is out of sync now; abandon it.
        self._info = None
    # NOTE: the original code had a redundant no-op clause here
    # (`except Exception as e: raise e`); other exceptions still
    # propagate unchanged without it.
|
Selectively updates Table information.
Any parameters that are omitted or None are not updated.
Args:
friendly_name: if not None, the new friendly name.
description: if not None, the new description.
expiry: if not None, the new expiry time, either as a DateTime or milliseconds since epoch.
schema: if not None, the new schema: either a list of dictionaries or a Schema.
|
def _do_functions(self, rule, p_selectors, p_parents, p_children, scope, media, c_lineno, c_property, c_codestr, code, name):
    """
    Implements @mixin and @function.

    Parses the declaration's name and parameter list, evaluates parameter
    defaults in the current context, and registers the mixin/function body
    under rule[OPTIONS] keyed by '<code> <name>:<arity>' for every arity
    that can be satisfied via defaults.
    """
    if name:
        # Split "name(params)" into the function name and parameter text.
        funct, params, _ = name.partition('(')
        funct = funct.strip()
        params = split_params(depar(params + _))
        defaults = {}
        new_params = []
        for param in params:
            # Each parameter may carry a "name: default" pair.
            param, _, default = param.partition(':')
            param = param.strip()
            default = default.strip()
            if param:
                new_params.append(param)
                if default:
                    # Defaults are evaluated against the defining context.
                    default = self.apply_vars(
                        default, rule[CONTEXT], None, rule)
                    defaults[param] = default
        # Parameters shadow outer variables, so drop them from the
        # context used to pre-expand the body.
        context = rule[CONTEXT].copy()
        for p in new_params:
            context.pop(p, None)
        mixin = [list(new_params), defaults, self.
                 apply_vars(c_codestr, context, None, rule)]
        if code == '@function':
            # Wrap the mixin triple in a callable that spawns a child
            # rule, runs the body, and returns its '@return' option.
            def _call(mixin):
                def __call(R, *args, **kwargs):
                    m_params = mixin[0]
                    m_vars = rule[CONTEXT].copy()
                    m_vars.update(mixin[1])
                    m_codestr = mixin[2]
                    # Bind positional then keyword arguments.
                    for i, a in enumerate(args):
                        m_vars[m_params[i]] = a
                    m_vars.update(kwargs)
                    _options = rule[OPTIONS].copy()
                    _rule = spawn_rule(R, codestr=m_codestr, context=m_vars, options=_options, deps=set(), properties=[], final=False, lineno=c_lineno)
                    self.manage_children(_rule, p_selectors, p_parents,
                                         p_children, (scope or '') + '', R[MEDIA])
                    ret = _rule[OPTIONS].pop('@return', '')
                    return ret
                return __call
            _mixin = _call(mixin)
            _mixin.mixin = mixin
            mixin = _mixin
        # Insert as many @mixin options as the default parameters:
        # register the mixin for each arity down to the first parameter
        # without a default (callers may omit defaulted trailing args).
        while len(new_params):
            rule[OPTIONS]['%s %s:%d' % (code, funct,
                                        len(new_params))] = mixin
            param = new_params.pop()
            if param not in defaults:
                break
        if not new_params:
            rule[OPTIONS][code + ' ' + funct + ':0'] = mixin
|
Implements @mixin and @function
|
def check_work_done(self, grp):
    """
    Check for the existence of alignment and result files.

    Returns a (alignment_exists, result_exists) pair of booleans for the
    cached files belonging to *grp*.
    """
    ident = self.get_id(grp)
    alignment_path = os.path.join(self.cache_dir, '{}.phy'.format(ident))
    result_path = os.path.join(
        self.cache_dir,
        '{}.{}.json'.format(ident, self.task_interface.name))
    return os.path.exists(alignment_path), os.path.exists(result_path)
|
Check for the existence of alignment and result files.
|
def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file. It requires one argument, readline,
    in the same way as the tokenize() generator.

    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read
    in.

    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263. If both a bom and a cookie are present, but
    disagree, a SyntaxError will be raised. If the encoding cookie is an invalid
    charset, raise a SyntaxError. Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.

    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    bom_found = False
    encoding = None
    default = 'utf-8'

    def read_or_stop():
        # Treat end-of-input as an empty byte string so callers can just
        # test for falsiness.
        try:
            return readline()
        except StopIteration:
            return bytes()

    def find_cookie(line):
        # A valid PEP 263 coding cookie must be pure ASCII.
        try:
            line_string = line.decode('ascii')
        except UnicodeDecodeError:
            return None
        # cookie_re is the module-level "coding: <name>" pattern.
        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            raise SyntaxError("unknown encoding: " + encoding)
        if bom_found:
            # A BOM pins the encoding to utf-8; a conflicting cookie is
            # an error, and an agreeing one gets the '-sig' suffix.
            if codec.name != 'utf-8':
                # This behaviour mimics the Python interpreter
                raise SyntaxError('encoding problem: utf-8')
            encoding += '-sig'
        return encoding

    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        # Strip the 3-byte BOM and remember it so find_cookie can verify
        # any cookie agrees with utf-8.
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []

    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]

    # PEP 263 allows the cookie on the second line as well.
    second = read_or_stop()
    if not second:
        return default, [first]

    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]

    return default, [first, second]
|
The detect_encoding() function is used to detect the encoding that should
be used to decode a Python source file. It requires one argument, readline,
in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read
in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are present, but
disagree, a SyntaxError will be raised. If the encoding cookie is an invalid
charset, raise a SyntaxError. Note that if a utf-8 bom is found,
'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be returned.
|
def write_stilde(self, stilde_dict, group=None):
    """Writes stilde for each IFO to file.

    Parameters
    -----------
    stilde_dict : dict
        A dict of FrequencySeries where the key is the IFO.
    group : {None, str}
        The group to write the strain to. If None, will write to the top
        level.
    """
    path_template = self.data_group + "/{ifo}/stilde"
    if group is not None:
        path_template = '/'.join([group, path_template])
    for ifo, stilde in stilde_dict.items():
        dataset = path_template.format(ifo=ifo)
        self[dataset] = stilde
        # Record the frequency resolution and start epoch alongside the data.
        self[dataset].attrs['delta_f'] = stilde.delta_f
        self[dataset].attrs['epoch'] = float(stilde.epoch)
|
Writes stilde for each IFO to file.
Parameters
-----------
stilde : {dict, FrequencySeries}
A dict of FrequencySeries where the key is the IFO.
group : {None, str}
The group to write the strain to. If None, will write to the top
level.
|
def update(self, feedforwardInputI, feedforwardInputE, v, recurrent=True,
           envelope=False, iSpeedTuning=False, enforceDale=True):
    """
    Do one update of the CAN network, of length self.dt.

    :param feedforwardInputI: The feedforward input to inhibitory cells.
    :param feedforwardInputE: The feedforward input to excitatory cells.
    :param v: The current velocity.
    :param recurrent: Whether or not recurrent connections should be used.
    :param envelope: Whether or not an envelope should be applied.
    :param iSpeedTuning: Whether or not inhibitory cells should also have their
        activations partially depend on current movement speed. This is
        necessary for periodic training, serving a role similar to that of
        the envelope.
    :param enforceDale: Whether or not Dale's law should be enforced locally.
        Helps with training with recurrent weights active, but can slow down
        training.
    """
    # Place-cell drive into the I, E-left and E-right populations.
    # np.matmul writes results in place into the instantaneous buffers.
    np.matmul(self.activationsP * self.placeGainI, self.weightsPI,
              self.instantaneousI)
    np.matmul(self.activationsP * self.placeGainE, self.weightsPEL,
              self.instantaneousEL)
    np.matmul(self.activationsP * self.placeGainE, self.weightsPER,
              self.instantaneousER)

    # Add the boosting (activation-history) term and feedforward input.
    self.instantaneousI += self.boostEffectI * \
        self.activationHistoryI + \
        feedforwardInputI
    self.instantaneousEL += self.boostEffectE * \
        self.activationHistoryEL + \
        feedforwardInputE
    self.instantaneousER += self.boostEffectE * \
        self.activationHistoryER + \
        feedforwardInputE

    # Optionally rectify weights so each population keeps a consistent
    # sign (Dale's law): I-source weights <= 0, E->I weights >= 0.
    if enforceDale:
        weightsII = np.minimum(self.weightsII, 0)
        weightsIER = np.minimum(self.weightsIER, 0)
        weightsIEL = np.minimum(self.weightsIEL, 0)
        weightsELI = np.maximum(self.weightsELI, 0)
        weightsERI = np.maximum(self.weightsERI, 0)
    else:
        weightsII = self.weightsII
        weightsIER = self.weightsIER
        weightsIEL = self.weightsIEL
        weightsELI = self.weightsELI
        weightsERI = self.weightsERI

    # Recurrent contributions between the three populations.
    if recurrent:
        self.instantaneousI += (np.matmul(self.activationsEL, weightsELI) +
                                np.matmul(self.activationsER, weightsERI) +
                                np.matmul(self.activationsI, weightsII))
        self.instantaneousEL += np.matmul(self.activationsI, weightsIEL)
        self.instantaneousER += np.matmul(self.activationsI, weightsIER)

    # Constant tonic drive to every population.
    self.instantaneousI += self.tonicMagnitude
    self.instantaneousEL += self.tonicMagnitude
    self.instantaneousER += self.tonicMagnitude

    # Velocity modulation: positive v suppresses leftward cells and
    # enhances rightward cells (clipped at zero).
    self.instantaneousEL *= max((1 - self.velocityGain * v), 0)
    self.instantaneousER *= max((1 + self.velocityGain * v), 0)

    if iSpeedTuning:
        # Scale inhibition with movement speed (capped at 1).
        self.instantaneousI *= min(self.velocityGain * np.abs(v), 1)

    if envelope:
        self.instantaneousI *= self.envelopeI
        self.instantaneousER *= self.envelopeE
        self.instantaneousEL *= self.envelopeE

    # Input must be positive.
    np.maximum(self.instantaneousI, 0., self.instantaneousI)
    np.maximum(self.instantaneousEL, 0., self.instantaneousEL)
    np.maximum(self.instantaneousER, 0., self.instantaneousER)

    # Activity decay and timestep adjustment
    self.activationsI += (self.instantaneousI -
                          self.activationsI / self.decayConstant) * self.dt
    self.activationsEL += (self.instantaneousEL -
                           self.activationsEL / self.decayConstant) * self.dt
    self.activationsER += (self.instantaneousER -
                           self.activationsER / self.decayConstant) * self.dt

    # Finally, clip activations for stability
    np.minimum(self.activationsI, self.clip, self.activationsI)
    np.minimum(self.activationsEL, self.clip, self.activationsEL)
    np.minimum(self.activationsER, self.clip, self.activationsER)

    # Update the boosting history: each cell tracks its deviation from
    # the envelope-normalized population mean activation...
    self.activationHistoryI += (-self.activationsI +
                                np.sum(self.activationsI) / np.sum(self.envelopeI)) * self.dt
    self.activationHistoryEL += (-self.activationsEL +
                                 np.sum(self.activationsEL) / np.sum(self.envelopeE)) * self.dt
    self.activationHistoryER += (-self.activationsER +
                                 np.sum(self.activationsER) / np.sum(self.envelopeE)) * self.dt

    # ...with exponential decay governed by the time constant alpha.
    self.activationHistoryI -= self.dt * self.activationHistoryI / self.alpha
    self.activationHistoryEL -= self.dt * self.activationHistoryEL / self.alpha
    self.activationHistoryER -= self.dt * self.activationHistoryER / self.alpha
|
Do one update of the CAN network, of length self.dt.
:param feedforwardInputI: The feedforward input to inhibitory cells.
:param feedforwardInputE: The feedforward input to excitatory cells.
:param placeActivity: Activity of the place code.
:param v: The current velocity.
:param recurrent: Whether or not recurrent connections should be used.
:param envelope: Whether or not an envelope should be applied.
:param iSpeedTuning: Whether or not inhibitory cells should also have their
activations partially depend on current movement speed. This is
necessary for periodic training, serving a role similar to that of
the envelope.
:param enforceDale: Whether or not Dale's law should be enforced locally. Helps with
training with recurrent weights active, but can slow down training.
|
def run_script(self,
               script,
               shutit_pexpect_child=None,
               in_shell=True,
               echo=None,
               note=None,
               loglevel=logging.DEBUG):
    """Run the passed-in string as a script on the target's command line.
    The script is de-indented and stripped before being run.

    @param script:               String representing the script.
    @param shutit_pexpect_child: See send()
    @param in_shell:             Indicate whether we are in a shell or not.
                                 (Default: True)
    @param note:                 See send()

    @type script:   string
    @type in_shell: boolean
    """
    shutit_global.shutit_global_object.yield_to_draw()
    # Fall back to the current session's pexpect child when none is given.
    child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
    session = self.get_shutit_pexpect_session_from_child(child)
    return session.run_script(script,
                              in_shell=in_shell,
                              echo=echo,
                              note=note,
                              loglevel=loglevel)
|
Run the passed-in string as a script on the target's command line.
@param script: String representing the script. It will be de-indented
and stripped before being run.
@param shutit_pexpect_child: See send()
@param in_shell: Indicate whether we are in a shell or not. (Default: True)
@param note: See send()
@type script: string
@type in_shell: boolean
|
def parse(self, data, path=None):
    """Parse a raw specification into definitions.

    Args:
        data (str): Raw specification text.
        path (Optional[str]): Path to specification on filesystem. Only
            used to tag tokens with the file they originated from.
    """
    assert not self.exhausted, 'Must call get_parser() to reset state.'
    self.path = path
    parsed_data = self.yacc.parse(data, lexer=self.lexer, debug=self.debug)
    # Lexer errors are prepended ahead of parser errors: lexing problems
    # are usually the root cause, and only the first error is shown.
    for err_msg, lineno in reversed(self.lexer.errors):
        self.errors.insert(0, (err_msg, lineno, self.path))
    parsed_data.extend(self.anony_defs)
    self.exhausted = True
    return parsed_data
|
Args:
data (str): Raw specification text.
path (Optional[str]): Path to specification on filesystem. Only
used to tag tokens with the file they originated from.
|
def swap_dims(self, dims_dict, inplace=None):
    """Returns a new object with swapped dimensions.

    Parameters
    ----------
    dims_dict : dict-like
        Dictionary whose keys are current dimension names and whose values
        are new names. Each value must already be a variable in the
        dataset.
    inplace : bool, optional
        If True, swap dimensions in-place. Otherwise, return a new dataset
        object.

    Returns
    -------
    renamed : Dataset
        Dataset with swapped dimensions.

    See Also
    --------
    Dataset.rename
    DataArray.swap_dims
    """
    # TODO: deprecate this method in favor of a (less confusing)
    # rename_dims() method that only renames dimensions.
    inplace = _check_inplace(inplace)
    # Validate the mapping: every key must be an existing dimension and
    # every value a 1D variable along that dimension.
    for k, v in dims_dict.items():
        if k not in self.dims:
            raise ValueError('cannot swap from dimension %r because it is '
                             'not an existing dimension' % k)
        if self.variables[v].dims != (k,):
            raise ValueError('replacement dimension %r is not a 1D '
                             'variable along the old dimension %r'
                             % (v, k))

    result_dims = set(dims_dict.get(dim, dim) for dim in self.dims)

    # Variables promoted to dimensions become coordinates.
    coord_names = self._coord_names.copy()
    coord_names.update(dims_dict.values())

    variables = OrderedDict()
    indexes = OrderedDict()
    for k, v in self.variables.items():
        dims = tuple(dims_dict.get(dim, dim) for dim in v.dims)
        if k in result_dims:
            # New dimension coordinates need an index; reuse the existing
            # one when available.
            var = v.to_index_variable()
            if k in self.indexes:
                indexes[k] = self.indexes[k]
            else:
                indexes[k] = var.to_index()
        else:
            var = v.to_base_variable()
        var.dims = dims
        variables[k] = var

    return self._replace_with_new_dims(variables, coord_names,
                                       indexes=indexes, inplace=inplace)
|
Returns a new object with swapped dimensions.
Parameters
----------
dims_dict : dict-like
Dictionary whose keys are current dimension names and whose values
are new names. Each value must already be a variable in the
dataset.
inplace : bool, optional
If True, swap dimensions in-place. Otherwise, return a new dataset
object.
Returns
-------
renamed : Dataset
Dataset with swapped dimensions.
See Also
--------
Dataset.rename
DataArray.swap_dims
|
def get_record_params(args):
    """Extract DNS record parameters from command options.

    Argument:
        args: arguments object exposing ``name``, ``rtype``, ``content``,
            ``ttl`` and ``priority`` attributes.

    Returns the ``(name, rtype, content, ttl, priority)`` tuple.
    """
    attrs = ('name', 'rtype', 'content', 'ttl', 'priority')
    return tuple(getattr(args, attr) for attr in attrs)
|
Get record parameters from command options.
Argument:
args: arguments object
|
def render_json_response(self, context_dict, status=200):
    """
    Serialize *context_dict* as UTF-8 JSON and return it in an
    HttpResponse. Limited serialization for shipping plain data --
    do not use for models or other complex or custom objects.
    """
    payload = json.dumps(
        context_dict,
        cls=DjangoJSONEncoder,
        **self.get_json_dumps_kwargs()
    ).encode(u'utf-8')
    return HttpResponse(payload,
                        content_type=self.get_content_type(),
                        status=status)
|
Limited serialization for shipping plain data. Do not use for models
or other complex or custom objects.
|
def do_gen(argdict):
    '''Generate the whole site.

    :param argdict: parsed command-line argument mapping used to build
        the site object.
    '''
    # NOTE: Python 2 print statements -- this module is not Python 3
    # compatible as written.
    site = make_site_obj(argdict)
    try:
        # Time the full site generation for the status message.
        st = time.time()
        site.generate()
        et = time.time()
        print "Generated Site in %f seconds."% (et-st)
    except ValueError as e:  # pragma: no cover
        print "Cannot generate. You are not within a simplystatic \
tree and you didn't specify a directory."
|
Generate the whole site.
|
def template_substitute(text, **kwargs):
    """
    Replace placeholders in text by using the keyword-argument mapping.
    Placeholders whose names are not present in the mapping are left
    untouched.

    :param text: Text to search and replace placeholders in.
    :param kwargs: Mapping of placeholder names to replacement values.
    :return: Potentially modified text with replaced placeholders.
    """
    # A placeholder looks like "{name}"; replace each one that has a
    # corresponding keyword argument. (Docstring previously documented a
    # non-existent `data` parameter.)
    for name, value in kwargs.items():
        placeholder_pattern = "{%s}" % name
        if placeholder_pattern in text:
            text = text.replace(placeholder_pattern, value)
    return text
|
Replace placeholders in text by using the data mapping.
Other placeholders that is not represented by data is left untouched.
:param text: Text to search and replace placeholders.
:param data: Data mapping/dict for placeholder key and values.
:return: Potentially modified text with replaced placeholders.
|
def copy_from(self,
              container: Container,
              fn_container: str,
              fn_host: str
              ) -> None:
    """
    Copies a given file from the container to a specified location on the
    host machine.

    Raises subprocess.CalledProcessError if the `docker cp` command fails.
    """
    logger.debug("Copying file from container, %s: %s -> %s",
                 container.uid, fn_container, fn_host)
    # Pass the command as an argv list with shell=False so that file
    # names containing quotes or shell metacharacters cannot break out
    # of the command line (the previous shell-string form was injectable).
    cmd = ['docker', 'cp',
           '{}:{}'.format(container.id, fn_container),
           fn_host]
    try:
        subprocess.check_output(cmd)
        logger.debug("Copied file from container, %s: %s -> %s",
                     container.uid, fn_container, fn_host)
    # TODO implement error handling
    except subprocess.CalledProcessError:
        logger.exception("Failed to copy file from container, %s: %s -> %s",  # noqa: pycodestyle
                         container.uid, fn_container, fn_host)
        raise
|
Copies a given file from the container to a specified location on the
host machine.
|
def todate(val):
    '''Convert val to a datetime.date instance by trying several
    conversion algorithms in turn.

    Raises ValueError if every conversion fails.
    '''
    if not val:
        raise ValueError("Value not provided")
    if isinstance(val, datetime):
        return val.date()
    if isinstance(val, date):
        return val
    try:
        # Numeric forms: 8 digits -> yyyymmdd, 5 digits -> Julian date.
        digits = str(int(val))
        if len(digits) == 8:
            return yyyymmdd2date(val)
        if len(digits) == 5:
            return juldate2date(val)
        raise ValueError
    except Exception:
        # Try to convert using the generic string-parsing algorithm.
        try:
            return date_from_string(val).date()
        except Exception:
            raise ValueError("Could not convert %s to date" % val)
|
Convert val to a datetime.date instance by trying several
conversion algorithms.
If all of them fail, it raises a ValueError exception.
|
def fetchGroupInfo(self, *group_ids):
    """
    Get groups' info from IDs, unordered.

    :param group_ids: One or more group ID(s) to query
    :return: :class:`models.Group` objects, labeled by their ID
    :rtype: dict
    :raises: FBchatException if request failed
    """
    threads = self.fetchThreadInfo(*group_ids)
    groups = {}
    for thread_id, thread in threads.items():
        # Every returned thread must actually be a group.
        if thread.type != ThreadType.GROUP:
            raise FBchatUserError("Thread {} was not a group".format(thread))
        groups[thread_id] = thread
    return groups
|
Get groups' info from IDs, unordered
:param group_ids: One or more group ID(s) to query
:return: :class:`models.Group` objects, labeled by their ID
:rtype: dict
:raises: FBchatException if request failed
|
def request(self, path,
            args=None, files=None, opts=None, stream=False,
            decoder=None, headers=None, data=None):
    """Makes an HTTP request to the IPFS daemon.

    This function returns the contents of the HTTP response from the IPFS
    daemon.

    Raises
    ------
    ~ipfsapi.exceptions.ErrorResponse
    ~ipfsapi.exceptions.ConnectionError
    ~ipfsapi.exceptions.ProtocolError
    ~ipfsapi.exceptions.StatusError
    ~ipfsapi.exceptions.TimeoutError

    Parameters
    ----------
    path : str
        The REST command path to send
    args : list
        Positional parameters to be sent along with the HTTP request
    files : :class:`io.RawIOBase` | :obj:`str` | :obj:`list`
        The file object(s) or path(s) to stream to the daemon
    opts : dict
        Query string parameters to be sent along with the HTTP request
    decoder : str
        The encoder to use to parse the HTTP response
    kwargs : dict
        Additional arguments to pass to :mod:`requests`
    """
    # None defaults replace the original mutable default arguments
    # ([], {}); behaviour is unchanged for callers.
    if args is None:
        args = []
    if files is None:
        files = []
    if opts is None:
        opts = {}
    if headers is None:
        headers = {}

    url = self.base + path

    params = [('stream-channels', 'true')]
    for opt in opts.items():
        params.append(opt)
    for arg in args:
        params.append(('arg', arg))

    # Anything carrying a body goes out as POST; plain queries as GET.
    method = 'post' if (files or data) else 'get'
    parser = encoding.get_encoding(decoder if decoder else "none")

    return self._request(method, url, params, parser, stream,
                         files, headers, data)
|
Makes an HTTP request to the IPFS daemon.
This function returns the contents of the HTTP response from the IPFS
daemon.
Raises
------
~ipfsapi.exceptions.ErrorResponse
~ipfsapi.exceptions.ConnectionError
~ipfsapi.exceptions.ProtocolError
~ipfsapi.exceptions.StatusError
~ipfsapi.exceptions.TimeoutError
Parameters
----------
path : str
The REST command path to send
args : list
Positional parameters to be sent along with the HTTP request
files : :class:`io.RawIOBase` | :obj:`str` | :obj:`list`
The file object(s) or path(s) to stream to the daemon
opts : dict
Query string parameters to be sent along with the HTTP request
decoder : str
The encoder to use to parse the HTTP response
kwargs : dict
Additional arguments to pass to :mod:`requests`
|
def randomSize(cls, widthLimits, heightLimits, origin=None):
    '''
    Build a rectangle anchored at *origin* whose width and height are
    drawn uniformly at random from the given inclusive limits.

    :param: widthLimits - iterable of integers with length >= 2
    :param: heightLimits - iterable of integers with length >= 2
    :param: origin - optional Point subclass
    :return: Rectangle
    '''
    rect = cls(0, 0, origin)
    rect.w = random.randint(widthLimits[0], widthLimits[1])
    rect.h = random.randint(heightLimits[0], heightLimits[1])
    return rect
|
:param: widthLimits - iterable of integers with length >= 2
:param: heightLimits - iterable of integers with length >= 2
:param: origin - optional Point subclass
:return: Rectangle
|
def load(self, **kwargs):
    """Load a given resource.

    Loads a given resource provided a 'name' and an optional 'slot'
    parameter. The 'slot' parameter is not a required load parameter
    because it is provided as an optional way of constructing the
    correct 'name' of the vCMP resource.

    :param kwargs: load parameters; 'name' is required, 'slot' optional
    :return: the loaded resource
    """
    # Always request name transformation, then let _mutate_name fold the
    # optional 'slot' into the resource name before delegating the load.
    kwargs['transform_name'] = True
    return self._load(**self._mutate_name(kwargs))
|
Loads a given resource
Loads a given resource provided a 'name' and an optional 'slot'
parameter. The 'slot' parameter is not a required load parameter
because it is provided as an optional way of constructing the
correct 'name' of the vCMP resource.
:param kwargs:
:return:
|
def count_generator(generator, memory_efficient=True):
    """Count number of items in a generator.

    memory_efficient=True, slower, but memory efficient (the generator is
    consumed one item at a time).
    memory_efficient=False, faster, but materializes the whole generator
    in memory first.

    :param generator: any iterable/generator to count (it is consumed)
    :param memory_efficient: trade speed for constant memory usage
    :return: the number of items yielded
    """
    if memory_efficient:
        # sum over a generator expression counts at C speed without
        # materializing the items.
        return sum(1 for _ in generator)
    return len(list(generator))
|
Count number of item in generator.
memory_efficient=True, 3 times slower, but memory_efficient.
memory_efficient=False, faster, but cost more memory.
|
def parse(self, argv, tokenizer=DefaultTokenizer):
    """
    Parse command line to out tree
    :type argv object
    :type tokenizer AbstractTokenizer
    """
    # NOTE(review): this method is currently a stub — the tokenized
    # arguments and the language definition are computed but never used;
    # the actual parsing loop below is commented out (TODO: finish it).
    args = tokenizer.tokenize(argv)
    _lang = tokenizer.language_definition()
    #
    # for param in self.__args:
    #     if self._is_default_arg(param):
    #         self.__out_tree[self.__default_arg_tag].append(param.strip())
    #     else:
    #         param = param.lstrip("-").partition('=')
    #         if len(param) == 3:
    #             self.__parse_one_param(param)
    pass
|
Parse command line to out tree
:type argv object
:type tokenizer AbstractTokenizer
|
def ready(self):
    """Set up the application after startup by logging how many data and
    component schemata have been registered."""
    schema_count = len(schemastore)
    config_schema_count = len(configschemastore)
    self.log('Got', schema_count, 'data and',
             config_schema_count, 'component schemata.', lvl=debug)
|
Sets up the application after startup.
|
def listunion(ListOfLists):
    """
    Take the union of a list of lists.

    Take a Python list of Python lists::

            [[l11,l12, ...], [l21,l22, ...], ... , [ln1, ln2, ...]]

    and return the aggregated list::

            [l11,l12, ..., l21, l22 , ...]

    For a list of two lists, e.g. `[a, b]`, this is like::

            a.extend(b)

    ``None`` entries in `ListOfLists` are skipped.

    **Parameters**

            **ListOfLists** :  Python list

                    Python list of Python lists.

    **Returns**

            **u** :  Python list

                    Python list created by taking the union of the
                    lists in `ListOfLists`.
    """
    u = []
    for s in ListOfLists:
        # `is not None` (identity test) rather than `!= None`, which would
        # invoke arbitrary __eq__ implementations.
        if s is not None:
            u.extend(s)
    return u
|
Take the union of a list of lists.
Take a Python list of Python lists::
[[l11,l12, ...], [l21,l22, ...], ... , [ln1, ln2, ...]]
and return the aggregated list::
[l11,l12, ..., l21, l22 , ...]
For a list of two lists, e.g. `[a, b]`, this is like::
a.extend(b)
**Parameters**
**ListOfLists** : Python list
Python list of Python lists.
**Returns**
**u** : Python list
Python list created by taking the union of the
lists in `ListOfLists`.
|
def is_blocking_notifications(self, notification_period, hosts, services, n_type, t_wished):
    # pylint: disable=too-many-return-statements
    """Check if a notification is blocked by the service.

    Conditions are ONE of the following::

    * enable_notification is False (global)
    * not in a notification_period
    * notifications_enable is False (local)
    * notification_options is 'n' or matches the state ('UNKNOWN' <=> 'u' ...)
      (include flapping and downtimes)
    * state goes ok and type is 'ACKNOWLEDGEMENT' (no sense)
    * scheduled_downtime_depth > 0 and flapping (host is in downtime)
    * scheduled_downtime_depth > 1 and not downtime end (deep downtime)
    * scheduled_downtime_depth > 0 and problem or recovery (host is in downtime)
    * SOFT state of a problem (we raise notification only on HARD state)
    * ACK notification when already ACK (don't raise again ACK)
    * not flapping notification in a flapping state
    * business rule smart notifications is enabled and all its children have been acknowledged
      or are under downtime
    * linked host is not up
    * linked host is in downtime

    :param notification_period: time period in which the notification may be sent (may be None)
    :param hosts: known hosts, indexed by id — used to resolve this service's host
    :param services: known services — used by the business rule notification check
    :param n_type: notification type
    :type n_type:
    :param t_wished: the time we should like to notify the host (mostly now)
    :type t_wished: float
    :return: True if ONE of the above condition was met, otherwise False
    :rtype: bool
    TODO: Refactor this, a lot of code duplication with Host.is_blocking_notifications
    """
    logger.debug("Checking if a service %s (%s) notification is blocked...",
                 self.get_full_name(), self.state)
    host = hosts[self.host]
    if t_wished is None:
        t_wished = time.time()

    # TODO
    # forced notification
    # pass if this is a custom notification

    # Block if notifications are program-wide disabled
    # Block if notifications are disabled for this service
    # Block if the current status is in the notification_options w,u,c,r,f,s
    if not self.enable_notifications or \
            not self.notifications_enabled or \
            'n' in self.notification_options:
        logger.debug("Service: %s, notification %s sending is blocked by configuration",
                     self.get_name(), n_type)
        return True

    # Does the notification period allow sending out this notification?
    if notification_period is not None and not notification_period.is_time_valid(t_wished):
        logger.debug("Service: %s, notification %s sending is blocked by globals",
                     self.get_name(), n_type)
        return True

    # Problem/recovery notifications are filtered by the per-state letters
    # in notification_options (u/w/c/r/x).
    if n_type in (u'PROBLEM', u'RECOVERY') and (
            self.state == u'UNKNOWN' and 'u' not in self.notification_options or
            self.state == u'WARNING' and 'w' not in self.notification_options or
            self.state == u'CRITICAL' and 'c' not in self.notification_options or
            self.state == u'OK' and 'r' not in self.notification_options or
            self.state == u'UNREACHABLE' and 'x' not in self.notification_options):
        logger.debug("Service: %s, notification %s sending is blocked by options: %s",
                     self.get_name(), n_type, self.notification_options)
        return True

    # Flapping notifications require the 'f' option.
    if (n_type in [u'FLAPPINGSTART', u'FLAPPINGSTOP', u'FLAPPINGDISABLED'] and
            'f' not in self.notification_options):
        logger.debug("Service: %s, notification %s sending is blocked by options: %s",
                     n_type, self.get_full_name(), self.notification_options)
        return True

    # Downtime notifications require the 's' option.
    if (n_type in [u'DOWNTIMESTART', u'DOWNTIMEEND', u'DOWNTIMECANCELLED'] and
            's' not in self.notification_options):
        logger.debug("Service: %s, notification %s sending is blocked by options: %s",
                     n_type, self.get_full_name(), self.notification_options)
        return True

    # Acknowledgements make no sense when the status is ok/up
    if n_type in [u'ACKNOWLEDGEMENT'] and self.state == self.ok_up:
        logger.debug("Host: %s, notification %s sending is blocked by current state",
                     self.get_name(), n_type)
        return True

    # Block if host is in a scheduled downtime
    if host.scheduled_downtime_depth > 0:
        logger.debug("Service: %s, notification %s sending is blocked by downtime",
                     self.get_name(), n_type)
        return True

    # When in deep downtime, only allow end-of-downtime notifications
    # In depth 1 the downtime just started and can be notified
    if self.scheduled_downtime_depth > 1 and n_type not in (u'DOWNTIMEEND',
                                                            u'DOWNTIMECANCELLED'):
        logger.debug("Service: %s, notification %s sending is blocked by deep downtime",
                     self.get_name(), n_type)
        return True

    # Block if in a scheduled downtime and a problem arises, or flapping event
    if self.scheduled_downtime_depth > 0 and n_type in \
            [u'PROBLEM', u'RECOVERY', u'ACKNOWLEDGEMENT',
             u'FLAPPINGSTART', u'FLAPPINGSTOP', u'FLAPPINGDISABLED']:
        logger.debug("Service: %s, notification %s sending is blocked by downtime",
                     self.get_name(), n_type)
        return True

    # Block if the status is SOFT
    # Block if the problem has already been acknowledged
    # Block if flapping
    # Block if host is down
    if self.state_type == u'SOFT' and n_type == u'PROBLEM' or \
            self.problem_has_been_acknowledged and n_type != u'ACKNOWLEDGEMENT' or \
            self.is_flapping and n_type not in [u'FLAPPINGSTART',
                                                u'FLAPPINGSTOP',
                                                u'FLAPPINGDISABLED'] or \
            host.state != host.ok_up:
        logger.debug("Service: %s, notification %s sending is blocked by soft state, "
                     "acknowledgement, flapping or host DOWN", self.get_name(), n_type)
        return True

    # Block if business rule smart notifications is enabled and all its
    # children have been acknowledged or are under downtime.
    if self.got_business_rule is True \
            and self.business_rule_smart_notifications is True \
            and self.business_rule_notification_is_blocked(hosts, services) is True \
            and n_type == u'PROBLEM':
        logger.debug("Service: %s, notification %s sending is blocked by business rules",
                     self.get_name(), n_type)
        return True

    logger.debug("Service: %s, notification %s sending is not blocked", self.get_name(), n_type)
    return False
|
Check if a notification is blocked by the service.
Conditions are ONE of the following::
* enable_notification is False (global)
* not in a notification_period
* notifications_enable is False (local)
* notification_options is 'n' or matches the state ('UNKNOWN' <=> 'u' ...)
(include flapping and downtimes)
* state goes ok and type is 'ACKNOWLEDGEMENT' (no sense)
* scheduled_downtime_depth > 0 and flapping (host is in downtime)
* scheduled_downtime_depth > 1 and not downtime end (deep downtime)
* scheduled_downtime_depth > 0 and problem or recovery (host is in downtime)
* SOFT state of a problem (we raise notification only on HARD state)
* ACK notification when already ACK (don't raise again ACK)
* not flapping notification in a flapping state
* business rule smart notifications is enabled and all its children have been acknowledged
or are under downtime
* linked host is not up
* linked host is in downtime
:param n_type: notification type
:type n_type:
:param t_wished: the time we should like to notify the host (mostly now)
:type t_wished: float
:return: True if ONE of the above condition was met, otherwise False
:rtype: bool
TODO: Refactor this, a lot of code duplication with Host.is_blocking_notifications
|
def traverse_tree_recursive(odb, tree_sha, path_prefix):
    """
    :return: list of entries of the tree pointed to by the binary tree_sha. An entry
        has the following format:

        * [0] 20 byte sha
        * [1] mode as int
        * [2] path relative to the repository

    :param path_prefix: prefix to prepend to the front of all returned paths"""
    entries = []
    raw = odb.stream(tree_sha).read()
    # unpacking/packing is faster than accessing individual items
    for sha, mode, name in tree_entries_from_data(raw):
        full_path = path_prefix + name
        if not S_ISDIR(mode):
            entries.append((sha, mode, full_path))
        else:
            # recurse into subtrees, extending the path prefix
            entries.extend(traverse_tree_recursive(odb, sha, full_path + '/'))
    return entries
|
:return: list of entries of the tree pointed to by the binary tree_sha. An entry
has the following format:
* [0] 20 byte sha
* [1] mode as int
* [2] path relative to the repository
:param path_prefix: prefix to prepend to the front of all returned paths
|
def collect(self):
    """Collect number values from db.serverStatus() and db.engineStatus()

    Connects to each configured MongoDB host, optionally authenticates,
    and publishes server/engine status plus per-database and
    per-collection statistics.

    NOTE(review): this code targets the legacy Python 2 / old pymongo API
    (pymongo.Connection, network_timeout, slave_okay) — see the hedged
    notes inline.
    """
    # pymongo import is module-level and optional; bail out if unavailable.
    if pymongo is None:
        self.log.error('Unable to import pymongo')
        return
    # we need this for backwards compatibility
    if 'host' in self.config:
        self.config['hosts'] = [self.config['host']]
    # convert network_timeout to integer
    if self.config['network_timeout']:
        self.config['network_timeout'] = int(
            self.config['network_timeout'])
    # use auth if given
    if 'user' in self.config:
        user = self.config['user']
    else:
        user = None
    if 'passwd' in self.config:
        passwd = self.config['passwd']
    else:
        passwd = None
    for host in self.config['hosts']:
        if len(self.config['hosts']) == 1:
            # one host only, no need to have a prefix
            base_prefix = []
        else:
            # hosts may be given as "alias@host"; the alias (if any)
            # becomes the metric prefix, otherwise a sanitized host name.
            matches = re.search('((.+)\@)?(.+)?', host)
            alias = matches.group(2)
            host = matches.group(3)
            if alias is None:
                base_prefix = [re.sub('[:\.]', '_', host)]
            else:
                base_prefix = [alias]
        try:
            # ReadPreference is None on very old pymongo versions that
            # don't support read preferences.
            if ReadPreference is None:
                conn = pymongo.Connection(
                    host,
                    network_timeout=self.config['network_timeout'],
                    slave_okay=True
                )
            else:
                conn = pymongo.Connection(
                    host,
                    network_timeout=self.config['network_timeout'],
                    read_preference=ReadPreference.SECONDARY,
                )
        except Exception as e:
            self.log.error('Couldnt connect to mongodb: %s', e)
            continue
        # try auth
        if user:
            try:
                conn.admin.authenticate(user, passwd)
            except Exception as e:
                self.log.error(
                    'User auth given, but could not autheticate' +
                    ' with host: %s, err: %s' % (host, e))
                return{}
        serverStatus = conn.db.command('serverStatus')
        engineStatus = conn.db.command('engineStatus')
        # NOTE(review): dict(a.items() + b.items()) only works on Python 2;
        # on Python 3 dict views don't support '+'.
        data = dict(serverStatus.items() + engineStatus.items())
        self._publish_transformed(data, base_prefix)
        if str_to_bool(self.config['simple']):
            data = self._extract_simple_data(data)
        self._publish_dict_with_prefix(data, base_prefix)
        # Per-database / per-collection stats, filtered by the configured
        # 'databases' and 'ignore_collections' regular expressions.
        db_name_filter = re.compile(self.config['databases'])
        ignored_collections = re.compile(self.config['ignore_collections'])
        for db_name in conn.database_names():
            if not db_name_filter.search(db_name):
                continue
            db_stats = conn[db_name].command('dbStats')
            db_prefix = base_prefix + ['databases', db_name]
            self._publish_dict_with_prefix(db_stats, db_prefix)
            for collection_name in conn[db_name].collection_names():
                if ignored_collections.search(collection_name):
                    continue
                collection_stats = conn[db_name].command('collstats',
                                                         collection_name)
                if str_to_bool(self.config['translate_collections']):
                    # dots in collection names would create extra metric
                    # path components.
                    collection_name = collection_name.replace('.', '_')
                collection_prefix = db_prefix + [collection_name]
                self._publish_dict_with_prefix(collection_stats,
                                               collection_prefix)
|
Collect number values from db.serverStatus() and db.engineStatus()
|
def _speak_as(
        self,
        element,
        regular_expression,
        data_property_value,
        operation
):
    """
    Apply *operation* to every match of a regular expression inside the
    text content of *element*, rebuilding the element's children.

    :param element: The element.
    :type element: hatemile.util.html.htmldomelement.HTMLDOMElement
    :param regular_expression: The regular expression.
    :type regular_expression: str
    :param data_property_value: The value of custom attribute used to
                                identify the fix.
    :type data_property_value: str
    :param operation: The operation to be executed.
    :type operation: function
    """
    pattern = re.compile(regular_expression)
    remaining = element.get_text_content()
    children = []
    # Scan the text left to right; after each match, continue one
    # character past the match start.
    while remaining:
        match = pattern.search(remaining)
        if match is None:
            break
        position = match.start()
        children = operation(remaining, position, children)
        remaining = remaining[position + 1:]
    if not children:
        return
    # Append any unmatched tail as a plain content element, then replace
    # the element's children with the newly built ones.
    if remaining:
        children.append(self._create_content_element(
            remaining,
            data_property_value
        ))
    while element.has_children():
        element.get_first_node_child().remove_node()
    for child in children:
        element.append_element(child)
|
Execute a operation by regular expression for element only.
:param element: The element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
:param regular_expression: The regular expression.
:type regular_expression: str
:param data_property_value: The value of custom attribute used to
identify the fix.
:type data_property_value: str
:param operation: The operation to be executed.
:type operation: function
|
def setDevice(self, device):
    """Sets the video stream

    :param device: A rather generic device class.  In this case DataModel.RTSPCameraDevice.
    """
    print(self.pre, "setDevice :", device)
    if (not device and not self.device):  # None can be passed as an argument when the device has not been set yet
        return
    if (self.device):
        # Ignore redundant assignments of the same device.
        if self.device == device:
            print(self.pre, "setDevice : same device")
            return
    if self.filterchain:  # there's video already
        # Tear down the current stream before switching devices.
        self.clearDevice()
    self.device = device
    self.video.setDevice(self.device)  # inform the video widget so it can start drags
    # ManagedFilterChain.addViewPort accepts ViewPort instance
    # Look up the filterchain matching this device's id; if found, bind
    # our viewport (X screen + window id) to it so video is rendered here.
    self.filterchain = self.filterchain_group.get(_id = self.device._id)
    if self.filterchain:
        self.viewport.setXScreenNum(self.n_xscreen)
        self.viewport.setWindowId  (int(self.video.winId()))
        self.filterchain.addViewPort(self.viewport)
|
Sets the video stream
:param device: A rather generic device class. In this case DataModel.RTSPCameraDevice.
|
def uploadFile(self, filename, ispickle=False, athome=False):
    """
    Uploads a single file to Redunda.

    :param str filename: The name of the file to upload
    :param bool ispickle: Optional variable to be set to True is the file is a pickle; default is False.
    :param bool athome: If True, the user's home directory is prepended to ``filename`` before reading.
    :returns: returns nothing
    """
    print("Uploading file {} to Redunda.".format(filename))
    # Only the basename of the file is used in the upload URL.
    _, tail = os.path.split(filename)
    url = "https://redunda.sobotics.org/bots/data/{}?key={}".format(tail, self.key)
    #Set the content type to 'application/octet-stream'
    header = {"Content-type": "application/octet-stream"}
    filedata = ""
    if athome:
        # NOTE(review): plain string concatenation — assumes `filename`
        # already starts with a path separator; confirm with callers.
        filename = str(os.path.expanduser("~")) + filename
    #Read the data from a file to a string.
    if filename.endswith(".pickle") or ispickle:
        # Pickled files are re-serialized as JSON before upload.
        try:
            with open(filename, "rb") as fileToRead:
                data = pickle.load(fileToRead)
        except pickle.PickleError as perr:
            print("Pickling error occurred: {}".format(perr))
            return
        filedata = json.dumps(data)
    else:
        try:
            with open(filename, "r") as fileToRead:
                filedata = fileToRead.read()
        except IOError as ioerr:
            print("IOError occurred: {}".format(ioerr))
            return
    requestToMake = request.Request(url, data=filedata.encode("utf-8"), headers=header)
    #Make the request.
    response = request.urlopen(requestToMake)
    # NOTE(review): urllib's urlopen normally raises HTTPError for 4xx/5xx
    # responses, so this branch may never fire — confirm intended handling.
    if response.code >= 400:
        print("Error occurred while uploading file '{}' with error code {}.".format(filename,response.code))
|
Uploads a single file to Redunda.
:param str filename: The name of the file to upload
:param bool ispickle: Optional variable to be set to True is the file is a pickle; default is False.
:returns: returns nothing
|
def __parse_domain_to_employer_stream(self, stream):
    """Parse domain to employer stream.

    Each line of the stream has to contain a domain and a organization,
    or employer, separated by tabs. Comment lines start with the hash
    character (#)

    Example:

        # Domains from domains.txt
        example.org        Example
        example.com        Example
        bitergia.com       Bitergia
        libresoft.es       LibreSoft
        example.org        LibreSoft
    """
    if not stream:
        return

    parse_line = self.__parse_domain_to_employer_line

    # Group parsed domains under their organization name.
    for parsed in self.__parse_stream(stream, parse_line):
        org, dom = parsed[0], parsed[1]
        self.__raw_orgs.setdefault(org, []).append(dom)
|
Parse domain to employer stream.
Each line of the stream has to contain a domain and a organization,
or employer, separated by tabs. Comment lines start with the hash
character (#)
Example:
# Domains from domains.txt
example.org Example
example.com Example
bitergia.com Bitergia
libresoft.es LibreSoft
example.org LibreSoft
|
def getargs():
    """
    Return parsed command-line arguments.

    The parser accepts a single positional ``question`` argument.

    :return: argparse.Namespace with a ``question`` attribute
    """
    # The import stays local so importing this module has no argparse cost.
    # (Previously the import preceded the triple-quoted string, which made
    # that string a no-op statement instead of the function's docstring.)
    from argparse import ArgumentParser
    parser = ArgumentParser(description='Answer yes or no to a question.')
    parser.add_argument("question", type=str, help="A question to ask.")
    return parser.parse_args()
|
Return arguments
|
def get_commits_since(check_name, target_tag=None):
    """
    Get the list of commits from `target_tag` to `HEAD` for the given check
    """
    root = get_root()
    target_path = os.path.join(root, check_name)
    # When no tag is given, log the full history for the check's directory.
    tag_range = '' if target_tag is None else '{}... '.format(target_tag)
    command = 'git log --pretty=%s {}{}'.format(tag_range, target_path)
    with chdir(root):
        output = run_command(command, capture=True)
    return output.stdout.splitlines()
|
Get the list of commits from `target_tag` to `HEAD` for the given check
|
def register(self,flag):
    """Register a new :class:`Flag` instance with the Flags registry.

    :param flag: the flag to store; it is keyed by its ``name`` attribute.
    """
    # Store via the parent class's __setitem__, keyed by the flag's name.
    super(Flags,self).__setitem__(flag.name,flag)
|
Register a new :class:`Flag` instance with the Flags registry.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.