code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def paged_object_to_list(paged_object):
    '''
    Collect every page of *paged_object* into a list of dictionaries.

    Iterates the paged object to exhaustion, converting each page with
    ``as_dict()``. CloudError is re-raised unchanged for the caller.
    '''
    pages = []
    while True:
        try:
            pages.append(next(paged_object).as_dict())
        except CloudError:
            # Propagate cloud-side failures untouched.
            raise
        except StopIteration:
            return pages
|
Extract all pages within a paged object as a list of dictionaries
|
def replace_name(file_path, new_name):
    ''' Return *file_path* with its base name swapped for *new_name*, keeping the extension '''
    if not file_path:
        raise Exception("File path cannot be empty")
    if not new_name:
        raise Exception("New name cannot be empty")
    directory, base = os.path.split(file_path)
    extension = os.path.splitext(base)[1]
    return os.path.join(directory, new_name + extension)
|
Change the file name in a path but keep the extension
|
def proj4_to_epsg(projection):
    """Attempt to convert a PROJ4 projection object to an EPSG code.

    :param projection: An object exposing a PROJ4 string via its ``srs``
        attribute (e.g. a pyproj projection).
    :return: The integer EPSG code, or None if conversion fails.
    """
    def make_definition(value):
        # Normalise a proj4 string into a comparable set of '+key=val' parts.
        return {x.strip().lower() for x in value.split('+') if x}

    # Use the EPSG in the definition if available.
    match = EPSG_RE.search(projection.srs)
    if match:
        return int(match.group(1))
    # Otherwise, try to look up the EPSG from the pyproj data file.
    pyproj_data_dir = os.path.join(os.path.dirname(pyproj.__file__), 'data')
    pyproj_epsg_file = os.path.join(pyproj_data_dir, 'epsg')
    if os.path.exists(pyproj_epsg_file):
        definition = make_definition(projection.srs)
        # Fix: the original opened the file without ever closing it; the
        # context manager guarantees the handle is released.
        with open(pyproj_epsg_file, 'r') as f:
            for line in f:
                match = PYPROJ_EPSG_FILE_RE.search(line)
                if match:
                    file_definition = make_definition(match.group(2))
                    if definition == file_definition:
                        return int(match.group(1))
    return None
|
Attempts to convert a PROJ4 projection object to an EPSG code and returns None if conversion fails
|
def call_action(self, service_name, action_name, **kwargs):
    """Execute the named action of the named service.

    Raises KeyError when the service or action is unknown.
    """
    service = self.services[service_name]
    return service.actions[action_name].execute(**kwargs)
|
Executes the given action. Raise a KeyError on unknown actions.
|
def unpack(self, buff, offset=0):
    """Unpack the buffer into a OxmTLV.

    Args:
        buff (bytes): The binary data to be unpacked.
        offset (int): If we need to shift the beginning of the data.

    Raises:
        UnpackException: If the oxm_field cannot be decoded.
    """
    super().unpack(buff, offset)
    # Recover field from field_and_hasmask.
    try:
        self.oxm_field = self._unpack_oxm_field()
    except ValueError as exception:
        # Fix: chain the original ValueError so its traceback/context is
        # preserved (the original raise discarded the cause).
        raise UnpackException(exception) from exception
    # The last bit of field_and_mask is oxm_hasmask.
    self.oxm_hasmask = (self.oxm_field_and_mask & 1) == 1  # as boolean
    # Unpack oxm_value that has oxm_length bytes.
    start = offset + 4  # 4 bytes: class, field_and_mask and length
    end = start + self.oxm_length
    self.oxm_value = buff[start:end]
|
Unpack the buffer into a OxmTLV.
Args:
buff (bytes): The binary data to be unpacked.
offset (int): If we need to shift the beginning of the data.
|
def _set_route_target_evpn(self, v, load=False):
    """
    Setter method for route_target_evpn, mapped from YANG variable /vrf/address_family/ipv6/unicast/route_target_container_ipv6/route_target_evpn (list)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_route_target_evpn is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_route_target_evpn() directly.
    """
    # If the value carries a declared union type, coerce through it first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the value in the generated YANG list type; a TypeError or
        # ValueError here means the value is not list-compatible.
        t = YANGDynClass(v,base=YANGListType("action target_community",route_target_evpn.route_target_evpn, yang_name="route-target-evpn", rest_name="route-target", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='action target-community', extensions={u'tailf-common': {u'info': u'Configure Target VPN Extended Communities', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-suppress-list-no': None, u'callpoint': u'VrfRtEvpnAfIpv6Ucast', u'cli-incomplete-command': None, u'alt-name': u'route-target'}}), is_container='list', yang_name="route-target-evpn", rest_name="route-target", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Target VPN Extended Communities', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-suppress-list-no': None, u'callpoint': u'VrfRtEvpnAfIpv6Ucast', u'cli-incomplete-command': None, u'alt-name': u'route-target'}}, namespace='urn:brocade.com:mgmt:brocade-vrf', defining_module='brocade-vrf', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        # Surface a structured error describing the expected generated type.
        raise ValueError({
            'error-string': """route_target_evpn must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("action target_community",route_target_evpn.route_target_evpn, yang_name="route-target-evpn", rest_name="route-target", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='action target-community', extensions={u'tailf-common': {u'info': u'Configure Target VPN Extended Communities', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-suppress-list-no': None, u'callpoint': u'VrfRtEvpnAfIpv6Ucast', u'cli-incomplete-command': None, u'alt-name': u'route-target'}}), is_container='list', yang_name="route-target-evpn", rest_name="route-target", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure Target VPN Extended Communities', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-suppress-list-no': None, u'callpoint': u'VrfRtEvpnAfIpv6Ucast', u'cli-incomplete-command': None, u'alt-name': u'route-target'}}, namespace='urn:brocade.com:mgmt:brocade-vrf', defining_module='brocade-vrf', yang_type='list', is_config=True)""",
        })
    self.__route_target_evpn = t
    # Notify the containing object of the change, when supported.
    if hasattr(self, '_set'):
        self._set()
|
Setter method for route_target_evpn, mapped from YANG variable /vrf/address_family/ipv6/unicast/route_target_container_ipv6/route_target_evpn (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_route_target_evpn is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_route_target_evpn() directly.
|
def complete_reminder(self, reminder_id, complete_dict):
    """
    Completes a reminder

    :param reminder_id: the reminder id
    :param complete_dict: the complete dict with the template id
    :return: Response
    """
    request_kwargs = {
        'resource': REMINDERS,
        'billomat_id': reminder_id,
        'command': COMPLETE,
        'send_data': complete_dict,
    }
    return self._create_put_request(**request_kwargs)
|
Completes a reminder
:param complete_dict: the complete dict with the template id
:param reminder_id: the reminder id
:return: Response
|
def _generate_new_address(self, creator=None) -> str:
"""Generates a new address for the global state.
:return:
"""
if creator:
# TODO: Use nounce
return "0x" + str(mk_contract_address(creator, 0).hex())
while True:
address = "0x" + "".join([str(hex(randint(0, 16)))[-1] for _ in range(40)])
if address not in self.accounts.keys():
return address
|
Generates a new address for the global state.
:return:
|
def insert(self, context):
    """
    Deploy application.

    :param resort.engine.execution.Context context:
        Current execution context.
    """
    data = {
        "name": self.__name
    }
    if self.__context_root is not None:
        data["contextroot"] = self.__context_root
    # Fix: the original leaked the file handle if the POST raised; the
    # context manager closes it on every path.
    with open(context.resolve(self.__path), "rb") as module_file:
        status_code, msg = self.__endpoint.post(
            "/applications/application",
            data=data,
            files={
                "id": module_file
            },
            timeout=60.
        )
    # NOTE(review): status_code/msg are ignored — presumably the endpoint
    # raises on failure; confirm before relying on self.__available.
    self.__available = True
|
Deploy application.
:param resort.engine.execution.Context context:
Current execution context.
|
def _resolve_api_id(self):
    '''
    returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION
    as the api description
    '''
    # Query REST APIs by name plus the fixed description marker; the boto
    # result dict exposes the matches under 'restapi'.
    apis = __salt__['boto_apigateway.describe_apis'](name=self.rest_api_name,
                                                     description=_Swagger.AWS_API_DESCRIPTION,
                                                     **self._common_aws_args).get('restapi')
    if apis:
        if len(apis) == 1:
            # Exactly one match: record its id on the instance.
            self.restApiId = apis[0].get('id')
        else:
            # NOTE(review): the message interpolates self.info_json where the
            # description seems intended — confirm against callers.
            raise ValueError('Multiple APIs matching given name {0} and '
                             'description {1}'.format(self.rest_api_name, self.info_json))
|
returns an Api Id that matches the given api_name and the hardcoded _Swagger.AWS_API_DESCRIPTION
as the api description
|
def share_network(network_id, usernames, read_only, share, **kwargs):
    """
    Share a network with a list of users, identified by their usernames.

    The read_only flag ('Y' or 'N') must be set to 'N' to grant the users
    write access; when it is 'Y' both write and share rights are withheld.
    The share flag ('Y' or 'N') must be set to 'Y' to allow the recipients
    to re-share the network themselves.
    """
    user_id = kwargs.get('user_id')
    net_i = _get_network(network_id)
    # The calling user must themselves hold share permission on the network.
    net_i.check_share_permission(user_id)
    if read_only == 'Y':
        # Read-only sharing also forbids re-sharing.
        write = 'N'
        share = 'N'
    else:
        write = 'Y'
    # Only the network's creator may delegate the 'share' ability.
    if net_i.created_by != int(user_id) and share == 'Y':
        raise HydraError("Cannot share the 'sharing' ability as user %s is not"
                         " the owner of network %s"%
                         (user_id, network_id))
    for username in usernames:
        user_i = _get_user(username)
        # Set the ownership on the network itself
        net_i.set_owner(user_i.id, write=write, share=share)
        for o in net_i.project.owners:
            if o.user_id == user_i.id:
                break
        else:
            # Give the user read access to the containing project
            net_i.project.set_owner(user_i.id, write='N', share='N')
    db.DBSession.flush()
|
Share a network with a list of users, identified by their usernames.
The read_only flag ('Y' or 'N') must be set
to 'N' to allow write access or sharing.
The share flag ('Y' or 'N') must be set to 'Y' to allow the
project to be shared with other users
|
def add_mismatch(self, entity, *traits):
    """
    Record *entity* as a mismatch under each given trait.

    Mismatches are stored exactly like matches: the entity is simply added
    to the index bucket of every trait.

    :param collections.Hashable entity: the mismatching object to index
    :param list traits: hashable traits under which to index the entity
    """
    for single_trait in traits:
        self.index[single_trait].add(entity)
|
Add a mismatching entity to the index.
We do this by simply adding the mismatch to the index.
:param collections.Hashable entity: an object to be mismatching the values of `traits_indexed_by`
:param list traits: a list of hashable traits to index the entity with
|
def resume(self):
    """
    resume the execution
    """
    # Only a halted target can be resumed; otherwise this is a no-op.
    if self.get_state() != Target.TARGET_HALTED:
        logging.debug('cannot resume: target not halted')
        return
    self.notify(Notification(event=Target.EVENT_PRE_RUN, source=self, data=Target.RUN_TYPE_RESUME))
    # Bump the run token so any state cached for the previous run is invalidated.
    self._run_token += 1
    self.clear_debug_cause_bits()
    # Write DHCSR with the debug key and C_DEBUGEN only (halt bit cleared),
    # which lets the core run again.
    self.write_memory(CortexM.DHCSR, CortexM.DBGKEY | CortexM.C_DEBUGEN)
    self.flush()
    self.notify(Notification(event=Target.EVENT_POST_RUN, source=self, data=Target.RUN_TYPE_RESUME))
|
resume the execution
|
def set_context(self, data):
    """Copy every key/value pair from *data* onto the local context."""
    for key, value in data.items():
        setattr(self.local_context, key, value)
|
Load Context with data
|
def order_by(self, field, orientation='ASC'):
    """
    Register the ordering criteria: a [field, orientation] pair, or a
    pre-built list passed through as-is. Returns self for chaining.
    """
    entry = field if isinstance(field, list) else [field, orientation]
    self.raw_order_by.append(entry)
    return self
|
Indica los campos y el criterio de ordenamiento
|
def QueueQueryAndOwn(self, queue, lease_seconds, limit, timestamp):
    """Returns a list of Tasks leased for a certain time.

    Args:
        queue: The queue to query from.
        lease_seconds: The tasks will be leased for this long.
        limit: Number of values to fetch.
        timestamp: Range of times for consideration.

    Returns:
        A list of GrrMessage() objects leased.
    """
    # Do the real work in a transaction
    try:
        # Take a retrying lock on the queue subject for the lease duration.
        lock = DB.LockRetryWrapper(queue, lease_time=lease_seconds)
        return self._QueueQueryAndOwn(
            lock.subject,
            lease_seconds=lease_seconds,
            limit=limit,
            timestamp=timestamp)
    except DBSubjectLockError:
        # This exception just means that we could not obtain the lock on the queue
        # so we just return an empty list, let the worker sleep and come back to
        # fetch more tasks.
        return []
    except Error as e:
        # Any other datastore error is logged and treated as "no tasks".
        logging.warning("Datastore exception: %s", e)
        return []
|
Returns a list of Tasks leased for a certain time.
Args:
queue: The queue to query from.
lease_seconds: The tasks will be leased for this long.
limit: Number of values to fetch.
timestamp: Range of times for consideration.
Returns:
A list of GrrMessage() objects leased.
|
def _raise_on_mode(self, mode):
"""
Checks that the provided query mode is one of the accepted values. If
not, raises a :obj:`ValueError`.
"""
valid_modes = [
'random_sample',
'random_sample_per_pix',
'samples',
'median',
'mean',
'best',
'percentile']
if mode not in valid_modes:
raise ValueError(
'"{}" is not a valid `mode`. Valid modes are:\n'
' {}'.format(mode, valid_modes)
)
|
Checks that the provided query mode is one of the accepted values. If
not, raises a :obj:`ValueError`.
|
def getStats(self):
    """
    Parse the file using dedicated reader and collect fields stats. Never
    called if user of :class:`~.FileRecordStream` does not invoke
    :meth:`~.FileRecordStream.getStats` method.

    :returns:
        a dictionary of stats. In the current implementation, min and max
        fields are supported. Example of the return dictionary is:

        .. code-block:: python

            {
                'min' : [f1_min, f2_min, None, None, fn_min],
                'max' : [f1_max, f2_max, None, None, fn_max]
            }

        (where fx_min/fx_max are set for scalar fields, or None if not)
    """
    # NOTE: this is Python 2 code (reader.next(), xrange).
    # Collect stats only once per File object, use fresh csv iterator
    # to keep the next() method returning sequential records no matter when
    # caller asks for stats
    if self._stats == None:
        # Stats are only available when reading csv file
        assert self._mode == self._FILE_READ_MODE
        # NOTE(review): inFile is never closed — consider a context manager.
        inFile = open(self._filename, self._FILE_READ_MODE)
        # Create a new reader; read names, types, specials
        reader = csv.reader(inFile, dialect="excel")
        names = [n.strip() for n in reader.next()]
        types = [t.strip() for t in reader.next()]
        # Skip over specials
        reader.next()
        # Initialize stats to all None
        self._stats = dict()
        self._stats['min'] = []
        self._stats['max'] = []
        for i in xrange(len(names)):
            self._stats['min'].append(None)
            self._stats['max'].append(None)
        # Read the file, collect stats
        while True:
            try:
                line = reader.next()
                for i, f in enumerate(line):
                    # Only scalar (integer/float) fields whose value is not a
                    # missing-value marker contribute to min/max.
                    if (len(types) > i and
                        types[i] in [FieldMetaType.integer, FieldMetaType.float] and
                        f not in self._missingValues):
                        value = self._adapters[i](f)
                        if self._stats['max'][i] == None or \
                           self._stats['max'][i] < value:
                            self._stats['max'][i] = value
                        if self._stats['min'][i] == None or \
                           self._stats['min'][i] > value:
                            self._stats['min'][i] = value
            except StopIteration:
                break
    return self._stats
|
Parse the file using dedicated reader and collect fields stats. Never
called if user of :class:`~.FileRecordStream` does not invoke
:meth:`~.FileRecordStream.getStats` method.
:returns:
a dictionary of stats. In the current implementation, min and max
fields are supported. Example of the return dictionary is:
.. code-block:: python
{
'min' : [f1_min, f2_min, None, None, fn_min],
'max' : [f1_max, f2_max, None, None, fn_max]
}
(where fx_min/fx_max are set for scalar fields, or None if not)
|
def do_classdesc(self, parent=None, ident=0):
    """
    Handles a TC_CLASSDESC opcode

    :param parent: Optional parent object; when given, its __fields/__types
        are filled in from the parsed class descriptor
    :param ident: Log indentation level
    :return: A JavaClass object
    """
    # Grammar being parsed here:
    # TC_CLASSDESC className serialVersionUID newHandle classDescInfo
    # classDescInfo:
    #   classDescFlags fields classAnnotation superClassDesc
    # classDescFlags:
    #   (byte)  // Defined in Terminal Symbols and Constants
    # fields:
    #   (short)<count> fieldDesc[count]
    # fieldDesc:
    #   primitiveDesc
    #   objectDesc
    # primitiveDesc:
    #   prim_typecode fieldName
    # objectDesc:
    #   obj_typecode fieldName className1
    clazz = JavaClass()
    log_debug("[classdesc]", ident)
    class_name = self._readString()
    clazz.name = class_name
    log_debug("Class name: %s" % class_name, ident)
    # serialVersionUID is a Java (signed) long => 8 bytes
    serialVersionUID, classDescFlags = self._readStruct(">qB")
    clazz.serialVersionUID = serialVersionUID
    clazz.flags = classDescFlags
    # Register the descriptor so later TC_REFERENCE opcodes can resolve it.
    self._add_reference(clazz, ident)
    log_debug(
        "Serial: 0x{0:X} / {0:d} - classDescFlags: 0x{1:X} {2}".format(
            serialVersionUID, classDescFlags, OpCodeDebug.flags(classDescFlags)
        ),
        ident,
    )
    # Number of declared fields (unsigned short).
    (length,) = self._readStruct(">H")
    log_debug("Fields num: 0x{0:X}".format(length), ident)
    clazz.fields_names = []
    clazz.fields_types = []
    for fieldId in range(length):
        # Each field descriptor is a 1-byte typecode followed by the name.
        (typecode,) = self._readStruct(">B")
        field_name = self._readString()
        field_type = self._convert_char_to_type(typecode)
        log_debug("> Reading field {0}".format(field_name), ident)
        if field_type == self.TYPE_ARRAY:
            # Array fields carry their element class name as a string/reference.
            _, field_type = self._read_and_exec_opcode(
                ident=ident + 1, expect=(self.TC_STRING, self.TC_REFERENCE)
            )
            if type(field_type) is not JavaString:
                raise AssertionError(
                    "Field type must be a JavaString, "
                    "not {0}".format(type(field_type))
                )
        elif field_type == self.TYPE_OBJECT:
            _, field_type = self._read_and_exec_opcode(
                ident=ident + 1, expect=(self.TC_STRING, self.TC_REFERENCE)
            )
            if type(field_type) is JavaClass:
                # FIXME: ugly trick
                field_type = JavaString(field_type.name)
            if type(field_type) is not JavaString:
                raise AssertionError(
                    "Field type must be a JavaString, "
                    "not {0}".format(type(field_type))
                )
        log_debug(
            "< FieldName: 0x{0:X} Name:{1} Type:{2} ID:{3}".format(
                typecode, field_name, field_type, fieldId
            ),
            ident,
        )
        assert field_name is not None
        assert field_type is not None
        clazz.fields_names.append(field_name)
        clazz.fields_types.append(field_type)
    if parent:
        parent.__fields = clazz.fields_names
        parent.__types = clazz.fields_types
    # classAnnotation
    (opid,) = self._readStruct(">B")
    log_debug(
        "OpCode: 0x{0:X} -- {1} (classAnnotation)".format(
            opid, OpCodeDebug.op_id(opid)
        ),
        ident,
    )
    # Only an empty annotation block (TC_ENDBLOCKDATA) is supported.
    if opid != self.TC_ENDBLOCKDATA:
        raise NotImplementedError("classAnnotation isn't implemented yet")
    # superClassDesc
    log_debug("Reading Super Class of {0}".format(clazz.name), ident)
    _, superclassdesc = self._read_and_exec_opcode(
        ident=ident + 1, expect=(self.TC_CLASSDESC, self.TC_NULL, self.TC_REFERENCE)
    )
    log_debug(
        "Super Class for {0}: {1}".format(clazz.name, str(superclassdesc)), ident
    )
    clazz.superclass = superclassdesc
    return clazz
|
Handles a TC_CLASSDESC opcode
:param parent:
:param ident: Log indentation level
:return: A JavaClass object
|
def update_cursor(self, dc, grid, row, col):
    """Whites out the old cursor and draws the new one"""
    old_row, old_col = self.old_cursor_row_col
    # Erase the previous cursor by redrawing it in the background color.
    bgcolor = get_color(config["background_color"])
    self._draw_cursor(dc, grid, old_row, old_col,
                      pen=wx.Pen(bgcolor), brush=wx.Brush(bgcolor))
    # Draw the cursor at its new position with the default pen/brush.
    self._draw_cursor(dc, grid, row, col)
|
Whites out the old cursor and draws the new one
|
def element_type(self):
    """
    Return the pointed-to type of this pointer type.

    Raises a ValueError when the type is not a pointer.
    """
    if self.is_pointer:
        return TypeRef(ffi.lib.LLVMPY_GetElementType(self))
    raise ValueError("Type {} is not a pointer".format(self))
|
Returns the pointed-to type. When the type is not a pointer,
raises exception.
|
def Clamond(Re, eD, fast=False):
r'''Calculates Darcy friction factor using a solution accurate to almost
machine precision. Recommended very strongly. For details of the algorithm,
see [1]_.
Parameters
----------
Re : float
Reynolds number, [-]
eD : float
Relative roughness, [-]
fast : bool, optional
If true, performs only one iteration, which gives roughly half the
number of decimals of accuracy, [-]
Returns
-------
fd : float
Darcy friction factor [-]
Notes
-----
This is a highly optimized function, 4 times faster than the solution using
the LambertW function, and faster than many other approximations which are
much less accurate.
The code used here is only slightly modified than that in [1]_, for further
performance improvements.
For 10 < Re < 1E12, and 0 < eD < 0.01, this equation has been confirmed
numerically to provide a solution to the Colebrook equation accurate to an
rtol of 1E-9 or better - the same level of accuracy as the analytical
solution to the Colebrook equation due to floating point precision.
Comparing this to the numerical solution of the Colebrook equation,
identical values are given accurate to an rtol of 1E-9 for 10 < Re < 1E100,
and 0 < eD < 1 and beyond.
However, for values of Re under 10, different answers from the `Colebrook`
equation appear and then quickly a ValueError is raised.
Examples
--------
>>> Clamond(1E5, 1E-4)
0.01851386607747165
References
----------
.. [1] Clamond, Didier. "Efficient Resolution of the Colebrook Equation."
Industrial & Engineering Chemistry Research 48, no. 7 (April 1, 2009):
3665-71. doi:10.1021/ie801626g.
http://math.unice.fr/%7Edidierc/DidPublis/ICR_2009.pdf
'''
X1 = eD*Re*0.1239681863354175460160858261654858382699 # (log(10)/18.574).evalf(40)
X2 = log(Re) - 0.7793974884556819406441139701653776731705 # log(log(10)/5.02).evalf(40)
F = X2 - 0.2
X1F = X1 + F
X1F1 = 1. + X1F
E = (log(X1F) - 0.2)/(X1F1)
F = F - (X1F1 + 0.5*E)*E*(X1F)/(X1F1 + E*(1. + E*0.3333333333333333))
if not fast:
X1F = X1 + F
X1F1 = 1. + X1F
E = (log(X1F) + F - X2)/(X1F1)
F = F - (X1F1 + 0.5*E)*E*(X1F)/(X1F1 + E*(1. + E*0.3333333333333333))
return 1.325474527619599502640416597148504422899/(F*F)
|
r'''Calculates Darcy friction factor using a solution accurate to almost
machine precision. Recommended very strongly. For details of the algorithm,
see [1]_.
Parameters
----------
Re : float
Reynolds number, [-]
eD : float
Relative roughness, [-]
fast : bool, optional
If true, performs only one iteration, which gives roughly half the
number of decimals of accuracy, [-]
Returns
-------
fd : float
Darcy friction factor [-]
Notes
-----
This is a highly optimized function, 4 times faster than the solution using
the LambertW function, and faster than many other approximations which are
much less accurate.
The code used here is only slightly modified than that in [1]_, for further
performance improvements.
For 10 < Re < 1E12, and 0 < eD < 0.01, this equation has been confirmed
numerically to provide a solution to the Colebrook equation accurate to an
rtol of 1E-9 or better - the same level of accuracy as the analytical
solution to the Colebrook equation due to floating point precision.
Comparing this to the numerical solution of the Colebrook equation,
identical values are given accurate to an rtol of 1E-9 for 10 < Re < 1E100,
and 0 < eD < 1 and beyond.
However, for values of Re under 10, different answers from the `Colebrook`
equation appear and then quickly a ValueError is raised.
Examples
--------
>>> Clamond(1E5, 1E-4)
0.01851386607747165
References
----------
.. [1] Clamond, Didier. "Efficient Resolution of the Colebrook Equation."
Industrial & Engineering Chemistry Research 48, no. 7 (April 1, 2009):
3665-71. doi:10.1021/ie801626g.
http://math.unice.fr/%7Edidierc/DidPublis/ICR_2009.pdf
|
def _execute(self, api_command, *, timeout=None):
    """Execute the command."""
    # Observe commands are long-running subscriptions; delegate and return.
    if api_command.observe:
        self._observe(api_command)
        return
    method = api_command.method
    path = api_command.path
    data = api_command.data
    parse_json = api_command.parse_json
    url = api_command.url(self._host)
    # A per-call timeout overrides the instance default.
    proc_timeout = self._timeout
    if timeout is not None:
        proc_timeout = timeout
    command = self._base_command(method)
    kwargs = {
        'stderr': subprocess.DEVNULL,
        'timeout': proc_timeout,
        'universal_newlines': True,
    }
    if data is not None:
        # Feed the JSON payload to the subprocess on stdin; '-f -' makes the
        # tool read the request body from stdin.
        kwargs['input'] = json.dumps(data)
        command.append('-f')
        command.append('-')
        _LOGGER.debug('Executing %s %s %s: %s', self._host, method, path,
                      data)
    else:
        _LOGGER.debug('Executing %s %s %s', self._host, method, path)
    command.append(url)
    try:
        return_value = subprocess.check_output(command, **kwargs)
    except subprocess.TimeoutExpired:
        # Translate subprocess failures into API-level exceptions, dropping
        # the noisy subprocess traceback ('from None').
        raise RequestTimeout() from None
    except subprocess.CalledProcessError as err:
        raise RequestError(
            'Error executing request: {}'.format(err)) from None
    api_command.result = _process_output(return_value, parse_json)
    return api_command.result
|
Execute the command.
|
def _init_unique_sets(self):
"""Initialise sets used for uniqueness checking."""
ks = dict()
for t in self._unique_checks:
key = t[0]
ks[key] = set() # empty set
return ks
|
Initialise sets used for uniqueness checking.
|
def update(self, _attributes=None, **attributes):
    """
    Perform an update on all the related models.

    :param attributes: The attributes
    :type attributes: dict

    :rtype: int
    """
    combined = dict(attributes)
    if _attributes is not None:
        combined.update(_attributes)
    # Stamp the related model's updated_at column when it tracks timestamps.
    if self._related.uses_timestamps():
        combined[self.get_related_updated_at()] = self._related.fresh_timestamp()
    return self._query.update(combined)
|
Perform an update on all the related models.
:param attributes: The attributes
:type attributes: dict
:rtype: int
|
def read_history_file(self, filename=None):
    u'''Load a readline history file.

    Falls back to an empty history when the file cannot be read.
    '''
    if filename is None:
        filename = self.history_filename
    try:
        # Fix: the original iterated open(filename) without ever closing the
        # handle; the context manager releases it on every path.
        with open(filename, u'r') as history_file:
            for line in history_file:
                self.add_history(lineobj.ReadLineTextBuffer(ensure_unicode(line.rstrip())))
    except IOError:
        # Unreadable/missing file: start with an empty history.
        self.history = []
        self.history_cursor = 0
|
u'''Load a readline history file.
|
def get_documents(self, subtypes=None, refresh=False):
    """Return the author's publications via ScopusSearch, optionally
    restricted to a set of document subtypes.
    """
    search = ScopusSearch('au-id({})'.format(self.identifier), refresh)
    results = search.results
    if not subtypes:
        return results
    return [doc for doc in results if doc.subtype in subtypes]
|
Return list of author's publications using ScopusSearch, which
fit a specified set of document subtypes.
|
def create_styles(title, defaults=None, mappings=None, host=cytoscape_host, port=cytoscape_port):
    """
    Creates a new visual style, updating it in place if it already exists.

    :param title: title of the visual style
    :param defaults: a list of dictionaries for each visualProperty
    :param mappings: a list of dictionaries for each visualProperty
    :param host: cytoscape host address, default=cytoscape_host
    :param port: cytoscape port, default=1234

    :returns: nothing
    """
    # Drop falsy placeholder entries from both property lists.
    if defaults:
        defaults = [d for d in defaults if d]
    if mappings:
        mappings = [m for m in mappings if m]
    try:
        update_style(title, defaults=defaults, mappings=mappings, host=host, port=port)
        print("Existing style was updated.")
        sys.stdout.flush()
    except Exception:
        # Fix: the original used a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. Any failure to update is treated as
        # "style does not exist yet" and a new style is created.
        print("Creating new style.")
        sys.stdout.flush()
        URL = "http://" + str(host) + ":" + str(port) + "/v1/styles"
        PARAMS = {"title": title,
                  "defaults": defaults,
                  "mappings": mappings}
        r = requests.post(url=URL, json=PARAMS)
        CheckResponse(r)
|
Creates a new visual style
:param title: title of the visual style
:param defaults: a list of dictionaries for each visualProperty
:param mappings: a list of dictionaries for each visualProperty
:param host: cytoscape host address, default=cytoscape_host
:param port: cytoscape port, default=1234
:returns: nothing
|
def search_variant_sets(self, dataset_id):
    """
    Returns an iterator over the VariantSets fulfilling the specified
    conditions from the specified Dataset.

    :param str dataset_id: The ID of the :class:`ga4gh.protocol.Dataset`
        of interest.
    :return: An iterator over the :class:`ga4gh.protocol.VariantSet`
        objects defined by the query parameters.
    """
    request = protocol.SearchVariantSetsRequest()
    request.dataset_id = dataset_id
    # Propagate the client's configured page size into the request.
    request.page_size = pb.int(self._page_size)
    # Delegate paging/decoding to the generic search-request runner.
    return self._run_search_request(
        request, "variantsets", protocol.SearchVariantSetsResponse)
|
Returns an iterator over the VariantSets fulfilling the specified
conditions from the specified Dataset.
:param str dataset_id: The ID of the :class:`ga4gh.protocol.Dataset`
of interest.
:return: An iterator over the :class:`ga4gh.protocol.VariantSet`
objects defined by the query parameters.
|
def verify(self, obj):
    """Verify that the object conforms to this verifier's schema.

    Args:
        obj (object): A python object to verify

    Raises:
        ValidationError: If there is a problem verifying the dictionary, a
            ValidationError is thrown with at least the reason key set
            indicating the reason for the lack of validation.
    """
    if isinstance(obj, int):
        return obj
    raise ValidationError("Object is not a int", reason='object is not a int', object=obj,
                          type=type(obj), int_type=int)
|
Verify that the object conforms to this verifier's schema
Args:
obj (object): A python object to verify
Raises:
ValidationError: If there is a problem verifying the dictionary, a
ValidationError is thrown with at least the reason key set indicating
the reason for the lack of validation.
|
def _tls_auth_encrypt(self, s):
    """
    Return the TLSCiphertext.fragment for AEAD ciphers, i.e. the whole
    GenericAEADCipher. Also, the additional data is computed right here.
    """
    # 64-bit big-endian write sequence number, also handed to the cipher
    # (presumably as nonce material — confirm against the cipher API).
    write_seq_num = struct.pack("!Q", self.tls_session.wcs.seq_num)
    self.tls_session.wcs.seq_num += 1
    # Additional data = seq_num (8) || record type (1) || version (2) ||
    # plaintext length (2).
    add_data = (write_seq_num +
                pkcs_i2osp(self.type, 1) +
                pkcs_i2osp(self.version, 2) +
                pkcs_i2osp(len(s), 2))
    return self.tls_session.wcs.cipher.auth_encrypt(s, add_data,
                                                    write_seq_num)
|
Return the TLSCiphertext.fragment for AEAD ciphers, i.e. the whole
GenericAEADCipher. Also, the additional data is computed right here.
|
def pvlan_host_association(self, **kwargs):
    """Set interface PVLAN association.

    Args:
        int_type (str): Type of interface. (gigabitethernet,
            tengigabitethernet, etc)
        name (str): Name of interface. (1/0/5, 1/0/10, etc)
        pri_vlan (str): The primary PVLAN.
        sec_vlan (str): The secondary PVLAN.
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        Return value of `callback`.

    Raises:
        KeyError: if `int_type`, `name`, `pri_vlan`, or `sec_vlan` is not
            specified.
        ValueError: if `int_type`, `name`, `pri_vlan`, or `sec_vlan`
            is invalid.

    Examples:
        >>> import pynos.device
        >>> switches = ['10.24.39.211', '10.24.39.203']
        >>> auth = ('admin', 'password')
        >>> int_type = 'tengigabitethernet'
        >>> name = '225/0/38'
        >>> pri_vlan = '75'
        >>> sec_vlan = '100'
        >>> for switch in switches:
        ...     conn = (switch, '22')
        ...     with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...         output = dev.interface.private_vlan_type(name=pri_vlan,
        ...         pvlan_type='primary')
        ...         output = dev.interface.private_vlan_type(name=sec_vlan,
        ...         pvlan_type='isolated')
        ...         output = dev.interface.vlan_pvlan_association_add(
        ...         name=pri_vlan, sec_vlan=sec_vlan)
        ...         output = dev.interface.enable_switchport(int_type,
        ...         name)
        ...         output = dev.interface.private_vlan_mode(
        ...         int_type=int_type, name=name, mode='host')
        ...         output = dev.interface.pvlan_host_association(
        ...         int_type=int_type, name=name, pri_vlan=pri_vlan,
        ...         sec_vlan=sec_vlan)
        ...         dev.interface.pvlan_host_association()
        ...         # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        KeyError
    """
    int_type = kwargs.pop('int_type').lower()
    name = kwargs.pop('name')
    pri_vlan = kwargs.pop('pri_vlan')
    sec_vlan = kwargs.pop('sec_vlan')
    callback = kwargs.pop('callback', self._callback)
    int_types = ['gigabitethernet', 'tengigabitethernet',
                 'fortygigabitethernet', 'hundredgigabitethernet',
                 'port_channel']
    if int_type not in int_types:
        raise ValueError("Incorrect int_type value.")
    if not pynos.utilities.valid_interface(int_type, name):
        raise ValueError('`name` must be in the format of x/y/z for '
                         'physical interfaces or x for port channel.')
    # Fix: this message previously said "`sec_vlan`" for the pri_vlan check
    # (copy-paste error), misreporting which argument was invalid.
    if not pynos.utilities.valid_vlan_id(pri_vlan):
        raise InvalidVlanId("`pri_vlan` must be between `1` and `4095`.")
    if not pynos.utilities.valid_vlan_id(sec_vlan):
        raise InvalidVlanId("`sec_vlan` must be between `1` and `4095`.")
    pvlan_args = dict(name=name, host_pri_pvlan=pri_vlan)
    # Resolve the generated NETCONF helper for this interface type.
    associate_pvlan = getattr(self._interface,
                              'interface_%s_switchport_private_vlan_'
                              'host_association_host_pri_pvlan' %
                              int_type)
    config = associate_pvlan(**pvlan_args)
    # Append the secondary PVLAN under the host-association element.
    sec_assoc = config.find('.//*host-association')
    sec_assoc = ET.SubElement(sec_assoc, 'host-sec-pvlan')
    sec_assoc.text = sec_vlan
    return callback(config)
|
Set interface PVLAN association.
Args:
int_type (str): Type of interface. (gigabitethernet,
tengigabitethernet, etc)
name (str): Name of interface. (1/0/5, 1/0/10, etc)
pri_vlan (str): The primary PVLAN.
sec_vlan (str): The secondary PVLAN.
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
Return value of `callback`.
Raises:
KeyError: if `int_type`, `name`, `pri_vlan`, or `sec_vlan` is not
specified.
ValueError: if `int_type`, `name`, `pri_vlan`, or `sec_vlan`
is invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> int_type = 'tengigabitethernet'
>>> name = '225/0/38'
>>> pri_vlan = '75'
>>> sec_vlan = '100'
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... output = dev.interface.private_vlan_type(name=pri_vlan,
... pvlan_type='primary')
... output = dev.interface.private_vlan_type(name=sec_vlan,
... pvlan_type='isolated')
... output = dev.interface.vlan_pvlan_association_add(
... name=pri_vlan, sec_vlan=sec_vlan)
... output = dev.interface.enable_switchport(int_type,
... name)
... output = dev.interface.private_vlan_mode(
... int_type=int_type, name=name, mode='host')
... output = dev.interface.pvlan_host_association(
... int_type=int_type, name=name, pri_vlan=pri_vlan,
... sec_vlan=sec_vlan)
... dev.interface.pvlan_host_association()
... # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
KeyError
|
def _make_image_description(self, datasets, **kwargs):
    """Generate the MITIFF image-description text block.

    Example of the produced block:
    Satellite: NOAA 18
    Date and Time: 06:58 31/05-2016
    SatDir: 0
    Channels: 6 In this file: 1-VIS0.63 2-VIS0.86 3(3B)-IR3.7
    4-IR10.8 5-IR11.5 6(3A)-VIS1.6
    Xsize: 4720
    Ysize: 5544
    Map projection: Stereographic
    Proj string: +proj=stere +lon_0=0 +lat_0=90 +lat_ts=60
    +ellps=WGS84 +towgs84=0,0,0 +units=km
    +x_0=2526000.000000 +y_0=5806000.000000
    TrueLat: 60 N
    GridRot: 0
    Xunit:1000 m Yunit: 1000 m
    NPX: 0.000000 NPY: 0.000000
    Ax: 1.000000 Ay: 1.000000 Bx: -2526.000000 By: -262.000000

    Field-by-field description:
    Satellite: <satellite name>
    Date and Time: <HH:MM dd/mm-yyyy>
    SatDir: 0
    Channels: <number of channels> In this file: <channel names in order>
    Xsize: <number of pixels x>
    Ysize: <number of pixels y>
    Map projection: Stereographic
    Proj string: <proj4 string with +x_0 and +y_0 which is the positive
                 distance from proj origo
                 to the lower left corner of the image data>
    TrueLat: 60 N
    GridRot: 0
    Xunit:1000 m Yunit: 1000 m
    NPX: 0.000000 NPY: 0.000000
    Ax: <pixel size x in km> Ay: <pixel size y in km> Bx: <left corner of
    upper right pixel in km>
    By: <upper corner of upper right pixel in km>

    If palette image write special palette.
    If normal channel write table calibration:
    Table_calibration: <channel name>, <calibration type>, [<unit>],
    <no of bits of data>,
    [<calibration values space separated>]\n\n
    """
    translate_platform_name = {'metop01': 'Metop-B',
                               'metop02': 'Metop-A',
                               'metop03': 'Metop-C',
                               'noaa15': 'NOAA-15',
                               'noaa16': 'NOAA-16',
                               'noaa17': 'NOAA-17',
                               'noaa18': 'NOAA-18',
                               'noaa19': 'NOAA-19'}
    first_dataset = datasets
    if isinstance(datasets, list):
        LOG.debug("Datasets is a list of dataset")
        first_dataset = datasets[0]

    # Platform name: prefer the dataset attribute, fall back to kwargs.
    if 'platform_name' in first_dataset.attrs:
        _platform_name = translate_platform_name.get(
            first_dataset.attrs['platform_name'],
            first_dataset.attrs['platform_name'])
    elif 'platform_name' in kwargs:
        _platform_name = translate_platform_name.get(
            kwargs['platform_name'], kwargs['platform_name'])
    else:
        _platform_name = None

    # (A no-op `_image_description.encode('utf-8')` whose result was
    # discarded has been removed here.)
    _image_description = ''
    _image_description += ' Satellite: '
    if _platform_name is not None:
        _image_description += _platform_name
    _image_description += '\n'
    _image_description += ' Date and Time: '
    # Select earliest start_time among all datasets.
    first = True
    earliest = 0
    for dataset in datasets:
        if first:
            earliest = dataset.attrs['start_time']
        else:
            if dataset.attrs['start_time'] < earliest:
                earliest = dataset.attrs['start_time']
        first = False
    LOG.debug("earliest start_time: %s", earliest)
    _image_description += earliest.strftime("%H:%M %d/%m-%Y\n")
    _image_description += ' SatDir: 0\n'
    _image_description += ' Channels: '
    # Channel count: list length, 'bands' dimension, or 1 for a plain 2-D array.
    if isinstance(datasets, list):
        LOG.debug("len datasets: %s", len(datasets))
        _image_description += str(len(datasets))
    elif 'bands' in datasets.sizes:
        LOG.debug("len datasets: %s", datasets.sizes['bands'])
        _image_description += str(datasets.sizes['bands'])
    elif len(datasets.sizes) == 2:
        LOG.debug("len datasets: 1")
        _image_description += '1'
    _image_description += ' In this file: '
    channels = self._make_channel_list(datasets, **kwargs)
    # BUGFIX: `cns` was previously assigned inside a try/except KeyError
    # whose handler only passed, so it was left unbound (NameError below)
    # whenever 'sensor' was missing from kwargs.  Default to an empty
    # mapping instead.
    cns = self.translate_channel_name.get(kwargs.get('sensor'), {})
    _image_description += self._channel_names(channels, cns, **kwargs)
    _image_description += self._add_sizes(datasets, first_dataset)
    _image_description += ' Map projection: Stereographic\n'
    _image_description += self._add_proj4_string(datasets, first_dataset)
    _image_description += ' TrueLat: 60N\n'
    _image_description += ' GridRot: 0\n'
    _image_description += ' Xunit:1000 m Yunit: 1000 m\n'
    _image_description += ' NPX: %.6f' % (0)
    _image_description += ' NPY: %.6f' % (0) + '\n'
    _image_description += self._add_pixel_sizes(datasets, first_dataset)
    _image_description += self._add_corners(datasets, first_dataset)
    if isinstance(datasets, list):
        LOG.debug("Area extent: %s", first_dataset.attrs['area'].area_extent)
    else:
        LOG.debug("Area extent: %s", datasets.attrs['area'].area_extent)
    _image_description += self._add_calibration(channels, cns, datasets, **kwargs)
    return _image_description
|
generate image description for mitiff.
Satellite: NOAA 18
Date and Time: 06:58 31/05-2016
SatDir: 0
Channels: 6 In this file: 1-VIS0.63 2-VIS0.86 3(3B)-IR3.7
4-IR10.8 5-IR11.5 6(3A)-VIS1.6
Xsize: 4720
Ysize: 5544
Map projection: Stereographic
Proj string: +proj=stere +lon_0=0 +lat_0=90 +lat_ts=60
+ellps=WGS84 +towgs84=0,0,0 +units=km
+x_0=2526000.000000 +y_0=5806000.000000
TrueLat: 60 N
GridRot: 0
Xunit:1000 m Yunit: 1000 m
NPX: 0.000000 NPY: 0.000000
Ax: 1.000000 Ay: 1.000000 Bx: -2526.000000 By: -262.000000
Satellite: <satellite name>
Date and Time: <HH:MM dd/mm-yyyy>
SatDir: 0
    Channels: <number of channels> In this file: <channel names in order>
Xsize: <number of pixels x>
Ysize: <number of pixels y>
Map projection: Stereographic
Proj string: <proj4 string with +x_0 and +y_0 which is the positive
distance from proj origo
to the lower left corner of the image data>
TrueLat: 60 N
GridRot: 0
Xunit:1000 m Yunit: 1000 m
NPX: 0.000000 NPY: 0.000000
Ax: <pixels size x in km> Ay: <pixel size y in km> Bx: <left corner of
upper right pixel in km>
By: <upper corner of upper right pixel in km>
if palette image write special palette
if normal channel write table calibration:
Table_calibration: <channel name>, <calibration type>, [<unit>],
<no of bits of data>,
[<calibration values space separated>]\n\n
|
def main(args):
  """Launch the appropriate builder.

  Dispatches on ``args.subparser_name`` to one of: generate_client_config,
  build, repack, repack_multiple, or sign_template.
  """
  grr_config.CONFIG.AddContext(contexts.CLIENT_BUILD_CONTEXT)

  if args.subparser_name == "generate_client_config":
    # Generating a config does not require full initialization.
    GetClientConfig(args.client_config_output)
    return

  # TODO(user): Find out if adding the client-builder context is still
  # necessary.
  build_context = FLAGS.context
  build_context.append(contexts.CLIENT_BUILD_CONTEXT)

  config_lib.SetPlatformArchContext()
  config_lib.ParseConfigCommandLine()

  # Basic console logging so build progress is visible.
  root_logger = logging.getLogger()
  console_handler = logging.StreamHandler()
  console_handler.setLevel(logging.DEBUG if FLAGS.verbose else logging.INFO)
  root_logger.handlers = [console_handler]

  def _expand_globs(patterns):
    """Expand '*' patterns manually (globs are not expanded on Windows)."""
    expanded = []
    for pattern in patterns:
      if "*" in pattern:
        expanded.extend(glob.glob(pattern))
      else:
        # Bypass glob for plain names so missing files still raise errors.
        expanded.append(pattern)
    return expanded

  if args.subparser_name == "build":
    if grr_config.CONFIG["Client.fleetspeak_enabled"]:
      if grr_config.CONFIG.ContextApplied("Platform:Darwin"):
        if not args.fleetspeak_service_config:
          raise RuntimeError("--fleetspeak_service_config must be provided.")
        if not grr_config.CONFIG.Get("ClientBuilder.install_dir"):
          raise RuntimeError("ClientBuilder.install_dir must be set.")
        if not grr_config.CONFIG.Get("ClientBuilder.fleetspeak_plist_path"):
          raise RuntimeError("ClientBuilder.fleetspeak_plist_path must be set.")
      grr_config.CONFIG.Set("ClientBuilder.client_path",
                            "grr_response_client.grr_fs_client")
    TemplateBuilder().BuildTemplate(
        context=build_context,
        output=args.output,
        fleetspeak_service_config=args.fleetspeak_service_config)
  elif args.subparser_name == "repack":
    if args.debug_build:
      build_context.append("DebugClientBuild Context")
    result_path = repacking.TemplateRepacker().RepackTemplate(
        args.template,
        args.output_dir,
        context=build_context,
        sign=args.sign,
        signed_template=args.signed_template)
    if not result_path:
      raise ErrorDuringRepacking(" ".join(sys.argv[:]))
  elif args.subparser_name == "repack_multiple":
    # Resolve globs manually on Windows.
    templates = _expand_globs(args.templates)
    repack_configs = _expand_globs(args.repack_configs)
    MultiTemplateRepacker().RepackTemplates(
        repack_configs,
        templates,
        args.output_dir,
        config=FLAGS.config,
        sign=args.sign,
        signed_template=args.signed_template)
  elif args.subparser_name == "sign_template":
    repacking.TemplateRepacker().SignTemplate(
        args.template, args.output_file, context=build_context)
    if not os.path.exists(args.output_file):
      raise RuntimeError("Signing failed: output not written")
|
Launch the appropriate builder.
|
def _setup_axes(cls, axes, info_axis=None, stat_axis=None, aliases=None,
slicers=None, axes_are_reversed=False, build_axes=True,
ns=None, docs=None):
"""Provide axes setup for the major PandasObjects.
Parameters
----------
axes : the names of the axes in order (lowest to highest)
info_axis_num : the axis of the selector dimension (int)
stat_axis_num : the number of axis for the default stats (int)
aliases : other names for a single axis (dict)
slicers : how axes slice to others (dict)
axes_are_reversed : boolean whether to treat passed axes as
reversed (DataFrame)
build_axes : setup the axis properties (default True)
"""
cls._AXIS_ORDERS = axes
cls._AXIS_NUMBERS = {a: i for i, a in enumerate(axes)}
cls._AXIS_LEN = len(axes)
cls._AXIS_ALIASES = aliases or dict()
cls._AXIS_IALIASES = {v: k for k, v in cls._AXIS_ALIASES.items()}
cls._AXIS_NAMES = dict(enumerate(axes))
cls._AXIS_SLICEMAP = slicers or None
cls._AXIS_REVERSED = axes_are_reversed
# typ
setattr(cls, '_typ', cls.__name__.lower())
# indexing support
cls._ix = None
if info_axis is not None:
cls._info_axis_number = info_axis
cls._info_axis_name = axes[info_axis]
if stat_axis is not None:
cls._stat_axis_number = stat_axis
cls._stat_axis_name = axes[stat_axis]
# setup the actual axis
if build_axes:
def set_axis(a, i):
setattr(cls, a, properties.AxisProperty(i, docs.get(a, a)))
cls._internal_names_set.add(a)
if axes_are_reversed:
m = cls._AXIS_LEN - 1
for i, a in cls._AXIS_NAMES.items():
set_axis(a, m - i)
else:
for i, a in cls._AXIS_NAMES.items():
set_axis(a, i)
assert not isinstance(ns, dict)
|
Provide axes setup for the major PandasObjects.
Parameters
----------
axes : the names of the axes in order (lowest to highest)
info_axis_num : the axis of the selector dimension (int)
stat_axis_num : the number of axis for the default stats (int)
aliases : other names for a single axis (dict)
slicers : how axes slice to others (dict)
axes_are_reversed : boolean whether to treat passed axes as
reversed (DataFrame)
build_axes : setup the axis properties (default True)
|
def parent(self):
    """
    Get the parent of the element
    @rtype: WebElementWrapper
    @return: Parent of webelementwrapper on which this was invoked
    """
    def wrap_parent():
        """Build a wrapper around the underlying element's parent."""
        raw_parent = self.element.parent
        return WebElementWrapper(self.driver_wrapper, self.locator, raw_parent)

    return self.execute_and_handle_webelement_exceptions(wrap_parent, 'get parent')
|
Get the parent of the element
@rtype: WebElementWrapper
@return: Parent of webelementwrapper on which this was invoked
|
def record(self):
    """
    Record PyAudio stream into the in-memory output buffer as WAV data.
    This coroutine keeps the stream open; the stream is closed in stop().
    """
    while True:
        self.stream.start_stream()
        # Collect one chunk's worth of raw audio frames.
        frames = [self.stream.read(self.config.FRAMES_PER_BUFFER)
                  for _ in range(self.num_frames)]
        # Rewind and (re)write the buffer as a complete WAV file.
        self.output.seek(0)
        writer = wave.open(self.output, 'wb')
        writer.setnchannels(self.config.CHANNELS)
        writer.setsampwidth(self.audio.get_sample_size(self.config.FORMAT))
        writer.setframerate(self.config.RATE)
        writer.writeframes(b''.join(frames))
        writer.close()
        yield
|
Record PyAudio stream into StringIO output
This coroutine keeps stream open; the stream is closed in stop()
|
def argmin(self, axis=None, skipna=True, *args, **kwargs):
    """
    Returns the indices of the minimum values along an axis.

    See `numpy.ndarray.argmin` for more information on the
    `axis` parameter.

    See Also
    --------
    numpy.ndarray.argmin
    """
    nv.validate_argmin(args, kwargs)
    nv.validate_minmax_axis(axis)

    values = self.asi8
    if self.hasnans:
        nan_mask = self._isnan
        if not skipna or nan_mask.all():
            # All-NaT, or NaT present and not skipping: no valid minimum.
            return -1
        # Replace NaT slots with the largest int64 so they never win.
        values = values.copy()
        values[nan_mask] = np.iinfo('int64').max
    return values.argmin()
Returns the indices of the minimum values along an axis.
See `numpy.ndarray.argmin` for more information on the
`axis` parameter.
See Also
--------
numpy.ndarray.argmin
|
def next_event(self):
    """Simulates the queue forward one event.
    Use :meth:`.simulate` instead.
    Returns
    -------
    out : :class:`.Agent` (sometimes)
        If the next event is a departure then the departing agent
        is returned, otherwise nothing is returned.
    See Also
    --------
    :meth:`.simulate` : Simulates the queue forward.
    """
    # Departure case: the earliest departure precedes the earliest arrival.
    if self._departures[0]._time < self._arrivals[0]._time:
        new_depart = heappop(self._departures)
        self._current_t = new_depart._time
        self._num_total -= 1
        self.num_system -= 1
        self.num_departures += 1
        if self.collect_data and new_depart.agent_id in self.data:
            # Record the departure time in slot 2 of the agent's latest row.
            self.data[new_depart.agent_id][-1][2] = self._current_t
        if len(self.queue) > 0:
            # A server just freed up: move the next waiting agent into service.
            agent = self.queue.popleft()
            if self.collect_data and agent.agent_id in self.data:
                # Slot 1 holds the service start time.
                self.data[agent.agent_id][-1][1] = self._current_t
            agent._time = self.service_f(self._current_t)
            agent.queue_action(self, 1)
            heappush(self._departures, agent)
        new_depart.queue_action(self, 2)
        self._update_time()
        return new_depart
    # Arrival case: only processed while the next arrival time is finite.
    elif self._arrivals[0]._time < infty:
        arrival = heappop(self._arrivals)
        self._current_t = arrival._time
        if self._active:
            # Schedule the next external arrival.
            self._add_arrival()
        self.num_system += 1
        self._num_arrivals += 1
        if self.collect_data:
            # b == 1 when this arrival must wait (all servers busy).
            b = 0 if self.num_system <= self.num_servers else 1
            # Row layout: [arrival_t, service_start_t, departure_t,
            #              queue_len (incl. this agent if waiting), num_system]
            if arrival.agent_id not in self.data:
                self.data[arrival.agent_id] = \
                    [[arrival._time, 0, 0, len(self.queue) + b, self.num_system]]
            else:
                self.data[arrival.agent_id]\
                    .append([arrival._time, 0, 0, len(self.queue) + b, self.num_system])
        arrival.queue_action(self, 0)
        if self.num_system <= self.num_servers:
            # A server is free: the arrival enters service immediately.
            if self.collect_data:
                self.data[arrival.agent_id][-1][1] = arrival._time
            arrival._time = self.service_f(arrival._time)
            arrival.queue_action(self, 1)
            heappush(self._departures, arrival)
        else:
            # All servers busy: the arrival joins the waiting line.
            self.queue.append(arrival)
        self._update_time()
Simulates the queue forward one event.
Use :meth:`.simulate` instead.
Returns
-------
out : :class:`.Agent` (sometimes)
If the next event is a departure then the departing agent
is returned, otherwise nothing is returned.
See Also
--------
:meth:`.simulate` : Simulates the queue forward.
|
def generate_bigip_uri(base_uri, partition, name, sub_path, suffix, **kwargs):
    '''(str, str, str) --> str

    This function checks the supplied elements to see if each conforms to
    the specification for the appropriate part of the URI. These validations
    are conducted by the helper function _validate_uri_parts.
    After validation the parts are assembled into a valid BigIP REST URI
    string which is then submitted with appropriate metadata.

    >>> generate_bigip_uri('https://0.0.0.0/mgmt/tm/ltm/nat/', \
    'CUSTOMER1', 'nat52', params={'a':1})
    'https://0.0.0.0/mgmt/tm/ltm/nat/~CUSTOMER1~nat52'
    >>> generate_bigip_uri('https://0.0.0.0/mgmt/tm/ltm/nat/', \
    'CUSTOMER1', 'nat52', params={'a':1}, suffix='/wacky')
    'https://0.0.0.0/mgmt/tm/ltm/nat/~CUSTOMER1~nat52/wacky'
    >>> generate_bigip_uri('https://0.0.0.0/mgmt/tm/ltm/nat/', '', '', \
    params={'a':1}, suffix='/thwocky')
    'https://0.0.0.0/mgmt/tm/ltm/nat/thwocky'

    ::Warning: There are cases where '/' and '~' characters are valid in the
    object name or subPath. This is indicated by passing the 'transform_name'
    or 'transform_subpath' boolean respectively as True. By default this is
    set to False.
    '''
    _validate_uri_parts(base_uri, partition, name, sub_path, suffix, **kwargs)
    # Optionally fold embedded '/' into '~' for names/subPaths that allow it.
    if kwargs.get('transform_name', False) and name != '':
        name = name.replace('/', '~')
    if kwargs.get('transform_subpath', False) and sub_path != '':
        sub_path = sub_path.replace('/', '~')
    if partition != '':
        partition = '~' + partition
    elif sub_path:
        # A subPath without a partition is not addressable.
        msg = 'When giving the subPath component include partition ' \
              'as well.'
        raise InvalidURIComponentPart(msg)
    if sub_path != '' and partition != '':
        sub_path = '~' + sub_path
    if name != '' and partition != '':
        name = '~' + name
    object_path = partition + sub_path + name
    if suffix and not object_path:
        # Without an instance path, the suffix attaches directly to base_uri.
        suffix = suffix.lstrip('/')
    return base_uri + object_path + suffix
|
(str, str, str) --> str
This function checks the supplied elements to see if each conforms to
the specification for the appropriate part of the URI. These validations
are conducted by the helper function _validate_uri_parts.
After validation the parts are assembled into a valid BigIP REST URI
string which is then submitted with appropriate metadata.
>>> generate_bigip_uri('https://0.0.0.0/mgmt/tm/ltm/nat/', \
'CUSTOMER1', 'nat52', params={'a':1})
'https://0.0.0.0/mgmt/tm/ltm/nat/~CUSTOMER1~nat52'
>>> generate_bigip_uri('https://0.0.0.0/mgmt/tm/ltm/nat/', \
'CUSTOMER1', 'nat52', params={'a':1}, suffix='/wacky')
'https://0.0.0.0/mgmt/tm/ltm/nat/~CUSTOMER1~nat52/wacky'
>>> generate_bigip_uri('https://0.0.0.0/mgmt/tm/ltm/nat/', '', '', \
params={'a':1}, suffix='/thwocky')
'https://0.0.0.0/mgmt/tm/ltm/nat/thwocky'
::Warning: There are cases where '/' and '~' characters are valid in the
object name or subPath. This is indicated by passing the 'transform_name' or 'transform_subpath' boolean
respectively as True. By default this is set to False.
|
def makeUserLoginMethod(username, password, locale=None):
    '''Return a function that will call the vim.SessionManager.Login() method
    with the given parameters. The result of this function can be passed as
    the "loginMethod" to a SessionOrientedStub constructor.'''
    def _login(soapStub):
        service_instance = vim.ServiceInstance("ServiceInstance", soapStub)
        # Only authenticate when no session is already active on this stub.
        if not service_instance.content.sessionManager.currentSession:
            service_instance.content.sessionManager.Login(
                username, password, locale)
    return _login
|
Return a function that will call the vim.SessionManager.Login() method
with the given parameters. The result of this function can be passed as
the "loginMethod" to a SessionOrientedStub constructor.
|
def router_function(fn):
    # type: (Callable) -> Callable
    """Raise a runtime error if on Win32 systems.

    Decorator.

    Decorator for functions that interact with the router for the Linux
    implementation of the ADS library.

    Unlike the Windows implementation which uses a separate router daemon,
    the Linux library manages AMS routing in-process. As such, routing must
    be configured programmatically via the provided API. These endpoints are
    invalid on Win32 systems, so an exception will be raised.
    """
    @wraps(fn)
    def guarded(*args, **kwargs):
        # type: (Any, Any) -> Callable
        if platform_is_windows():  # pragma: no cover, skip Windows test
            raise RuntimeError(
                "Router interface is not available on Win32 systems.\n"
                "Configure AMS routes using the TwinCAT router service."
            )
        return fn(*args, **kwargs)

    return guarded
|
Raise a runtime error if on Win32 systems.
Decorator.
Decorator for functions that interact with the router for the Linux
implementation of the ADS library.
Unlike the Windows implementation which uses a separate router daemon,
the Linux library manages AMS routing in-process. As such, routing must be
configured programmatically via the provided API. These endpoints are
invalid on Win32 systems, so an exception will be raised.
|
def _get_content_type(self, file):
"""
Return content type of file. If file does not
have a content type, make a guess.
"""
if file.mimetype:
return file.mimetype
# get file extension
_, extension = os.path.splitext(file.name)
extension = extension.strip('.')
# Make an educated guess about what the Content-Type should be.
return media_types[extension] if extension in media_types else 'binary/octet-stream'
|
Return content type of file. If file does not
have a content type, make a guess.
|
def _encode_text(name, value, dummy0, dummy1):
    """Encode a python unicode (python 2.x) / str (python 3.x)."""
    encoded = _utf_8_encode(value)[0]
    # Type byte 0x02, then the element name, a length that counts the
    # trailing NUL, the UTF-8 payload, and the NUL terminator itself.
    return b"".join(
        [b"\x02", name, _PACK_INT(len(encoded) + 1), encoded, b"\x00"])
|
Encode a python unicode (python 2.x) / str (python 3.x).
|
def get_passenger_queue_stats(self):
    """
    Execute the passenger-status binary, parse its output, and return
    the number of requests waiting in the top-level and application queues.
    Returns an empty dict if the command cannot be run or yields no output.
    """
    queue_stats = {
        "top_level_queue_size": 0.0,
        "passenger_queue_size": 0.0,
    }

    # Optionally prefix with sudo, as configured.
    command = [self.config["passenger_status_bin"]]
    if str_to_bool(self.config["use_sudo"]):
        command.insert(0, self.config["sudo_cmd"])

    try:
        proc1 = subprocess.Popen(command, stdout=subprocess.PIPE)
        (std_out, std_err) = proc1.communicate()
    except OSError:
        # Binary missing or not executable.
        return {}
    if std_out is None:
        return {}

    # Strips ANSI colour/escape sequences from each output line.
    re_colour = re.compile("\x1B\[([0-9]{1,3}((;[0-9]{1,3})*)?)?[m|K]")
    re_requests = re.compile(r"Requests")
    re_topqueue = re.compile(r"^top-level")
    # Section flags: set once the corresponding report header is seen.
    gen_info_flag = 0
    app_groups_flag = 0
    for raw_line in std_out.splitlines():
        line = re_colour.sub("", raw_line)
        if "General information" in line:
            gen_info_flag = 1
        if "Application groups" in line:
            app_groups_flag = 1
        elif re_requests.match(line) and re_topqueue.search(line):
            # If line starts with Requests and line has top-level queue then
            # store queue size
            line_splitted = line.split()
            if gen_info_flag == 1 and line_splitted:
                queue_stats["top_level_queue_size"] = float(
                    line_splitted[5])
        elif re_requests.search(line) and not re_topqueue.search(line):
            # If line has Requests and nothing else special
            line_splitted = line.split()
            if app_groups_flag == 1 and line_splitted:
                queue_stats["passenger_queue_size"] = float(
                    line_splitted[3])
    return queue_stats
|
    Execute passenger-status, parse its output, and return the number of requests in queue
|
def SetProtocol(self, protocol):
    """Sets the protocol that will be used to query Viper.

    Args:
      protocol (str): protocol to use to query Viper. Either 'http' or 'https'.

    Raises:
      ValueError: If an invalid protocol is selected.
    """
    normalized = protocol.lower().strip()
    if normalized not in ('http', 'https'):
        raise ValueError('Invalid protocol specified for Viper lookup')
    self._analyzer.SetProtocol(normalized)
|
Sets the protocol that will be used to query Viper.
Args:
protocol (str): protocol to use to query Viper. Either 'http' or 'https'.
Raises:
ValueError: If an invalid protocol is selected.
|
def reset(self, hard=False):
    '''Reset the card dispenser, either soft or hard based on the boolean
    second argument.'''
    if hard:
        # Hard reset carries a one-byte 0x01 payload.
        self.sendcommand(Vendapin.RESET, 1, 0x01)
    else:
        self.sendcommand(Vendapin.RESET)
    # Give the device time to finish resetting before reading the reply.
    time.sleep(2)
    # parse the reply
    response = self.receivepacket()
    # BUGFIX: the message previously always said "(soft)", even for a hard
    # reset; report the mode that was actually requested.
    mode = 'hard' if hard else 'soft'
    print('Vendapin.reset(' + mode + '): ' + str(response))
|
reset the card dispense, either soft or hard based on boolean 2nd arg
|
def _configure_key_pair(config):
    """Configure SSH access, using an existing key pair if possible.

    Creates a project-wide ssh key that can be used to access all the
    instances unless explicitly prohibited by instance config.

    The ssh-keys created by ray are of format:

      [USERNAME]:ssh-rsa [KEY_VALUE] [USERNAME]

    where:

      [USERNAME] is the user for the SSH key, specified in the config.
      [KEY_VALUE] is the public SSH key value.

    Returns:
        The (possibly updated) config with ``auth.ssh_private_key`` set.
    """
    if "ssh_private_key" in config["auth"]:
        # A key was supplied explicitly; nothing to do.
        return config

    ssh_user = config["auth"]["ssh_user"]

    project = compute.projects().get(
        project=config["provider"]["project_id"]).execute()

    # Key pairs associated with project meta data. The key pairs are general,
    # and not just ssh keys.
    ssh_keys_str = next(
        (item for item in project["commonInstanceMetadata"].get("items", [])
         if item["key"] == "ssh-keys"), {}).get("value", "")

    ssh_keys = ssh_keys_str.split("\n") if ssh_keys_str else []

    # Try a few times to get or create a good key pair.
    key_found = False
    for i in range(10):
        key_name = key_pair_name(i, config["provider"]["region"],
                                 config["provider"]["project_id"], ssh_user)
        public_key_path, private_key_path = key_pair_paths(key_name)

        for ssh_key in ssh_keys:
            key_parts = ssh_key.split(" ")
            if len(key_parts) != 3:
                continue
            if key_parts[2] == ssh_user and os.path.exists(private_key_path):
                # Found a key
                key_found = True
                break

        # Create a key since it doesn't exist locally or in GCP
        if not key_found and not os.path.exists(private_key_path):
            logger.info("_configure_key_pair: "
                        "Creating new key pair %s", key_name)
            public_key, private_key = generate_rsa_key_pair()

            _create_project_ssh_key_pair(project, public_key, ssh_user)

            with open(private_key_path, "w") as f:
                f.write(private_key)
            # Private key must be readable by the owner only for SSH to use it.
            os.chmod(private_key_path, 0o600)

            with open(public_key_path, "w") as f:
                f.write(public_key)

            key_found = True
            break

        if key_found:
            break

    assert key_found, "SSH keypair for user {} not found for {}".format(
        ssh_user, private_key_path)
    assert os.path.exists(private_key_path), (
        "Private key file {} not found for user {}"
        "".format(private_key_path, ssh_user))

    # BUGFIX: the original concatenated "using" directly onto the path
    # ("using/home/..."); add the separating space and use lazy %-formatting.
    logger.info("_configure_key_pair: "
                "Private key not specified in config, using %s",
                private_key_path)

    config["auth"]["ssh_private_key"] = private_key_path

    return config
|
Configure SSH access, using an existing key pair if possible.
Creates a project-wide ssh key that can be used to access all the instances
unless explicitly prohibited by instance config.
The ssh-keys created by ray are of format:
[USERNAME]:ssh-rsa [KEY_VALUE] [USERNAME]
where:
[USERNAME] is the user for the SSH key, specified in the config.
[KEY_VALUE] is the public SSH key value.
|
def _set_mpls_interface(self, v, load=False):
  """
  Setter method for mpls_interface, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface (list)
  If this variable is read-only (config: false) in the
  source YANG file, then _set_mpls_interface is considered as a private
  method. Backends looking to populate this variable should
  do so via calling thisObj._set_mpls_interface() directly.
  """
  # NOTE: auto-generated pyangbind accessor -- do not hand-edit the
  # YANGDynClass argument lists below; regenerate from the YANG model.
  if hasattr(v, "_utype"):
    # Coerce a typed value back through its union type constructor.
    v = v._utype(v)
  try:
    t = YANGDynClass(v,base=YANGListType("interface_type interface_name",mpls_interface.mpls_interface, yang_name="mpls-interface", rest_name="mpls-interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-type interface-name', extensions={u'tailf-common': {u'info': u'Define MPLS Interface', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'callpoint': u'MplsInterface', u'cli-mode-name': u'config-router-mpls-if-$(interface-type)-$(interface-name)'}}), is_container='list', yang_name="mpls-interface", rest_name="mpls-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Define MPLS Interface', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'callpoint': u'MplsInterface', u'cli-mode-name': u'config-router-mpls-if-$(interface-type)-$(interface-name)'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)
  except (TypeError, ValueError):
    # Surface a structured error describing the expected YANG type.
    raise ValueError({
        'error-string': """mpls_interface must be of a type compatible with list""",
        'defined-type': "list",
        'generated-type': """YANGDynClass(base=YANGListType("interface_type interface_name",mpls_interface.mpls_interface, yang_name="mpls-interface", rest_name="mpls-interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-type interface-name', extensions={u'tailf-common': {u'info': u'Define MPLS Interface', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'callpoint': u'MplsInterface', u'cli-mode-name': u'config-router-mpls-if-$(interface-type)-$(interface-name)'}}), is_container='list', yang_name="mpls-interface", rest_name="mpls-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Define MPLS Interface', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'callpoint': u'MplsInterface', u'cli-mode-name': u'config-router-mpls-if-$(interface-type)-$(interface-name)'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='list', is_config=True)""",
      })

  self.__mpls_interface = t
  if hasattr(self, '_set'):
    self._set()
Setter method for mpls_interface, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/mpls_interface (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_mpls_interface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mpls_interface() directly.
|
def render_document(template_name, data_name, output_name):
    """
    Combines a MarkDown template file from the aide_document package with a
    local associated YAML data file, then outputs the rendered combination to
    a local MarkDown output file.

    Parameters
    ==========
    template_name : String
        Exact name of the MarkDown template file from the
        aide_document/templates folder. Do not use the file path.
    data_name : String
        Relative file path from where this method is called to the location
        of the YAML data file to be used.
    output_name : String
        Relative file path from where this method is called to the location
        to which the output file is written.

    Examples
    ========
    Suppose we have template.md in aide_document and a directory as follows:
        data/
            params.yaml
    To render the document:

    >>> from aide_document import combine
    >>> combine.render_document('template.md', 'data/params.yaml', 'data/output.md')

    This will then combine the data and template files and write to a new
    output file within data/.
    """
    # Set up environment and load templates from pip package
    env = Environment(loader=PackageLoader('aide_document'))  # TODO: add custom path to templates

    # BUGFIX: the data file was previously opened inline and never closed;
    # use a context manager so the handle is released.
    # NOTE(review): yaml.load without an explicit Loader can construct
    # arbitrary objects; switch to yaml.safe_load if the data files never
    # need custom YAML tags.
    with open(data_name) as data_file:
        data = yaml.load(data_file)

    # Create output file, then combine template and data.
    with open(output_name, 'w') as output_file:
        output_file.write(env.get_template(template_name).render(data))
Combines a MarkDown template file from the aide_document package with a local associated YAML data file, then outputs the rendered combination to a local MarkDown output file.
Parameters
==========
template_name : String
Exact name of the MarkDown template file from the aide_document/templates folder. Do not use the file path.
data_name : String
Relative file path from where this method is called to the location of the YAML data file to be used.
output_name : String
Relative file path from where this method is called to the location to which the output file is written.
Examples
========
Suppose we have template.md in aide_document and a directory as follows:
data/
params.yaml
To render the document:
>>> from aide_document import combine
>>> combine.render_document('template.md', 'data/params.yaml', 'data/output.md')
This will then combine the data and template files and write to a new output file within data/.
|
def create_asset(json):
    """Create :class:`.resources.Asset` from JSON.

    :param json: JSON dict.
    :return: Asset instance.
    """
    asset = Asset(json['sys'])
    fields = json['fields']
    asset.fields = fields
    # The 'file' sub-dict carries the asset's URL and content type.
    file_info = fields['file']
    asset.url = file_info['url']
    asset.mimeType = file_info['contentType']
    return asset
|
Create :class:`.resources.Asset` from JSON.
:param json: JSON dict.
:return: Asset instance.
|
def _field_sort_name(cls, name):
    """Get a sort key for a field name that determines the order
    fields should be written in.

    Fields names are kept unchanged, unless they are instances of
    :class:`DateItemField`, in which case `year`, `month`, and `day`
    are replaced by `date0`, `date1`, and `date2`, respectively, to
    make them appear in that order.
    """
    if isinstance(cls.__dict__[name], DateItemField):
        # Literal substitutions; equivalent to re.sub with plain patterns.
        for part, ordered in (('year', 'date0'),
                              ('month', 'date1'),
                              ('day', 'date2')):
            name = name.replace(part, ordered)
    return name
|
Get a sort key for a field name that determines the order
fields should be written in.
Fields names are kept unchanged, unless they are instances of
:class:`DateItemField`, in which case `year`, `month`, and `day`
are replaced by `date0`, `date1`, and `date2`, respectively, to
make them appear in that order.
|
def parse_atoms(self):
    '''Parse every ATOM record and build per-chain ATOM -> SEQRES residue maps.

    All ATOM lines are parsed even though only one per residue needs to be parsed. The reason for parsing all the
    lines is just to sanity-check that the ATOMs within one residue are consistent with each other.'''
    atom_site_header_tag = self.main_tag.getElementsByTagName("PDBx:atom_siteCategory")
    assert(len(atom_site_header_tag) == 1)
    atom_site_header_tag = atom_site_header_tag[0]
    atom_site_tags = atom_site_header_tag.getElementsByTagName("PDBx:atom_site")
    # residue_map: chain id -> {residue id -> SEQRES index}
    # residues_read: residue id -> (residue amino acid, SEQRES index), used for consistency checks
    residue_map = {}
    residues_read = {}
    int_type = types.IntType  # Python 2 plain-int type, used for the type assert below
    for t in atom_site_tags:
        r, seqres, ResidueAA, Residue3AA = PDBML_slow.parse_atom_site(t, self.modified_residues)
        if r:
            # skip certain ACE residues
            if not(self.pdb_id in cases_with_ACE_residues_we_can_ignore and Residue3AA == 'ACE'):
                full_residue_id = str(r)
                if residues_read.get(full_residue_id):
                    # Repeated ATOM lines for the same residue must agree with what was seen before.
                    assert(residues_read[full_residue_id] == (r.ResidueAA, seqres))
                else:
                    residues_read[full_residue_id] = (r.ResidueAA, seqres)
                    residue_map[r.Chain] = residue_map.get(r.Chain, {})
                    assert(type(seqres) == int_type)
                    residue_map[r.Chain][str(r)] = seqres
    ## Create SequenceMap objects to map the ATOM Sequences to the SEQRES Sequences
    atom_to_seqres_sequence_maps = {}
    for chain_id, atom_seqres_mapping in residue_map.iteritems():
        atom_to_seqres_sequence_maps[chain_id] = SequenceMap.from_dict(atom_seqres_mapping)
    self.atom_to_seqres_sequence_maps = atom_to_seqres_sequence_maps
|
All ATOM lines are parsed even though only one per residue needs to be parsed. The reason for parsing all the
lines is just to sanity-check that the ATOMs within one residue are consistent with each other.
|
def t_whitespace(self, s):
    r'\s+'
    # NOTE: the docstring above is the token's regex pattern, consumed by the
    # scanner framework -- it must remain exactly r'\s+'.
    # Emit a SPACE token for the matched run and advance the scan position.
    # (Removed a dead trailing ``pass`` statement.)
    self.add_token('SPACE', s)
    self.pos += len(s)
|
r'\s+
|
def init_publisher(app):
    """Register the websub template context variables on *app*.
    Calling this with your flask app is required for the publisher
    decorator to work.
    """
    @app.context_processor
    def inject_links():
        top = stack.top
        return {
            'websub_self_url': top.websub_self_url,
            'websub_hub_url': top.websub_hub_url,
            'websub_self_link': top.websub_self_link,
            'websub_hub_link': top.websub_hub_link,
        }
|
Calling this with your flask app as argument is required for the
publisher decorator to work.
|
def __tomo_linear_inv(freqs, ops, weights=None, trace=None):
    """
    Reconstruct a matrix through linear inversion.
    Args:
        freqs (list[float]): list of observed frequences.
        ops (list[np.array]): list of corresponding projectors.
        weights (list[float] or array_like):
            weights to be used for weighted fitting.
        trace (float or None): trace of returned operator.
    Returns:
        numpy.array: A numpy array of the reconstructed operator.
    """
    # Optional weighting matrix (diagonalized when a 1-d vector is given).
    W = None
    if weights is not None:
        W = np.array(weights)
        if W.ndim == 1:
            W = np.diag(W)
    # Stack the vectorized projectors into the basis matrix S.
    S = np.array([vectorize(m).conj()
                  for m in ops]).reshape(len(ops), ops[0].size)
    v = np.array(freqs)  # |f>
    if W is not None:
        S = np.dot(W, S)      # W.S
        v = np.dot(W, freqs)  # W.|f>
    Sdg = S.T.conj()  # S^*.W^*
    pseudo_inv = np.linalg.pinv(np.dot(Sdg, S))  # (S^*.W^*.W.S)^-1
    # Linear inversion of the (weighted) frequencies.
    ret = devectorize(np.dot(pseudo_inv, np.dot(Sdg, v)))
    # Renormalize to the requested trace value, if any.
    if trace is not None:
        ret = trace * ret / np.trace(ret)
    return ret
|
Reconstruct a matrix through linear inversion.
Args:
freqs (list[float]): list of observed frequences.
ops (list[np.array]): list of corresponding projectors.
weights (list[float] or array_like):
weights to be used for weighted fitting.
trace (float or None): trace of returned operator.
Returns:
numpy.array: A numpy array of the reconstructed operator.
|
def find_branches(self, labels=False, unique=False):
    """Recursively constructs a list of pointers of the tree's structure
    Args:
        labels (bool): If True, returned lists consist of node labels.
                       If False (default), lists consist of node
                       pointers. This option is mostly intended for
                       debugging purposes.
        unique (bool): If True, return lists of all unique, linear branches
                       of the tree. More accurately, it returns a list
                       of lists where each list contains a single,
                       unique, linear path from the calling node to the
                       tree's leaf nodes. If False (default),
                       a highly-nested list is returned where each nested
                       list represents a branch point in the tree.
                       See Examples for more.
    Examples:
        >>> from arandomness.trees import OmniTree
        >>> a = OmniTree(label='a')
        >>> b = OmniTree(label='b', parents=[a])
        >>> c = OmniTree(label='c', parents=[b])
        >>> d = OmniTree(label='d', parents=[b])
        >>> e = OmniTree(label='e', parents=[c, d])
        >>> a.find_branches(labels=True)
        ['a', ['b', ['c', ['e']], ['d', ['e']]]]
        >>> a.find_branches(labels=True, unique=True)
        [['a', 'b', 'c', 'e'], ['a', 'b', 'd', 'e']]
    """
    # This node contributes either its label or itself to the result.
    if labels is True:
        identifier = [self.label]
    else:
        identifier = [self]
    if self._children == []:  # Base Case: current node is a leaf/end node
        return identifier
    # Recursive Case: all other nodes
    branches = []
    for child in self._children:
        if unique is True:
            for branch in child.find_branches(labels=labels, unique=True):
                # A leaf child returns its flat identifier list, so iterating
                # it yields bare labels/nodes rather than branch lists; wrap
                # them. (The previous ``list(branch)`` call split
                # multi-character labels into characters and raised TypeError
                # on non-iterable node pointers.)
                if not isinstance(branch, list):
                    branch = [branch]
                branches.append(identifier + branch)
        else:
            branches.append(child.find_branches(labels=labels))
    # Proper construction of the result depends on 'unique'.
    if unique is True:
        return branches
    return identifier + branches
|
Recursively constructs a list of pointers of the tree's structure
Args:
labels (bool): If True, returned lists consist of node labels.
If False (default), lists consist of node
pointers. This option is mostly intended for
debugging purposes.
unique (bool): If True, return lists of all unique, linear branches
of the tree. More accurately, it returns a list
of lists where each list contains a single,
unique, linear path from the calling node to the
tree's leaf nodes. If False (default),
a highly-nested list is returned where each nested
list represents a branch point in the tree.
See Examples for more.
Examples:
>>> from arandomness.trees import OmniTree
>>> a = OmniTree(label='a')
>>> b = OmniTree(label='b', parents=[a])
>>> c = OmniTree(label='c', parents=[b])
>>> d = OmniTree(label='d', parents=[b])
>>> e = OmniTree(label='e', parents=[c, d])
>>> a.find_branches(labels=True)
['a', ['b', ['c', ['e']], ['d', ['e']]]]
>>> a.find_branches(labels=True, unique=True)
[['a', 'b', 'c', 'e'], ['a', 'b', 'd', 'e']]
|
def __update_html(self, html):
    """
    Updates the View with given html content.

    :param html: Html content.
    :type html: unicode
    """
    if platform.system() in ("Windows", "Microsoft"):
        # Convert Windows backslash paths (drive-letter or UNC) to forward
        # slashes so they survive embedding in the javascript string below.
        html = re.sub(r"((?:[a-zA-Z]\:|\\\\[\w\.]+\\[\w.$]+)\\(?:[\w]+\\)*\w([\w.])+)",
                      lambda x: foundations.strings.to_forward_slashes(x.group(1)),
                      html)
    # Escape double quotes and strip newlines so the whole content fits
    # inside a single javascript string literal.
    html = foundations.strings.replace(html, OrderedDict([('"', '\\"'), ("\n", "")]))
    self.__evaluate_javascript("$(\"#report\").html(\"{0}\");".format(html))
|
Updates the View with given html content.
:param html: Html content.
:type html: unicode
|
def _future_command_unlocked(self, cmd):
    """Run a command on the background loop and return a future for its result.

    (Fixed docstring: the previous version documented a ``loop`` parameter
    that this method does not take; the loop comes from ``self._loop``.)

    Args:
        cmd (list): The command and arguments that we wish to call.

    Returns:
        asyncio.Future: An awaitable future with the result of the operation.
    """
    future = self._loop.create_future()
    asyncio_loop = self._loop.get_loop()

    def _done_callback(result):
        # Translate the command's result dict into the future's outcome.
        retval = result['return_value']
        if not result['result']:
            future.set_exception(HardwareError("Error executing synchronous command",
                                               command=cmd, return_value=retval))
        else:
            future.set_result(retval)

    # The completion callback may fire on another thread, so marshal it
    # back onto the event loop thread-safely.
    callback = functools.partial(asyncio_loop.call_soon_threadsafe, _done_callback)
    self._commands.put((cmd, callback, True, None))
    return future
|
Run command as a coroutine and return a future.
Args:
    cmd (list): The command and arguments that we wish to call.
Returns:
asyncio.Future: An awaitable future with the result of the operation.
|
def register (self, target):
    """ Registers a new virtual target. Checks if there's already registered target, with the same
        name, type, project and subvariant properties, and also with the same sources
        and equal action. If such target is found it is returned and 'target' is not registered.
        Otherwise, 'target' is registered and returned.
    """
    assert isinstance(target, VirtualTarget)
    # Cache key: targets can only be duplicates if path and name coincide.
    if target.path():
        signature = target.path() + "-" + target.name()
    else:
        signature = "-" + target.name()
    result = None
    if signature not in self.cache_:
        self.cache_ [signature] = []
    # Scan previously registered targets with the same signature for one
    # whose action is equivalent (same action name, sources and properties).
    for t in self.cache_ [signature]:
        a1 = t.action ()
        a2 = target.action ()
        # TODO: why are we checking for not result?
        if not result:
            if not a1 and not a2:
                result = t
            else:
                if a1 and a2 and a1.action_name () == a2.action_name () and a1.sources () == a2.sources ():
                    ps1 = a1.properties ()
                    ps2 = a2.properties ()
                    # Compare base + free + (dependency - incidental) property sets.
                    p1 = ps1.base () + ps1.free () +\
                        b2.util.set.difference(ps1.dependency(), ps1.incidental())
                    p2 = ps2.base () + ps2.free () +\
                        b2.util.set.difference(ps2.dependency(), ps2.incidental())
                    if p1 == p2:
                        result = t
    # No equivalent pre-existing target: register the new one.
    if not result:
        self.cache_ [signature].append (target)
        result = target
    # TODO: Don't append if we found pre-existing target?
    self.recent_targets_.append(result)
    self.all_targets_.append(result)
    return result
|
Registers a new virtual target. Checks if there's already registered target, with the same
name, type, project and subvariant properties, and also with the same sources
and equal action. If such target is found it is returned and 'target' is not registered.
Otherwise, 'target' is registered and returned.
|
def _list_store_resources(self, request, head_id, filter_ids,
                          resource_fetcher, block_xform):
    """Builds a list of blocks or resources derived from blocks,
    handling multiple possible filter requests:
        - filtered by a set of ids
        - filtered by head block
        - filtered by both id and head block
        - not filtered (all current resources)
    Note:
        This method will fail if `_block_store` has not been set
    Args:
        request (object): The parsed protobuf request object
        head_id (str): Either request.head_id, or the current chain head
        filter_ids (list of str): the resource ids (if any) to filter by
        resource_fetcher (function): Fetches a resource by its id
            Expected args:
                resource_id: The id of the resource to be fetched
            Expected return:
                object: The resource to be appended to the results
        block_xform (function): Transforms a block into a list of resources
            Expected args:
                block: A block object from the block store
            Expected return:
                list: To be concatenated to the end of the results
    Returns:
        list: List of blocks or data from blocks. If filtered by ids,
            they will be listed in the same order as the id filters,
            otherwise they will be ordered from newest to oldest
    """
    resources = []
    # Simply fetch by id if filtered by id but not by head block
    if filter_ids and not request.head_id:
        for resource_id in filter_ids:
            try:
                resources.append(resource_fetcher(resource_id))
            except (KeyError, ValueError, TypeError):
                # Invalid ids should be omitted, not raise an exception
                pass
    # Traverse block chain to build results for most scenarios
    else:
        current_id = head_id
        # Walk backwards from the head via each block header's
        # previous_block_id until we fall off the known chain.
        while current_id in self._block_store:
            block = self._block_store[current_id].block
            resources += block_xform(block)
            header = BlockHeader()
            header.ParseFromString(block.header)
            current_id = header.previous_block_id
    # If filtering by head AND ids, the traverse results must be winnowed
    if request.head_id and filter_ids:
        # Keep only requested ids, re-ordered to match the filter order.
        matches = {
            r.header_signature: r
            for r in resources if r.header_signature in filter_ids
        }
        resources = [matches[i] for i in filter_ids if i in matches]
    return resources
|
Builds a list of blocks or resources derived from blocks,
handling multiple possible filter requests:
- filtered by a set of ids
- filtered by head block
- filtered by both id and head block
- not filtered (all current resources)
Note:
This method will fail if `_block_store` has not been set
Args:
request (object): The parsed protobuf request object
head_id (str): Either request.head_id, or the current chain head
filter_ids (list of str): the resource ids (if any) to filter by
resource_fetcher (function): Fetches a resource by its id
Expected args:
resource_id: The id of the resource to be fetched
Expected return:
object: The resource to be appended to the results
block_xform (function): Transforms a block into a list of resources
Expected args:
block: A block object from the block store
Expected return:
list: To be concatenated to the end of the results
Returns:
list: List of blocks or data from blocks. If filtered by ids,
they will be listed in the same order as the id filters,
otherwise they will be ordered from newest to oldest
|
def post(self, action, data=None, headers=None):
    """Makes a POST request to the given action endpoint.

    (Fixed docstring: it previously claimed this makes a GET request.)

    :param action: path joined onto the client's endpoint.
    :param data: optional request body.
    :param headers: optional request headers.
    :return: whatever ``self.request`` returns for the response.
    """
    return self.request(make_url(self.endpoint, action), method='POST', data=data,
                        headers=headers)
|
Makes a POST request
|
def setColor(self, key, value):
    """
    Sets the color value for the inputed color.

    :param key | <unicode>
           value | <QtGui.QColor>
    """
    normalized = nativestring(key).capitalize()
    self._colorSet.setColor(normalized, value)
    # Only the background color is mirrored into the widget's palette.
    if normalized != 'Background':
        return
    palette = self.palette()
    palette.setColor(palette.Base, value)
    self.setPalette(palette)
|
Sets the color value for the inputed color.
:param key | <unicode>
value | <QtGui.QColor>
|
def matches(self, client, event_data):
    """Return True only when every registered filter accepts the event."""
    return all(flt(client, event_data) for flt in self.filters)
|
True if all filters are matching.
|
def invalidate(self):
    """
    Invalidate the access token for this client/access token pair and
    clear all locally cached session state.

    (Fixed docstring: it previously claimed a dict return, but the method
    always returns True; request errors surface from ``_ygg_req``.)

    Returns:
        bool: True once the tokens have been invalidated.
    """
    endpoint = '/invalidate'
    payload = {
        'accessToken': self.access_token,
        'clientToken': self.client_token,
    }
    self._ygg_req(endpoint, payload)
    # Forget everything tied to the now-invalid token pair.
    self.client_token = ''
    self.access_token = ''
    self.available_profiles = []
    self.selected_profile = {}
    return True
|
Invalidate access tokens with a client/access token pair
Returns:
    bool: True once the tokens have been invalidated and local session state cleared
|
def _update_camera_pos(self):
    """ Set the camera position and orientation"""
    # transform will be updated several times; do not update camera
    # transform until we are done.
    ch_em = self.events.transform_change
    with ch_em.blocker(self._update_transform):
        tr = self.transform
        tr.reset()
        up, forward, right = self._get_dim_vectors()
        # Create mapping so correct dim is up
        pp1 = np.array([(0, 0, 0), (0, 0, -1), (1, 0, 0), (0, 1, 0)])
        pp2 = np.array([(0, 0, 0), forward, right, up])
        tr.set_mapping(pp1, pp2)
        # Pull the camera back along the viewing direction, then apply the
        # current rotation, per-axis flips, and recentering on self.center.
        tr.translate(-self._actual_distance * forward)
        self._rotate_tr()
        tr.scale([1.0/a for a in self._flip_factors])
        tr.translate(np.array(self.center))
|
Set the camera position and orientation
|
def resolve_and_call(self, func, extra_env=None):
    """Resolve *func*'s arguments (possibly filling them from the
    environment) and invoke it with the resolved keyword arguments."""
    return func(**self.resolve_parameters(func, extra_env=extra_env))
|
Resolve function arguments and call them, possibily filling from the environment
|
def _expand_numparse(disable_numparse, column_count):
    """
    Return a list of bools of length `column_count`: True where number
    parsing should be applied to that column.

    When `disable_numparse` is an iterable of indices, those indices become
    False and every other column True; when it is a bool, the whole list
    takes its negation.
    """
    if not isinstance(disable_numparse, Iterable):
        return [not disable_numparse] * column_count
    flags = [True] * column_count
    for column_index in disable_numparse:
        flags[column_index] = False
    return flags
|
Return a list of bools of length `column_count` which indicates whether
number parsing should be used on each column.
If `disable_numparse` is a list of indices, each of those indices are False,
and everything else is True.
If `disable_numparse` is a bool, then the returned list is all the same.
|
def byaxis(self):
    """Return the subspace defined along one or several dimensions.
    Examples
    --------
    Indexing with integers or slices:
    >>> space = odl.rn((2, 3, 4))
    >>> space.byaxis[0]
    rn(2)
    >>> space.byaxis[1:]
    rn((3, 4))
    Lists can be used to stack spaces arbitrarily:
    >>> space.byaxis[[2, 1, 2]]
    rn((4, 3, 4))
    """
    space = self  # captured by the helper class's closure below

    class NpyTensorSpacebyaxis(object):
        """Helper class for indexing by axis."""
        def __getitem__(self, indices):
            """Return ``self[indices]``."""
            try:
                iter(indices)
            except TypeError:
                # Single integer or slice: plain shape indexing.
                newshape = space.shape[indices]
            else:
                # Sequence of axes: gather (possibly repeated) axis sizes.
                newshape = tuple(space.shape[i] for i in indices)
            if isinstance(space.weighting, ArrayWeighting):
                # Index the weight array the same way so per-axis weights
                # stay consistent with the new shape.
                new_array = np.asarray(space.weighting.array[indices])
                weighting = NumpyTensorSpaceArrayWeighting(
                    new_array, space.weighting.exponent)
            else:
                weighting = space.weighting
            return type(space)(newshape, space.dtype, weighting=weighting)

        def __repr__(self):
            """Return ``repr(self)``."""
            return repr(space) + '.byaxis'

    return NpyTensorSpacebyaxis()
|
Return the subspace defined along one or several dimensions.
Examples
--------
Indexing with integers or slices:
>>> space = odl.rn((2, 3, 4))
>>> space.byaxis[0]
rn(2)
>>> space.byaxis[1:]
rn((3, 4))
Lists can be used to stack spaces arbitrarily:
>>> space.byaxis[[2, 1, 2]]
rn((4, 3, 4))
|
def lock_status(self, resource_id, parent_id=None, account_id=None):
    """Get the lock status for a given resource.

    For security groups, ``parent_id`` is their vpc.

    :param resource_id: id of the resource to query.
    :param parent_id: optional parent resource id (e.g. the vpc of a
        security group), sent as a query parameter when present.
    :param account_id: optional account override; resolved through
        ``get_account_id`` when omitted.
    :return: the HTTP response from the locks endpoint.
    """
    account_id = self.get_account_id(account_id)
    # A plain conditional expression replaces the error-prone
    # ``x and y or None`` idiom (same behavior: None when no parent).
    params = {'parent_id': parent_id} if parent_id else None
    return self.http.get(
        "%s/%s/locks/%s" % (self.endpoint, account_id, resource_id),
        params=params, auth=self.get_api_auth())
|
Get the lock status for a given resource.
for security groups, parent id is their vpc.
|
def find_children(self):
    """Populate each vertex's ``children`` from the ``parents`` lists.

    Takes the tree structure, which records only the parents of each
    vertex, and fills in the inverse child relation in place."""
    for vertex in self.vertices:
        vertex.children = []
    for index, vertex in enumerate(self.vertices):
        for parent in vertex.parents:
            siblings = self.vertices[parent].children
            if index not in siblings:
                siblings.append(index)
|
Take a tree and set the children according to the parents.
Takes a tree structure which lists the parents of each vertex
and computes the children for each vertex and places them in.
|
def remove(self, *members):
    """ Removes @members from the set
        -> #int the number of members that were removed from the set
    """
    payload = members
    if self.serialized:
        # Serialize each member before handing it to redis.
        payload = [self._dumps(member) for member in members]
    return self._client.srem(self.key_prefix, *payload)
|
Removes @members from the set
-> #int the number of members that were removed from the set
|
def eth_getBlockByNumber(self, block=BLOCK_TAG_LATEST, tx_objects=True):
    """Fetch a block by number or tag via the JSON-RPC API.

    https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getblockbynumber
    TESTED
    """
    validated = validate_block(block)
    return self._call("eth_getBlockByNumber", [validated, tx_objects])
|
TODO: documentation
https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_getblockbynumber
TESTED
|
def list(self, pattern='*'):
    """Returns the metric descriptors matching the filters.
    Args:
      pattern: An optional Unix shell-style wildcard pattern to further
        filter the descriptors, e.g. ``"compute*"``, ``"*cpu/load_??m"``.
    Returns:
      A list of MetricDescriptor objects that match the filters.
    """
    # Fetch and memoize the descriptor list on first use.
    if self._descriptors is None:
        self._descriptors = self._client.list_metric_descriptors(
            filter_string=self._filter_string, type_prefix=self._type_prefix)
    return [descriptor for descriptor in self._descriptors
            if fnmatch.fnmatch(descriptor.type, pattern)]
|
Returns a list of metric descriptors that match the filters.
Args:
pattern: An optional pattern to further filter the descriptors. This can
include Unix shell-style wildcards. E.g. ``"compute*"``,
``"*cpu/load_??m"``.
Returns:
A list of MetricDescriptor objects that match the filters.
|
def _raw(cls, vertices, edges, out_edges, in_edges, head, tail):
    """
    Private constructor building an ObjectGraph directly from its parts.

    ``vertices`` is the vertex collection, ``out_edges``/``in_edges`` map
    vertices to lists of edges, and ``head``/``tail`` map edges to objects.
    """
    # Bypass __init__ and attach the precomputed attributes directly.
    graph = object.__new__(cls)
    graph._vertices = vertices
    graph._edges = edges
    graph._out_edges = out_edges
    graph._in_edges = in_edges
    graph._head = head
    graph._tail = tail
    return graph
|
Private constructor for direct construction
of an ObjectGraph from its attributes.
vertices is the collection of vertices
out_edges and in_edges map vertices to lists of edges
head and tail map edges to objects.
|
def i18n(msg, event=None, lang='en', domain='backend'):
    """Gettext function wrapper returning *msg* in a given language by domain.
    To use internationalization (i18n) on your messages, import this as '_'
    and use as usual. The language is taken from ``event.client.language``
    when an event is supplied, otherwise from ``lang``."""
    language = event.client.language if event is not None else lang
    return Domain(domain).get(language, msg)
|
Gettext function wrapper to return a message in a specified language by domain
To use internationalization (i18n) on your messages, import it as '_' and use as usual.
Do not forget to supply the client's language setting.
|
def ring_coding(array):
    """
    Produce matplotlib Path codes for the exterior and interior rings of a
    polygon geometry: MOVETO to open each subpath, LINETO in between, and
    CLOSEPOLY at the end.
    """
    n = len(array)
    codes = np.full(n, Path.LINETO, dtype=Path.code_type)
    codes[0] = Path.MOVETO
    codes[-1] = Path.CLOSEPOLY
    return codes
|
Produces matplotlib Path codes for exterior and interior rings
of a polygon geometry.
|
def write_utf(self, s):
    """Write a length-prefixed UTF-8 string to the packet.

    Raises NamePartTooLongException when the encoded form exceeds 64 bytes.
    """
    encoded = s.encode('utf-8')
    size = len(encoded)
    if size > 64:
        raise NamePartTooLongException
    self.write_byte(size)
    self.write_string(encoded, size)
|
Writes a UTF-8 string of a given length to the packet
|
def preprocess_async(train_dataset, output_dir, eval_dataset=None, checkpoint=None, cloud=None):
    """Preprocess data into a form that training can consume efficiently.
    Args:
      train_dataset: training data source to preprocess. Can be CsvDataset or BigQueryDataSet.
          If eval_dataset is None, the pipeline will randomly split train_dataset into
          train/eval set with 7:3 ratio.
      output_dir: The output directory to use. Preprocessing will create a sub directory under
          it for each run, and also update "latest" file which points to the latest preprocessed
          directory. Users are responsible for cleanup. Can be local or GCS path.
      eval_dataset: evaluation data source to preprocess. Can be CsvDataset or BigQueryDataSet.
          If specified, it will be used for evaluation during training, and train_dataset will be
          completely used for training.
      checkpoint: the Inception checkpoint to use. If None, a default checkpoint is used.
      cloud: a DataFlow pipeline option dictionary such as {'num_workers': 3}. If anything but
          not None, it will run in cloud. Otherwise, it runs locally.
    Returns:
      A google.datalab.utils.Job object that can be used to query state from or wait.
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        if cloud is None:
            return _local.Local.preprocess(train_dataset, output_dir, eval_dataset, checkpoint)
        # Any non-dict truthy value (e.g. True) means "cloud with defaults".
        cloud_options = cloud if isinstance(cloud, dict) else {}
        return _cloud.Cloud.preprocess(train_dataset, output_dir, eval_dataset, checkpoint,
                                       cloud_options)
|
Preprocess data. Produce output that can be used by training efficiently.
Args:
train_dataset: training data source to preprocess. Can be CsvDataset or BigQueryDataSet.
If eval_dataset is None, the pipeline will randomly split train_dataset into
train/eval set with 7:3 ratio.
output_dir: The output directory to use. Preprocessing will create a sub directory under
it for each run, and also update "latest" file which points to the latest preprocessed
directory. Users are responsible for cleanup. Can be local or GCS path.
eval_dataset: evaluation data source to preprocess. Can be CsvDataset or BigQueryDataSet.
If specified, it will be used for evaluation during training, and train_dataset will be
completely used for training.
checkpoint: the Inception checkpoint to use. If None, a default checkpoint is used.
cloud: a DataFlow pipeline option dictionary such as {'num_workers': 3}. If anything but
not None, it will run in cloud. Otherwise, it runs locally.
Returns:
A google.datalab.utils.Job object that can be used to query state from or wait.
|
def save_figures(block, block_vars, gallery_conf):
    """Save all open figures of the example code-block.
    Parameters
    ----------
    block : tuple
        A tuple containing the (label, content, line_number) of the block.
    block_vars : dict
        Dict of block variables.
    gallery_conf : dict
        Contains the configuration of Sphinx-Gallery
    Returns
    -------
    images_rst : str
        rst code to embed the images in the document.
    """
    image_path_iterator = block_vars['image_path_iterator']
    all_rst = u''
    prev_count = len(image_path_iterator)
    for scraper in gallery_conf['image_scrapers']:
        rst = scraper(block, block_vars, gallery_conf)
        # NOTE: ``basestring`` implies Python 2 (or a compatibility alias
        # defined elsewhere in this module) -- confirm before porting.
        if not isinstance(rst, basestring):
            raise TypeError('rst from scraper %r was not a string, '
                            'got type %s:\n%r'
                            % (scraper, type(rst), rst))
        # Scrapers append new image paths to the shared iterator; verify each
        # advertised image actually exists on disk.
        # NOTE(review): prev_count is not advanced per scraper, so later
        # scrapers re-verify earlier scrapers' images -- confirm intended.
        n_new = len(image_path_iterator) - prev_count
        for ii in range(n_new):
            current_path, _ = _find_image_ext(
                image_path_iterator.paths[prev_count + ii])
            if not os.path.isfile(current_path):
                raise RuntimeError('Scraper %s did not produce expected image:'
                                   '\n%s' % (scraper, current_path))
        all_rst += rst
    return all_rst
|
Save all open figures of the example code-block.
Parameters
----------
block : tuple
A tuple containing the (label, content, line_number) of the block.
block_vars : dict
Dict of block variables.
gallery_conf : dict
Contains the configuration of Sphinx-Gallery
Returns
-------
images_rst : str
rst code to embed the images in the document.
|
def get_points_and_weights(w_func=lambda x : np.ones(x.shape),
                           left=-1.0, right=1.0, num_points=5, n=4096):
    r"""Quadrature points and weights for a weighting function.
    Points and weights for approximating the integral
        I = \int_left^right f(x) w(x) dx
    given the weighting function w(x) using the approximation
        I ~ \sum_i w_i f(x_i)
    Args:
        w_func: The weighting function w(x). Must be a function that takes
            one argument and is valid over the open interval (left, right).
        left: The left boundary of the interval
        right: The right boundary of the interval
        num_points: number of integration points to return
        n: the number of points to evaluate w_func at.
    Returns:
        A tuple (points, weights) where points is a sorted array of the
        points x_i and weights gives the corresponding weights w_i.
    """
    # Sample the weight function at n midpoints across the interval.
    dx = (float(right)-left)/n
    z = np.hstack(np.linspace(left+0.5*dx, right-0.5*dx, n))
    w = dx*w_func(z)
    # Recurrence coefficients of the polynomials orthogonal w.r.t. (z, w).
    (a, b) = discrete_gautschi(z, w, num_points)
    alpha = a
    beta = np.sqrt(b)
    # Symmetric tridiagonal (Jacobi) matrix; its eigenvalues are the
    # quadrature nodes (Golub-Welsch construction).
    J = np.diag(alpha)
    J += np.diag(beta, k=-1)
    J += np.diag(beta, k=1)
    (points,v) = np.linalg.eigh(J)
    ind = points.argsort()
    points = points[ind]
    # Weights come from the first component of each eigenvector, scaled by
    # the total mass of the sampled weight function.
    weights = v[0,:]**2 * w.sum()
    weights = weights[ind]
    return (points, weights)
|
Quadrature points and weights for a weighting function.
Points and weights for approximating the integral
I = \int_left^right f(x) w(x) dx
given the weighting function w(x) using the approximation
I ~ w_i f(x_i)
Args:
w_func: The weighting function w(x). Must be a function that takes
one argument and is valid over the open interval (left, right).
left: The left boundary of the interval
right: The left boundary of the interval
num_points: number of integration points to return
n: the number of points to evaluate w_func at.
Returns:
A tuple (points, weights) where points is a sorted array of the
points x_i and weights gives the corresponding weights w_i.
|
def submit_form(self, form, submit=None, **kwargs):
    """Submit a form.

    :param Form form: Filled-out form object
    :param Submit submit: Optional `Submit` to click, if form includes
        multiple submits
    :param kwargs: Keyword arguments to `Session::send`
    """
    method = form.method.upper()
    # Resolve the target URL, falling back to the current page.
    url = self._build_url(form.action) or self.url
    serialized = form.serialize(submit=submit).to_requests(method)
    send_args = self._build_send_args(**kwargs)
    send_args.update(serialized)
    response = self.session.request(method, url, **send_args)
    # Record the response in the browsing history.
    self._update_state(response)
|
Submit a form.
:param Form form: Filled-out form object
:param Submit submit: Optional `Submit` to click, if form includes
multiple submits
:param kwargs: Keyword arguments to `Session::send`
|
def predict_proba(self, X):
    """Return the output of the module's forward method as a numpy array.

    If the module's forward method returns multiple outputs as a tuple,
    only the first output is used and the rest are ignored; use
    :func:`~skorch.NeuralNet.forward` when every output matters.

    Parameters
    ----------
    X : input data, compatible with skorch.dataset.Dataset
      By default, you should be able to pass:
        * numpy arrays
        * torch tensors
        * pandas DataFrame or Series
        * scipy sparse CSR matrices
        * a dictionary of the former three
        * a list/tuple of the former three
        * a Dataset
      If this doesn't work with your data, you have to pass a
      ``Dataset`` that can deal with the data.

    Returns
    -------
    y_proba : numpy ndarray
    """
    outputs = []
    for batch_output in self.forward_iter(X, training=False):
        if isinstance(batch_output, tuple):
            batch_output = batch_output[0]
        outputs.append(to_numpy(batch_output))
    return np.concatenate(outputs, 0)
|
Return the output of the module's forward method as a numpy
array.
If the module's forward method returns multiple outputs as a
tuple, it is assumed that the first output contains the
relevant information and the other values are ignored. If all
values are relevant, consider using
:func:`~skorch.NeuralNet.forward` instead.
Parameters
----------
X : input data, compatible with skorch.dataset.Dataset
By default, you should be able to pass:
* numpy arrays
* torch tensors
* pandas DataFrame or Series
* scipy sparse CSR matrices
* a dictionary of the former three
* a list/tuple of the former three
* a Dataset
If this doesn't work with your data, you have to pass a
``Dataset`` that can deal with the data.
Returns
-------
y_proba : numpy ndarray
|
def get_method_serializers(self, http_method):
    """Get request method serializers + default media type.

    Serializers come from ``method_serializers`` when defined for the
    method, otherwise the default serializers are used. HEAD requests fall
    back to the GET serializers when no HEAD serializers were specified.
    The default media type is resolved the same way.

    :param http_method: HTTP method as a string.
    :returns: Tuple of serializers and default media type.
    """
    method = http_method
    if method == 'HEAD' and 'HEAD' not in self.method_serializers:
        method = 'GET'
    serializers = self.method_serializers.get(method, self.serializers)
    media_type = self.default_method_media_type.get(method, self.default_media_type)
    return serializers, media_type
|
Get request method serializers + default media type.
Grab serializers from ``method_serializers`` if defined, otherwise
returns the default serializers. Uses GET serializers for HEAD requests
if no HEAD serializers were specified.
The method also determines the default media type.
:param http_method: HTTP method as a string.
:returns: Tuple of serializers and default media type.
|
def get_protocol_version(protocol=None, target=None):
    """
    Return a suitable pickle protocol version for a given target.

    The requested protocol is clamped to what both the running python and
    the targeted python support, emitting a warning on each downgrade.

    Arguments:
        target: The internals description of the targeted python
            version. If this is ``None`` the specification of the currently
            running python version will be used.
        protocol(None or int): The requested protocol version (or None for
            the default of the target python version).

    Returns:
        int: A suitable pickle protocol version.
    """
    target = get_py_internals(target)
    if protocol is None:
        protocol = target['pickle_default_protocol']
    # Clamp to the running interpreter's capabilities first...
    if protocol > cPickle.HIGHEST_PROTOCOL:
        warnings.warn('Downgrading pickle protocol, running python supports up to %d.' % cPickle.HIGHEST_PROTOCOL)
        protocol = cPickle.HIGHEST_PROTOCOL
    # ...then to the target interpreter's capabilities.
    target_highest = target['pickle_highest_protocol']
    if protocol > target_highest:
        warnings.warn('Downgrading pickle protocol, target python supports up to %d.' % target_highest)
        protocol = target_highest
    return protocol
|
Return a suitable pickle protocol version for a given target.
Arguments:
target: The internals description of the targeted python
version. If this is ``None`` the specification of the currently
running python version will be used.
protocol(None or int): The requested protocol version (or None for the
default of the target python version).
Returns:
int: A suitable pickle protocol version.
|
def get_free_region(self, width, height):
    """Get a free region of given size and allocate it

    Parameters
    ----------
    width : int
        Width of region to allocate
    height : int
        Height of region to allocate

    Returns
    -------
    bounds : tuple | None
        A newly allocated region as (x, y, w, h) or None
        (if failed).

    Notes
    -----
    ``self._atlas_nodes`` holds (x, y, w) triples describing the top edge
    of the already-allocated area, ordered by x (a "skyline"-style packer).
    The placement that leaves the skyline lowest wins; ties are broken by
    the narrower node.
    """
    # Scan all skyline nodes for the best (lowest, then narrowest) fit.
    best_height = best_width = np.inf
    best_index = -1
    for i in range(len(self._atlas_nodes)):
        # self._fit returns the y at which the box can sit when
        # left-aligned at node i; a negative y is treated as "no fit".
        y = self._fit(i, width, height)
        if y >= 0:
            node = self._atlas_nodes[i]
            if (y+height < best_height or
                    (y+height == best_height and node[2] < best_width)):
                best_height = y+height
                best_index = i
                best_width = node[2]
                # Remember the candidate region for the current best node.
                region = node[0], y, width, height
    if best_index == -1:
        # No node can accommodate the requested size.
        return None
    # Insert a new node representing the top edge of the allocated region.
    node = region[0], region[1] + height, width
    self._atlas_nodes.insert(best_index, node)
    i = best_index+1
    # Shrink (or remove) following nodes now shadowed by the new node.
    while i < len(self._atlas_nodes):
        node = self._atlas_nodes[i]
        prev_node = self._atlas_nodes[i-1]
        if node[0] < prev_node[0]+prev_node[2]:
            # Overlap: push this node's left edge right by the overlap.
            shrink = prev_node[0]+prev_node[2] - node[0]
            x, y, w = self._atlas_nodes[i]
            self._atlas_nodes[i] = x+shrink, y, w-shrink
            if self._atlas_nodes[i][2] <= 0:
                # Node fully covered: drop it and re-examine the next one.
                del self._atlas_nodes[i]
                i -= 1
            else:
                break
        else:
            break
        i += 1
    # Merge nodes
    # Fuse adjacent nodes that ended up at the same height into one span.
    i = 0
    while i < len(self._atlas_nodes)-1:
        node = self._atlas_nodes[i]
        next_node = self._atlas_nodes[i+1]
        if node[1] == next_node[1]:
            self._atlas_nodes[i] = node[0], node[1], node[2]+next_node[2]
            del self._atlas_nodes[i+1]
        else:
            i += 1
    return region
|
Get a free region of given size and allocate it
Parameters
----------
width : int
Width of region to allocate
height : int
Height of region to allocate
Returns
-------
bounds : tuple | None
A newly allocated region as (x, y, w, h) or None
(if failed).
|
def parse_version(version: str) -> "tuple | None":
    """Parse a string formatted X[.Y.Z] version number into a tuple.

    Missing components are padded with zeros.  A falsy input (empty
    string or None) yields ``None`` — hence the ``tuple | None`` return
    annotation; the previous ``-> tuple`` annotation was wrong for that
    case.

    :param version: Dotted version string such as ``'10.2.3'`` or ``'12'``.
    :return: Tuple of ints (at least three elements), or ``None``.
    :raises ValueError: if any component is not a valid integer.

    >>> parse_version('10.2.3')
    (10, 2, 3)
    >>> parse_version('12')
    (12, 0, 0)
    """
    if not version:
        return None
    parts = version.split('.')
    # Pad to at least three components; extra components are kept as-is
    # (a negative "missing" multiplies to an empty pad list).
    missing = 3 - len(parts)
    return tuple(int(i) for i in parts + ([0] * missing))
|
Parse a string formatted X[.Y.Z] version number into a tuple
>>> parse_version('10.2.3')
(10, 2, 3)
>>> parse_version('12')
(12, 0, 0)
|
def prepare(self, cache):
    """Prepare to run next shot.

    Restores the state vector from ``cache`` when one is given; otherwise
    resets ``self.qubits`` to the all-zeros basis state (amplitude 1.0 at
    index 0).  Classical registers are cleared either way.
    """
    if cache is None:
        # Fresh start: |0...0> state.
        self.qubits.fill(0.0)
        self.qubits[0] = 1.0
    else:
        # Restore a previously prepared state in place.
        np.copyto(self.qubits, cache)
    # One cleared classical register per qubit.
    self.cregs = [0] * self.n_qubits
|
Prepare to run next shot.
|
def conditional_accept(self):
    """Accepts the inputs if all values are valid and congruent.
    i.e. Valid datafile and frequency range within the given calibration dataset."""
    # If the calibration-file option is selected but no file is chosen,
    # fall back to the "None" option (presumably the radio buttons are
    # mutually exclusive, so this unchecks calfileRadio -- verify in the UI).
    if self.ui.calfileRadio.isChecked() and str(self.ui.calChoiceCmbbx.currentText()) == '':
        self.ui.noneRadio.setChecked(True)
    if self.ui.calfileRadio.isChecked():
        try:
            # NOTE(review): assumes get_calibration returns
            # (values, frequency axis) -- confirm against the datafile class.
            x, freqs = self.datafile.get_calibration(str(self.ui.calChoiceCmbbx.currentText()), self.calf)
        except IOError:
            QtGui.QMessageBox.warning(self, "File Read Error", "Unable to read calibration file")
            return  # keep dialog open so the user can correct the input
        except KeyError:
            QtGui.QMessageBox.warning(self, "File Data Error", "Unable to find data in file")
            return
        # Requested range must lie inside the calibration data's span
        # (assumes freqs is sorted ascending -- TODO confirm).
        if self.ui.frangeLowSpnbx.value() < freqs[0] or \
            self.ui.frangeHighSpnbx.value() > freqs[-1]:
            # NOTE(review): "frequencys" typo is in the user-facing string;
            # left untouched here since this edit is documentation-only.
            QtGui.QMessageBox.warning(self, "Invalid Frequency Range",
                "Provided frequencys outside of calibration file range of {} - {} Hz".format(freqs[0], freqs[-1]))
            return
    # All checks passed (or no calibration file in use): close the dialog.
    self.accept()
|
Accepts the inputs if all values are valid and congruent.
i.e. Valid datafile and frequency range within the given calibration dataset.
|
def _set_fetcher_options(self, base):
    """
    When easy_install is about to run bdist_egg on a source dist, that
    source dist might have 'setup_requires' directives, requiring
    additional fetching. Ensure the fetcher options given to easy_install
    are available to that command as well.

    :param base: Directory in which the source dist's setup.cfg lives.
    """
    # find the fetch options from easy_install and write them out
    # to the setup.cfg file.
    ei_opts = self.distribution.get_option_dict('easy_install').copy()
    # Fixed: 'site_dirs' was listed twice in this tuple.
    fetch_directives = (
        'find_links', 'site_dirs', 'index_url', 'optimize', 'allow_hosts',
    )
    # Each option value is a (source, value) pair; keep only the value.
    # (.items() works on both Python 2 and 3, unlike the old .iteritems().)
    fetch_options = {
        key.replace('_', '-'): val[1]
        for key, val in ei_opts.items()
        if key in fetch_directives
    }
    # create a settings dictionary suitable for `edit_config`
    settings = dict(easy_install=fetch_options)
    cfg_filename = os.path.join(base, 'setup.cfg')
    setopt.edit_config(cfg_filename, settings)
|
When easy_install is about to run bdist_egg on a source dist, that
source dist might have 'setup_requires' directives, requiring
additional fetching. Ensure the fetcher options given to easy_install
are available to that command as well.
|
def validate_key(request, group=None, perm=None, keytype=None):
    """
    Validate the given key

    Raises ``AccessUnauthorized`` when the requester is not an
    authenticated, valid consumer, and ``AccessForbidden`` when the key
    fails the requested type/group/permission check.  On success the
    key's last-access record may be refreshed.
    """
    def update_last_access():
        # Persist the key only when last-used tracking is enabled.
        if KEY_LAST_USED_UPDATE:
            request.key.save()

    # Must be an authenticated, valid consumer before any key checks.
    if not (request.user.is_authenticated() and is_valid_consumer(request)):
        raise AccessUnauthorized

    # With no constraints given, a valid consumer key is sufficient.
    if not group and not perm and not keytype:
        return update_last_access()

    # Constraints are mutually exclusive: keytype takes precedence over
    # group, and group over perm (mirrors the original elif chain).
    if keytype:
        if request.key.is_type(keytype):
            return update_last_access()
    elif group:
        if request.key.belongs_to_group(group):
            return update_last_access()
    elif perm:
        if request.key.has_perm(perm):
            return update_last_access()
    raise AccessForbidden
|
Validate the given key
|
def receive(self, msg):
    """
    Returns a (receiver, msg) pair, where receiver is `None` if no route for
    the message was found, or otherwise an object with a `receive` method
    that can accept that `msg`.

    Walks ``self.routing`` as a tree of dicts, consuming one field from the
    front of ``msg`` per level, until an ``ActionList`` leaf is reached.
    """
    node = self.routing
    while True:
        if isinstance(node, ActionList):
            # Reached a leaf: this is the receiver for the remaining msg.
            return node, msg
        if not node or not msg:
            # Dead end (missing route) or nothing left to route on.
            return None, msg
        if not isinstance(node, dict):
            raise ValueError('Unexpected type %s' % type(node))
        # Consume the first field of the message and descend on its value.
        _, value = msg.popitem(last=False)
        node = node.get(str(value))
|
Returns a (receiver, msg) pair, where receiver is `None` if no route for
the message was found, or otherwise an object with a `receive` method
that can accept that `msg`.
|
def _ecc_encode_compressed_point(private_key):
    """Encodes a compressed elliptic curve point
    as described in SEC-1 v2 section 2.3.3
    http://www.secg.org/sec1-v2.pdf
    :param private_key: Private key from which to extract point data
    :type private_key: cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey
    :returns: Encoded compressed elliptic curve point
    :rtype: bytes
    :raises NotSupportedError: for non-prime curves
    """
    # key_size is in bits; convert to a whole number of bytes, rounding up.
    num_bytes = (private_key.curve.key_size + 7) // 8
    point = private_key.public_key().public_numbers()
    # Only prime-field ("secp...") curves are supported.
    if not private_key.curve.name.startswith("secp"):
        raise NotSupportedError("Non-prime curves are not supported at this time")
    # SEC-1 2.3.3: prefix 0x02 for even y, 0x03 for odd y, then big-endian x.
    prefix = b"\x03" if point.y % 2 else b"\x02"
    return prefix + int_to_bytes(point.x, num_bytes)
|
Encodes a compressed elliptic curve point
as described in SEC-1 v2 section 2.3.3
http://www.secg.org/sec1-v2.pdf
:param private_key: Private key from which to extract point data
:type private_key: cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey
:returns: Encoded compressed elliptic curve point
:rtype: bytes
:raises NotSupportedError: for non-prime curves
|
def default_multivariate_normal_fn(dtype, shape, name, trainable,
                                   add_variable_fn):
    """Creates multivariate standard `Normal` distribution.
    Args:
      dtype: Type of parameter's event.
      shape: Python `list`-like representing the parameter's event shape.
      name: Python `str` name prepended to any created (or existing)
        `tf.Variable`s.
      trainable: Python `bool` indicating all created `tf.Variable`s should be
        added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`.
      add_variable_fn: `tf.get_variable`-like `callable` used to create (or
        access existing) `tf.Variable`s.
    Returns:
      Multivariate standard `Normal` distribution.
    """
    # This prior has no trainable parameters, so the variable-creation
    # hooks are not needed.
    del name, trainable, add_variable_fn  # unused
    loc = tf.zeros(shape, dtype)
    scale = dtype.as_numpy_dtype(1)
    base = tfd.Normal(loc=loc, scale=scale)
    # Treat all batch dimensions as part of a single event.
    batch_ndims = tf.size(input=base.batch_shape_tensor())
    return tfd.Independent(base, reinterpreted_batch_ndims=batch_ndims)
|
Creates multivariate standard `Normal` distribution.
Args:
dtype: Type of parameter's event.
shape: Python `list`-like representing the parameter's event shape.
name: Python `str` name prepended to any created (or existing)
`tf.Variable`s.
trainable: Python `bool` indicating all created `tf.Variable`s should be
added to the graph collection `GraphKeys.TRAINABLE_VARIABLES`.
add_variable_fn: `tf.get_variable`-like `callable` used to create (or
access existing) `tf.Variable`s.
Returns:
Multivariate standard `Normal` distribution.
|
def check_rights(self, resources, request=None):
""" Check rights for resources.
:return bool: True if operation is success else HTTP_403_FORBIDDEN
"""
if not self.auth:
return True
try:
if not self.auth.test_rights(resources, request=request):
raise AssertionError()
except AssertionError, e:
raise HttpError("Access forbiden. {0}".format(e), status=status.HTTP_403_FORBIDDEN)
|
Check rights for resources.
:return bool: True if operation is success else HTTP_403_FORBIDDEN
|
def get_task_summary(self, task_name):
    """
    Get a task's summary, mostly used for MapReduce.

    :param task_name: task name
    :return: summary as a dict parsed from JSON, or None when the
        instance has no summary available
    :rtype: dict
    """
    resp = self._client.get(
        self.resource(),
        params={'instancesummary': '', 'taskname': task_name},
    )
    map_reduce = resp.json().get('Instance')
    if not map_reduce:
        # No instance payload: nothing to summarize.
        return None
    json_summary = map_reduce.get('JsonSummary')
    if not json_summary:
        # Instance present but no JSON summary attached.
        return None
    summary = Instance.TaskSummary(json.loads(json_summary))
    summary.summary_text = map_reduce.get('Summary')
    summary.json_summary = json_summary
    return summary
|
Get a task's summary, mostly used for MapReduce.
:param task_name: task name
:return: summary as a dict parsed from JSON
:rtype: dict
|
def ekrced(handle, segno, recno, column, nelts=_SPICE_EK_EKRCEX_ROOM_DEFAULT):
    """
    Read data from a double precision column in a specified EK record.
    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekrced_c.html
    :param handle: Handle attached to EK file.
    :type handle: int
    :param segno: Index of segment containing record.
    :type segno: int
    :param recno: Record from which data is to be read.
    :type recno: int
    :param column: Column name.
    :type column: str
    :param nelts: Room (maximum number of elements) reserved for the output buffer.
    :type nelts: int
    :return:
            Number of values in column entry,
            Float values in column entry,
            Flag indicating whether column entry is null.
    :rtype: tuple
    """
    # Marshal Python arguments into the ctypes forms ekrced_c expects.
    handle = ctypes.c_int(handle)
    segno = ctypes.c_int(segno)
    recno = ctypes.c_int(recno)
    column = stypes.stringToCharP(column)
    # Output parameters: value count, value buffer (nelts doubles), null flag.
    nvals = ctypes.c_int(0)
    dvals = stypes.emptyDoubleVector(nelts)
    isnull = ctypes.c_int()
    libspice.ekrced_c(handle, segno, recno, column, ctypes.byref(nvals), dvals,
                      ctypes.byref(isnull))
    # Unless CSPICE reported an error, the library must not have written
    # more values than the buffer has room for.
    assert failed() or (nvals.value <= nelts)
    # Trim the buffer to the actual count and convert to Python types.
    return nvals.value, stypes.cVectorToPython(dvals)[:nvals.value], bool(isnull.value)
|
Read data from a double precision column in a specified EK record.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekrced_c.html
:param handle: Handle attached to EK file.
:type handle: int
:param segno: Index of segment containing record.
:type segno: int
:param recno: Record from which data is to be read.
:type recno: int
:param column: Column name.
:type column: str
:return:
Number of values in column entry,
Float values in column entry,
Flag indicating whether column entry is null.
:rtype: tuple
|
def users_create_many(self, data, **kwargs):
    """Create many Zendesk users in a single POST.

    https://developer.zendesk.com/rest_api/docs/core/users#create-many-users
    """
    return self.call(
        "/api/v2/users/create_many.json",
        method="POST",
        data=data,
        **kwargs
    )
|
https://developer.zendesk.com/rest_api/docs/core/users#create-many-users
|
def set_zerg_client_params(self, server_sockets, use_fallback_socket=None):
    """Zerg mode. Zergs params.

    :param str|unicode|list[str|unicode] server_sockets: Attaches zerg to a zerg server.
    :param bool use_fallback_socket: Fallback to normal sockets if the zerg server is not available
    """
    self._set('zerg', server_sockets, multi=True)

    if use_fallback_socket is not None:
        self._set('zerg-fallback', use_fallback_socket, cast=bool)

    # Every zerg server socket must also be registered with networking.
    networking = self._section.networking
    for sock in listify(server_sockets):
        networking.register_socket(networking.sockets.default(sock))

    return self._section
|
Zerg mode. Zergs params.
:param str|unicode|list[str|unicode] server_sockets: Attaches zerg to a zerg server.
:param bool use_fallback_socket: Fallback to normal sockets if the zerg server is not available
|
def listen(self, **kwargs: Any) -> Server:
    """
    bind host, port or sock

    ``kwargs`` (host/port/sock/...) are forwarded verbatim to
    ``loop.create_server``.

    NOTE(review): the ``yield from`` below makes this a generator-based
    coroutine despite the plain ``-> Server`` annotation; callers must
    await/yield it.
    """
    loop = cast(asyncio.AbstractEventLoop, self._loop)
    return (yield from loop.create_server(
        # A fresh protocol instance is constructed per incoming connection.
        lambda: self._protocol(
            loop=loop,
            handle=self._handle,
            # NOTE(review): "requset_charset" is misspelled, but it matches
            # the instance attribute and presumably the protocol's keyword
            # argument -- do not "fix" one side in isolation.
            requset_charset=self.requset_charset,
            response_charset=self.response_charset,
        ),
        **kwargs,
    ))
|
bind host, port or sock
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.