def create_context_store(name='default',
ttl=settings.CONTEXT_DEFAULT_TTL,
store=settings.CONTEXT_STORE) -> 'BaseContextStore':
"""
Create a context store. By default this uses the configured default context
store class, but you can supply a custom class through the `store` setting.
The time to live of each store (there is one store per conversation) is
defined by the `ttl` value, which also defaults to the configured value.
Several stores can exist in parallel; to tell them apart, give each one a
different name using the `name` parameter.
The usage looks like:
>>> cs = create_context_store()
>>> class Hello(BaseTestState):
...     @cs.inject(['foo'])
...     async def handle(self, context):
...         logger.debug('foo is %s', context['foo'])
...
...     async def missing_context(self):
...         self.send(lyr.Text('`foo` is not in context'))
This requires that `foo` is present in the context in order to enter the
handler.
See `BaseContextStore.inject()` for more info.
"""
store_class = import_class(store['class'])
return store_class(name=name, ttl=ttl, **store['params'])
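The `store` setting consumed above is a mapping with a dotted `class` path and a `params` dict, as implied by the `import_class(store['class'])` and `**store['params']` calls. A minimal configuration sketch (the store class path and its params are hypothetical):

# Hypothetical settings sketch; only the {'class': ..., 'params': ...}
# shape is implied by create_context_store().
CONTEXT_DEFAULT_TTL = 20 * 60  # seconds; each conversation gets its own store

CONTEXT_STORE = {
    'class': 'myapp.context.RedisContextStore',  # resolved via import_class()
    'params': {
        'host': 'localhost',
        'port': 6379,
    },
}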
def query_artifacts(job_ids, log):
"""Query API again for artifacts.
:param iter job_ids: List of AppVeyor job IDs.
:param logging.Logger log: Logger for this function. Populated by with_log() decorator.
:return: List of tuples: (job ID, artifact file name, artifact file size).
:rtype: list
"""
jobs_artifacts = list()
for job in job_ids:
url = '/buildjobs/{0}/artifacts'.format(job)
log.debug('Querying AppVeyor artifact API for %s...', job)
json_data = query_api(url)
for artifact in json_data:
jobs_artifacts.append((job, artifact['fileName'], artifact['size']))
return jobs_artifacts
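A quick usage sketch (the job IDs are made up; `query_api` is assumed to return the parsed JSON list for the endpoint):

import logging

log = logging.getLogger(__name__)
for job_id, file_name, size in query_artifacts(['a1b2c3', 'd4e5f6'], log):
    log.info('%s: %s (%d bytes)', job_id, file_name, size)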
def file_list(*packages, **kwargs):
'''
List the files that belong to a package. Not specifying any packages will
return a list of _every_ file on the system's rpm database (not generally
recommended).
root
use root as top level directory (default: "/")
CLI Examples:
.. code-block:: bash
salt '*' lowpkg.file_list httpd
salt '*' lowpkg.file_list httpd postfix
salt '*' lowpkg.file_list
'''
cmd = ['rpm']
if kwargs.get('root'):
cmd.extend(['--root', kwargs['root']])
cmd.append('-ql' if packages else '-qla')
if packages:
# Can't concatenate a tuple, must do a list.extend()
cmd.extend(packages)
ret = __salt__['cmd.run'](
cmd,
output_loglevel='trace',
python_shell=False).splitlines()
return {'errors': [], 'files': ret}
def make_path_qs(self, items):
'''Returns a relative path complete with query string for the given
dictionary of items.
Any items with keys matching this rule's url pattern will be inserted
into the path. Any remaining items will be appended as query string
parameters.
All items will be urlencoded. Any items which are not instances of
basestring, or int/long will be pickled before being urlencoded.
.. warning:: The pickling of items only works for key/value pairs which
will be in the query string. This behavior should only be
used for the simplest of python objects. It causes the
URL to get very lengthy (and unreadable) and XBMC has a
hard limit on URL length. See the caching section if you
need to persist a large amount of data between requests.
'''
# Convert any ints and longs to strings
for key, val in items.items():
if isinstance(val, (int, long)):
items[key] = str(val)
# First use our defaults passed when registering the rule
url_items = dict((key, val) for key, val in self._options.items()
if key in self._keywords)
# Now update with any items explicitly passed to url_for
url_items.update((key, val) for key, val in items.items()
if key in self._keywords)
# Create the path
path = self._make_path(url_items)
# Extra arguments get tacked on to the query string
qs_items = dict((key, val) for key, val in items.items()
if key not in self._keywords)
qs = self._make_qs(qs_items)
if qs:
return '?'.join([path, qs])
return path
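To illustrate the path/query-string split above, here is a sketch with a hypothetical rule whose URL pattern is '/videos/<video_id>/', i.e. whose `_keywords` would be ['video_id']:

# Hypothetical rule registered with pattern '/videos/<video_id>/'.
items = {'video_id': 42, 'page': 2, 'sort': 'newest'}

# 'video_id' matches the pattern and fills the path; the remaining items
# are urlencoded into the query string, giving something like:
#   '/videos/42/?page=2&sort=newest'
path_qs = rule.make_path_qs(items)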
def set_limits(self, min_=None, max_=None):
"""
Sets limits for this config value.
If the resulting integer is outside those limits, an exception will be raised.
:param min_: minimum allowed value
:param max_: maximum allowed value
"""
self._min, self._max = min_, max_
def get_relation_count_query_for_self_join(self, query, parent):
"""
Add the constraints for a relationship count query on the same table.
:type query: eloquent.orm.Builder
:type parent: eloquent.orm.Builder
:rtype: eloquent.orm.Builder
"""
query.select(QueryExpression('COUNT(*)'))
table_prefix = self._query.get_query().get_connection().get_table_prefix()
hash_ = self.get_relation_count_hash()
query.from_('%s AS %s%s' % (self._table, table_prefix, hash_))
key = self.wrap(self.get_qualified_parent_key_name())
return query.where('%s.%s' % (hash_, self._foreign_key), '=', QueryExpression(key))
def is_filter_with_outer_scope_vertex_field_operator(directive):
"""Return True if we have a filter directive whose operator applies to the outer scope."""
if directive.name.value != 'filter':
return False
op_name, _ = _get_filter_op_name_and_values(directive)
return op_name in OUTER_SCOPE_VERTEX_FIELD_OPERATORS
def token_from_fragment(self, authorization_response):
"""Parse token from the URI fragment, used by MobileApplicationClients.
:param authorization_response: The full URL of the redirect back to you
:return: A token dict
"""
self._client.parse_request_uri_response(
authorization_response, state=self._state
)
self.token = self._client.token
return self.token
def get_location(self, location_id, depth=0):
"""
Retrieves a single location by ID.
:param location_id: The unique ID of the location.
:type location_id: ``str``
:param depth: The depth of the returned data (default: 0).
:type depth: ``int``
"""
response = self._perform_request('/locations/%s?depth=%s' % (location_id, depth))
return response
def delete(self):
"""Deletes the current session file"""
if self.filename == ':memory:':
return True
try:
os.remove(self.filename)
return True
except OSError:
return False
def convertTupleArrayToPoints(self, arrayOfPointTuples):
"""Method used to convert an array of tuples (x,y) into a string
suitable for createPolygon or createPolyline
@type arrayOfPointTuples: An array containing tuples, e.g. [(x1,y1),(x2,y2)]
@param arrayOfPointTuples: All points needed to create the shape
@return a string in the form "x1,y1 x2,y2 x3,y3"
"""
points = ""
for point in arrayOfPointTuples:
points += str(point[0]) + "," + str(point[1]) + " "
return points
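For example (assuming `builder` is an instance of the class that defines this method):

points = builder.convertTupleArrayToPoints([(0, 0), (10, 0), (10, 10)])
assert points == "0,0 10,0 10,10 "  # note the trailing space left by the loop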
def build_dependencies(self):
'''build_dependencies
High-level api: Briefly compile all yang files and find out dependency
information of all modules.
Returns
-------
None
Nothing returns.
'''
cmd_list = ['pyang', '--plugindir', self.pyang_plugins]
cmd_list += ['-p', self.dir_yang]
cmd_list += ['-f', 'pyimport']
cmd_list += [self.dir_yang + '/*.yang']
logger.info('Building dependencies: {}'.format(' '.join(cmd_list)))
p = Popen(' '.join(cmd_list), shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
logger.info('pyang return code is {}'.format(p.returncode))
logger.debug(stderr.decode())
parser = etree.XMLParser(remove_blank_text=True)
self.dependencies = etree.XML(stdout.decode(), parser)
def dump(self):
"""Returns the results in string format."""
text = ''
for result in self.objects:
if result.is_failure or result.is_error:
text += '\n#{red}#{bright}'
text += '{}\n'.format(''.ljust(79, '='))
status = 'FAILED' if result.is_failure else 'ERROR'
text += '{}: {}\n'.format(status, result.process)
text += '{}\n#{{reset_all}}'.format(''.ljust(79, '='))
if result.output:
text += result.output
if result.error:
if result.output:
text += '\n{}\n'.format(''.ljust(79, '-'))
text += 'Additional error output:\n'
text += '{}\n'.format(''.ljust(79, '-'))
text += result.error
if not text.endswith('\n'):
text += '\n'
if self.has_modified_files:
text += '\n#{{yellow}}#{{bright}}{}\n'.format(''.ljust(79, '-'))
text += 'Modified files:\n'
text += '{}\n'.format(''.ljust(79, '-'))
for path, modified_by in self.modified_files:
text += '#{{reset_all}}{} #{{cyan}}<- {}\n'.format(path, ', '.join(modified_by))
return text
def items(self):
""" Return a copy of the dictionary's list of (key, value) pairs. """
r = []
for key in self._safe_keys():
try:
r.append((key, self[key]))
except KeyError:
pass
return r
def get_tiltplane(self, sequence):
'''
Extract the main tilting plane based on the Z coordinate
'''
sequence = sorted(sequence, key=lambda x: self.virtual_atoms[ x ].z)
in_plane = []
for i in range(0, len(sequence)-4):
if abs(self.virtual_atoms[ sequence[i] ].z - self.virtual_atoms[ sequence[i+1] ].z) < self.OCTAHEDRON_ATOMS_Z_DIFFERENCE and \
abs(self.virtual_atoms[ sequence[i+1] ].z - self.virtual_atoms[ sequence[i+2] ].z) < self.OCTAHEDRON_ATOMS_Z_DIFFERENCE and \
abs(self.virtual_atoms[ sequence[i+2] ].z - self.virtual_atoms[ sequence[i+3] ].z) < self.OCTAHEDRON_ATOMS_Z_DIFFERENCE:
in_plane = [sequence[j] for j in range(i, i+4)]
return in_plane
def fit(self, X, y=None, input_type='affinity'):
"""
Fit the model from data in X.
Parameters
----------
input_type : string, one of: 'similarity', 'distance' or 'data'.
The values of input data X. (default = 'data')
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
If self.input_type is similarity:
X : array-like, shape (n_samples, n_samples),
copy the similarity matrix X to S.
"""
X = self._validate_input(X, input_type)
self.fit_geometry(X, input_type)
random_state = check_random_state(self.random_state)
self.embedding_, self.eigen_vectors_, self.P_ = spectral_clustering(self.geom_, K = self.K,
eigen_solver = self.eigen_solver,
random_state = self.random_state,
solver_kwds = self.solver_kwds,
renormalize = self.renormalize,
stabalize = self.stabalize,
additional_vectors = self.additional_vectors)
def ref(self, orm_classpath, cls_pk=None):
"""
takes a classpath to allow querying from another Orm class
String paths are used to avoid infinite-recursion import problems: an orm
class from module A might have a ref from module B, and sometimes it is
handy for module B to get the objects from module A that correspond to an
object in module B, but you can't import module A into module B because
module B already imports module A.
orm_classpath -- string -- a full python class path (eg, foo.bar.Che)
cls_pk -- mixed -- automatically set the where field of orm_classpath
that references self.orm_class to the value in cls_pk if present
return -- Query()
"""
# split orm from module path
orm_module, orm_class = get_objects(orm_classpath)
q = orm_class.query
if cls_pk:
found = False
for fn, f in orm_class.schema.fields.items():
cls_ref_s = f.schema
if cls_ref_s and self.schema == cls_ref_s:
q.is_field(fn, cls_pk)
found = True
break
if not found:
raise ValueError("Did not find a foreign key field for [{}] in [{}]".format(
self.orm_class.table_name,
orm_class.table_name,
))
return q
def blend_rect(self, x0, y0, x1, y1, dx, dy, destination, alpha=0xff):
"""Blend a rectangle onto the image"""
x0, y0, x1, y1 = self.rect_helper(x0, y0, x1, y1)
for x in range(x0, x1 + 1):
for y in range(y0, y1 + 1):
o = self._offset(x, y)
rgba = self.canvas[o:o + 4]
rgba[3] = alpha
destination.point(dx + x - x0, dy + y - y0, rgba)
def upsert_object_property(self, identifier, properties, ignore_constraints=False):
"""Manipulate an object's property set. Inserts or updates properties in
given dictionary. If a property key does not exist in the object's
property set it is created. If the value is None an existing property is
deleted.
Existing object properties that are not present in the given property
set remain unaffected.
Deleting mandatory properties or updating immutable properties results
in a ValueError. These constraints can be disabled using the
ignore_constraints parameter.
Parameters
----------
identifier : string
Unique object identifier
properties : Dictionary()
Dictionary of property names and their new values.
ignore_constraints : Boolean
Flag indicating whether to ignore immutable and mandatory property
constraints (True) or not (False, default).
Returns
-------
ObjectHandle
Handle to updated object or None if object does not exist
"""
# Retrieve the object with the given identifier. This is a (sub-)class
# of ObjectHandle
obj = self.get_object(identifier)
if not obj is None:
# Modify property set of retrieved object handle. Raise exception if
# any of the upserts is not valid.
for key in properties:
value = properties[key]
# If the update affects an immutable property raise exception
if not ignore_constraints and key in self.immutable_properties:
raise ValueError('update to immutable property: ' + key)
# Check whether the operation is an UPSERT (value != None) or
# DELETE (value == None)
if not value is None:
obj.properties[key] = value
else:
# DELETE. Make sure the property is not mandatory
if not ignore_constraints and key in self.mandatory_properties:
raise ValueError('delete mandatory property: ' + key)
elif key in obj.properties:
del obj.properties[key]
# Update object in database
self.replace_object(obj)
# Return object handle
return obj
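A sketch of the upsert/delete semantics (identifier, property names, and the `store` instance are made up):

# Non-None values insert or update; None deletes an existing property.
obj = store.upsert_object_property(
    'obj-001',
    {
        'name': 'experiment-42',  # inserted or updated
        'stale_flag': None,       # deleted if present (and not mandatory)
    },
)
if obj is None:
    print('no object with identifier obj-001')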
def _generalized_word_starts(self, xs):
"""Helper method returns the starting indexes of strings in GST"""
self.word_starts = []
i = 0
for n in range(len(xs)):
self.word_starts.append(i)
i += len(xs[n]) + 1
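Each start index is the running sum of the previous string lengths plus one terminator character per string. A standalone replication of the computation:

def word_starts(xs):
    starts, i = [], 0
    for s in xs:
        starts.append(i)
        i += len(s) + 1  # +1 for the terminator appended between words
    return starts

assert word_starts(['abc', 'de', 'f']) == [0, 4, 7]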
def create_site(self, webspace_name, website_name, geo_region, host_names,
plan='VirtualDedicatedPlan', compute_mode='Shared',
server_farm=None, site_mode=None):
'''
Create a website.
webspace_name:
The name of the webspace.
website_name:
The name of the website.
geo_region:
The geographical region of the webspace that will be created.
host_names:
An array of fully qualified domain names for website. Only one
hostname can be specified in the azurewebsites.net domain.
The hostname should match the name of the website. Custom domains
can only be specified for Shared or Standard websites.
plan:
This value must be 'VirtualDedicatedPlan'.
compute_mode:
This value should be 'Shared' for the Free or Paid Shared
offerings, or 'Dedicated' for the Standard offering. The default
value is 'Shared'. If you set it to 'Dedicated', you must specify
a value for the server_farm parameter.
server_farm:
The name of the Server Farm associated with this website. This is
a required value for Standard mode.
site_mode:
Can be None, 'Limited' or 'Basic'. This value is 'Limited' for the
Free offering, and 'Basic' for the Paid Shared offering. Standard
mode does not use the site_mode parameter; it uses the compute_mode
parameter.
'''
xml = _XmlSerializer.create_website_to_xml(webspace_name, website_name, geo_region, plan, host_names, compute_mode, server_farm, site_mode)
return self._perform_post(
self._get_sites_path(webspace_name),
xml,
Site)
def create_topic(self, topic_name, topic=None, fail_on_exist=False):
'''
Creates a new topic. Once created, this topic resource manifest is
immutable.
topic_name:
Name of the topic to create.
topic:
Topic object to create.
fail_on_exist:
Specify whether to throw an exception when the topic exists.
'''
_validate_not_none('topic_name', topic_name)
request = HTTPRequest()
request.method = 'PUT'
request.host = self._get_host()
request.path = '/' + _str(topic_name)
request.body = _get_request_body(_convert_topic_to_xml(topic))
request.path, request.query = self._httpclient._update_request_uri_query(request) # pylint: disable=protected-access
request.headers = self._update_service_bus_header(request)
if not fail_on_exist:
try:
self._perform_request(request)
return True
except AzureHttpError as ex:
_dont_fail_on_exist(ex)
return False
else:
self._perform_request(request)
return True
def length_degrees(self):
'''Computes the length of the arc in degrees.
The length computation corresponds to what you would expect if you would draw the arc using matplotlib taking direction into account.
>>> Arc((0,0), 1, 0, 0, True).length_degrees()
0.0
>>> Arc((0,0), 2, 0, 0, False).length_degrees()
0.0
>>> Arc((0,0), 3, 0, 1, True).length_degrees()
1.0
>>> Arc((0,0), 4, 0, 1, False).length_degrees()
359.0
>>> Arc((0,0), 5, 0, 360, True).length_degrees()
360.0
>>> Arc((0,0), 6, 0, 360, False).length_degrees()
0.0
>>> Arc((0,0), 7, 0, 361, True).length_degrees()
360.0
>>> Arc((0,0), 8, 0, 361, False).length_degrees()
359.0
>>> Arc((0,0), 9, 10, -10, True).length_degrees()
340.0
>>> Arc((0,0), 10, 10, -10, False).length_degrees()
20.0
>>> Arc((0,0), 1, 10, 5, True).length_degrees()
355.0
>>> Arc((0,0), 1, -10, -5, False).length_degrees()
355.0
>>> Arc((0,0), 1, 180, -180, True).length_degrees()
0.0
>>> Arc((0,0), 1, 180, -180, False).length_degrees()
360.0
>>> Arc((0,0), 1, -180, 180, True).length_degrees()
360.0
>>> Arc((0,0), 1, -180, 180, False).length_degrees()
0.0
>>> Arc((0,0), 1, 175, -175, True).length_degrees()
10.0
>>> Arc((0,0), 1, 175, -175, False).length_degrees()
350.0
'''
d_angle = self.sign * (self.to_angle - self.from_angle)
if (d_angle > 360):
return 360.0
elif (d_angle < 0):
return d_angle % 360.0
else:
return abs(d_angle)
def as_bulk_queries(queries, bulk_size):
"""Group a iterable of (stmt, args) by stmt into (stmt, bulk_args).
bulk_args will be a list of the args grouped by stmt.
len(bulk_args) will be <= bulk_size
"""
stmt_dict = defaultdict(list)
for stmt, args in queries:
bulk_args = stmt_dict[stmt]
bulk_args.append(args)
if len(bulk_args) == bulk_size:
yield stmt, bulk_args
del stmt_dict[stmt]
for stmt, bulk_args in stmt_dict.items():
yield stmt, bulk_args
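Usage sketch: with bulk_size=2, three occurrences of the same statement yield one full batch of two and a leftover batch of one.

queries = [
    ('INSERT INTO t (x) VALUES (?)', (1,)),
    ('INSERT INTO t (x) VALUES (?)', (2,)),
    ('INSERT INTO t (x) VALUES (?)', (3,)),
]
batches = list(as_bulk_queries(queries, bulk_size=2))
# -> [('INSERT INTO t (x) VALUES (?)', [(1,), (2,)]),
#     ('INSERT INTO t (x) VALUES (?)', [(3,)])]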
def json_request(self, registration_ids, data=None, collapse_key=None,
delay_while_idle=False, time_to_live=None, retries=5, dry_run=False):
"""
Makes a JSON request to GCM servers
:param registration_ids: list of the registration ids
:param data: dict mapping of key-value pairs of messages
:return dict of response body from Google including multicast_id, success, failure, canonical_ids, etc
:raises GCMMissingRegistrationException: if the list of registration_ids is empty
:raises GCMTooManyRegIdsException: if the list of registration_ids exceeds 1000 items
"""
if not registration_ids:
raise GCMMissingRegistrationException("Missing registration_ids")
if len(registration_ids) > 1000:
raise GCMTooManyRegIdsException(
"Exceded number of registration_ids")
backoff = self.BACKOFF_INITIAL_DELAY
for attempt in range(retries):
payload = self.construct_payload(
registration_ids, data, collapse_key,
delay_while_idle, time_to_live, True, dry_run
)
response = self.make_request(payload, is_json=True)
info = self.handle_json_response(response, registration_ids)
unsent_reg_ids = self.extract_unsent_reg_ids(info)
if unsent_reg_ids:
registration_ids = unsent_reg_ids
sleep_time = backoff / 2 + random.randrange(backoff)
time.sleep(float(sleep_time) / 1000)
if 2 * backoff < self.MAX_BACKOFF_DELAY:
backoff *= 2
else:
break
return info
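The retry loop sleeps a randomized exponential backoff between attempts. A sketch of the delay schedule it produces, assuming an initial delay of 1000 ms and a cap of 1024000 ms for the class constants referenced above (exact values may differ):

import random

backoff = 1000  # BACKOFF_INITIAL_DELAY (assumed), in milliseconds
for attempt in range(5):
    sleep_ms = backoff / 2 + random.randrange(backoff)  # in [backoff/2, 1.5*backoff)
    print('attempt %d: sleep ~%d ms' % (attempt, sleep_ms))
    if 2 * backoff < 1024000:  # MAX_BACKOFF_DELAY (assumed)
        backoff *= 2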
def update_listener(self, lbaas_listener, body=None):
"""Updates a lbaas_listener."""
return self.put(self.lbaas_listener_path % (lbaas_listener),
body=body)
def flavor_extra_set(request, flavor_id, metadata):
"""Set the flavor extra spec keys."""
flavor = _nova.novaclient(request).flavors.get(flavor_id)
if not metadata:  # empty metadata is not a way to delete existing keys
return None
return flavor.set_keys(metadata)
def sam2fastq(sam, singles = False, force = False):
"""
convert sam to fastq
"""
L, R = None, None
for line in sam:
if line.startswith('@') is True:
continue
line = line.strip().split()
bit = [True if i == '1' else False \
for i in bin(int(line[1])).split('b')[1][::-1]]
while len(bit) < 8:
bit.append(False)
pair, proper, na, nap, rev, mrev, left, right = bit
# make sure read is paired
if pair is False:
if singles is True:
print_single(line, rev)
continue
# check if sequence is reverse-complemented
if rev is True:
seq = rc(['', line[9]])[1]
qual = line[10][::-1]
else:
seq = line[9]
qual = line[10]
# check if read is forward or reverse, return when both have been found
if left is True:
if L is not None and force is False:
print('sam file is not sorted', file = sys.stderr)
print('\te.g.: %s' % (line[0]), file = sys.stderr)
exit()
if L is not None:
L = None
continue
L = ['@%s' % line[0], seq, '+%s' % line[0], qual]
if R is not None:
yield L
yield R
L, R = None, None
if right is True:
if R is not None and force is False:
print('sam file is not sorted', file = sys.stderr)
print('\te.g.: %s' % (line[0]), file = sys.stderr)
exit()
if R is not None:
R = None
continue
R = ['@%s' % line[0], seq, '+%s' % line[0], qual]
if L is not None:
yield L
yield R
L, R = None, None
def parse_services(config, services):
"""Parse configuration to return number of enabled service checks.
Arguments:
config (obj): A configparser object with the configuration of
anycast-healthchecker.
services (list): A list of section names that hold the configuration
for each service check
Returns:
A number (int) of enabled service checks.
"""
enabled = 0
for service in services:
check_disabled = config.getboolean(service, 'check_disabled')
if not check_disabled:
enabled += 1
return enabled
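A self-contained sketch with a two-service config, one check disabled (section names are made up):

import configparser

config = configparser.ConfigParser()
config.read_string("""
[foo.example.com]
check_disabled = false
[bar.example.com]
check_disabled = true
""")
assert parse_services(config, config.sections()) == 1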
def probe(self):
"""
Probe the file for new lines
"""
# make sure the filehandler is still valid
# (e.g. file stat hasn't changed, file exists, etc.)
if not self.validate_file_handler():
return []
messages = []
# read any new lines and push them onto the stack
for line in self.fh.readlines(self.max_lines):
data = {"path":self.path}
msg = self.new_message()
# process the line - this is where parsing happens
parsed = self.process_line(line, data)
if not parsed:
continue
data.update(parsed)
# process the probe - this is where data assignment
# happens
data = self.process_probe(data)
msg["data"] = [data]
messages.append(msg)
# process all new messages before returning them
# for emission
messages = self.process_messages(messages)
return messages
def _speak_normal_inherit(self, element):
"""
Speak the content of element and descendants.
:param element: The element.
:type element: hatemile.util.html.htmldomelement.HTMLDOMElement
"""
self._visit(element, self._speak_normal)
element.normalize()
def float(self, item, default=None):
""" Return value of key as a float
:param item: key of value to transform
:param default: value to return if item does not exist
:return: float of value
"""
try:
item = self.__getattr__(item)
except AttributeError as err:
if default is not None:
return default
raise err
return float(item)
def FromArchive(cls, path, actions_dict, resources_dict, temp_dir=None):
"""Create a RecipeObject from a .ship archive.
This archive should have been generated from a previous call to
iotile-ship -a <path to yaml file>
or via iotile-build using autobuild_shiparchive().
Args:
path (str): The path to the recipe file that we wish to load
actions_dict (dict): A dictionary of named RecipeActionObject
types that is used to look up all of the steps listed in
the recipe file.
resources_dict (dict): A dictionary of named RecipeResource types
that is used to look up all of the shared resources listed in
the recipe file.
temp_dir (str): An optional temporary directory where this archive
should be unpacked. Otherwise a system wide temporary directory
is used.
"""
if not path.endswith(".ship"):
raise ArgumentError("Attempted to unpack a recipe archive from a file that did not end in .ship", path=path)
name = os.path.basename(path)[:-5]
if temp_dir is None:
temp_dir = tempfile.mkdtemp()
extract_path = os.path.join(temp_dir, name)
archive = zipfile.ZipFile(path, "r")
archive.extractall(extract_path)
recipe_yaml = os.path.join(extract_path, 'recipe_script.yaml')
return cls.FromFile(recipe_yaml, actions_dict, resources_dict, name=name)
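A usage sketch (the archive path is hypothetical; real recipes need their action types registered in `actions_dict`):

recipe = RecipeObject.FromArchive(
    'build/output/my_device.ship',
    actions_dict={},     # hypothetical empty registry
    resources_dict={},   # no shared resources in this sketch
)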
def _six_fail_hook(modname):
"""Fix six.moves imports due to the dynamic nature of this
class.
Construct a pseudo-module which contains all the necessary imports
for six
:param modname: Name of failed module
:type modname: str
:return: An astroid module
:rtype: nodes.Module
"""
attribute_of = modname != "six.moves" and modname.startswith("six.moves")
if modname != "six.moves" and not attribute_of:
raise AstroidBuildingError(modname=modname)
module = AstroidBuilder(MANAGER).string_build(_IMPORTS)
module.name = "six.moves"
if attribute_of:
# Facilitate import of submodules in Moves
start_index = len(module.name)
attribute = modname[start_index:].lstrip(".").replace(".", "_")
try:
import_attr = module.getattr(attribute)[0]
except AttributeInferenceError:
raise AstroidBuildingError(modname=modname)
if isinstance(import_attr, nodes.Import):
submodule = MANAGER.ast_from_module_name(import_attr.names[0][0])
return submodule
# Let dummy submodule imports pass through
# This will cause an Uninferable result, which is okay
return module
def get_release_task_attachments(self, project, release_id, environment_id, attempt_id, plan_id, type):
"""GetReleaseTaskAttachments.
[Preview API]
:param str project: Project ID or project name
:param int release_id:
:param int environment_id:
:param int attempt_id:
:param str plan_id:
:param str type:
:rtype: [ReleaseTaskAttachment]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if release_id is not None:
route_values['releaseId'] = self._serialize.url('release_id', release_id, 'int')
if environment_id is not None:
route_values['environmentId'] = self._serialize.url('environment_id', environment_id, 'int')
if attempt_id is not None:
route_values['attemptId'] = self._serialize.url('attempt_id', attempt_id, 'int')
if plan_id is not None:
route_values['planId'] = self._serialize.url('plan_id', plan_id, 'str')
if type is not None:
route_values['type'] = self._serialize.url('type', type, 'str')
response = self._send(http_method='GET',
location_id='a4d06688-0dfa-4895-82a5-f43ec9452306',
version='5.0-preview.1',
route_values=route_values)
return self._deserialize('[ReleaseTaskAttachment]', self._unwrap_collection(response))
def validate_event_and_assign_id(event):
"""
Ensure that the event has a valid time. Assign a random UUID based on the
event time.
"""
event_time = event.get(TIMESTAMP_FIELD)
if event_time is None:
event[TIMESTAMP_FIELD] = event_time = epoch_time_to_kronos_time(time.time())
elif type(event_time) not in (int, long):
raise InvalidEventTime(event_time)
# Generate a uuid1-like sequence from the event time with the non-time bytes
# set to random values.
_id = uuid_from_kronos_time(event_time)
event[ID_FIELD] = str(_id)
return _id, event
def cv(params, train_set, num_boost_round=100,
folds=None, nfold=5, stratified=True, shuffle=True,
metrics=None, fobj=None, feval=None, init_model=None,
feature_name='auto', categorical_feature='auto',
early_stopping_rounds=None, fpreproc=None,
verbose_eval=None, show_stdv=True, seed=0,
callbacks=None, eval_train_metric=False):
"""Perform the cross-validation with given paramaters.
Parameters
----------
params : dict
Parameters for Booster.
train_set : Dataset
Data to be trained on.
num_boost_round : int, optional (default=100)
Number of boosting iterations.
folds : generator or iterator of (train_idx, test_idx) tuples, scikit-learn splitter object or None, optional (default=None)
If generator or iterator, it should yield the train and test indices for each fold.
If object, it should be one of the scikit-learn splitter classes
(https://scikit-learn.org/stable/modules/classes.html#splitter-classes)
and have ``split`` method.
This argument has highest priority over other data split arguments.
nfold : int, optional (default=5)
Number of folds in CV.
stratified : bool, optional (default=True)
Whether to perform stratified sampling.
shuffle : bool, optional (default=True)
Whether to shuffle before splitting data.
metrics : string, list of strings or None, optional (default=None)
Evaluation metrics to be monitored while CV.
If not None, the metric in ``params`` will be overridden.
fobj : callable or None, optional (default=None)
Custom objective function.
feval : callable or None, optional (default=None)
Customized evaluation function.
Should accept two parameters: preds, train_data,
and return (eval_name, eval_result, is_higher_better) or list of such tuples.
For multi-class task, the preds is group by class_id first, then group by row_id.
If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i].
To ignore the default metric corresponding to the used objective,
set ``metrics`` to the string ``"None"``.
init_model : string, Booster or None, optional (default=None)
Filename of LightGBM model or Booster instance used for continue training.
feature_name : list of strings or 'auto', optional (default="auto")
Feature names.
If 'auto' and data is pandas DataFrame, data columns names are used.
categorical_feature : list of strings or int, or 'auto', optional (default="auto")
Categorical features.
If list of int, interpreted as indices.
If list of strings, interpreted as feature names (need to specify ``feature_name`` as well).
If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used.
All values in categorical features should be less than int32 max value (2147483647).
Large values could be memory consuming. Consider using consecutive integers starting from zero.
All negative values in categorical features will be treated as missing values.
early_stopping_rounds : int or None, optional (default=None)
Activates early stopping.
CV score needs to improve at least every ``early_stopping_rounds`` round(s)
to continue.
Requires at least one metric. If there's more than one, will check all of them.
To check only the first metric you can pass in ``callbacks``
``early_stopping`` callback with ``first_metric_only=True``.
Last entry in evaluation history is the one from the best iteration.
fpreproc : callable or None, optional (default=None)
Preprocessing function that takes (dtrain, dtest, params)
and returns transformed versions of those.
verbose_eval : bool, int, or None, optional (default=None)
Whether to display the progress.
If None, progress will be displayed when np.ndarray is returned.
If True, progress will be displayed at every boosting stage.
If int, progress will be displayed at every given ``verbose_eval`` boosting stage.
show_stdv : bool, optional (default=True)
Whether to display the standard deviation in progress.
Results are not affected by this parameter, and always contain std.
seed : int, optional (default=0)
Seed used to generate the folds (passed to numpy.random.seed).
callbacks : list of callables or None, optional (default=None)
List of callback functions that are applied at each iteration.
See Callbacks in Python API for more information.
eval_train_metric : bool, optional (default=False)
Whether to display the train metric in progress.
The score of the metric is calculated again after each training step, so there is some impact on performance.
Returns
-------
eval_hist : dict
Evaluation history.
The dictionary has the following format:
{'metric1-mean': [values], 'metric1-stdv': [values],
'metric2-mean': [values], 'metric2-stdv': [values],
...}.
"""
if not isinstance(train_set, Dataset):
raise TypeError("Traninig only accepts Dataset object")
params = copy.deepcopy(params)
if fobj is not None:
params['objective'] = 'none'
for alias in ["num_iterations", "num_iteration", "n_iter", "num_tree", "num_trees",
"num_round", "num_rounds", "num_boost_round", "n_estimators"]:
if alias in params:
warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias))
num_boost_round = params.pop(alias)
break
for alias in ["early_stopping_round", "early_stopping_rounds", "early_stopping"]:
if alias in params:
warnings.warn("Found `{}` in params. Will use it instead of argument".format(alias))
early_stopping_rounds = params.pop(alias)
break
if num_boost_round <= 0:
raise ValueError("num_boost_round should be greater than zero.")
if isinstance(init_model, string_type):
predictor = _InnerPredictor(model_file=init_model, pred_parameter=params)
elif isinstance(init_model, Booster):
predictor = init_model._to_predictor(dict(init_model.params, **params))
else:
predictor = None
train_set._update_params(params) \
._set_predictor(predictor) \
.set_feature_name(feature_name) \
.set_categorical_feature(categorical_feature)
if metrics is not None:
params['metric'] = metrics
results = collections.defaultdict(list)
cvfolds = _make_n_folds(train_set, folds=folds, nfold=nfold,
params=params, seed=seed, fpreproc=fpreproc,
stratified=stratified, shuffle=shuffle,
eval_train_metric=eval_train_metric)
# setup callbacks
if callbacks is None:
callbacks = set()
else:
for i, cb in enumerate(callbacks):
cb.__dict__.setdefault('order', i - len(callbacks))
callbacks = set(callbacks)
if early_stopping_rounds is not None:
callbacks.add(callback.early_stopping(early_stopping_rounds, verbose=False))
if verbose_eval is True:
callbacks.add(callback.print_evaluation(show_stdv=show_stdv))
elif isinstance(verbose_eval, integer_types):
callbacks.add(callback.print_evaluation(verbose_eval, show_stdv=show_stdv))
callbacks_before_iter = {cb for cb in callbacks if getattr(cb, 'before_iteration', False)}
callbacks_after_iter = callbacks - callbacks_before_iter
callbacks_before_iter = sorted(callbacks_before_iter, key=attrgetter('order'))
callbacks_after_iter = sorted(callbacks_after_iter, key=attrgetter('order'))
for i in range_(num_boost_round):
for cb in callbacks_before_iter:
cb(callback.CallbackEnv(model=cvfolds,
params=params,
iteration=i,
begin_iteration=0,
end_iteration=num_boost_round,
evaluation_result_list=None))
cvfolds.update(fobj=fobj)
res = _agg_cv_result(cvfolds.eval_valid(feval), eval_train_metric)
for _, key, mean, _, std in res:
results[key + '-mean'].append(mean)
results[key + '-stdv'].append(std)
try:
for cb in callbacks_after_iter:
cb(callback.CallbackEnv(model=cvfolds,
params=params,
iteration=i,
begin_iteration=0,
end_iteration=num_boost_round,
evaluation_result_list=res))
except callback.EarlyStopException as earlyStopException:
cvfolds.best_iteration = earlyStopException.best_iteration + 1
for k in results:
results[k] = results[k][:cvfolds.best_iteration]
break
return dict(results)
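A minimal usage sketch on synthetic data (assuming this is LightGBM's `lgb.cv`):

import numpy as np
import lightgbm as lgb

X = np.random.rand(500, 10)
y = np.random.randint(0, 2, 500)
train_set = lgb.Dataset(X, label=y)

eval_hist = lgb.cv(
    {'objective': 'binary', 'metric': 'auc', 'verbosity': -1},
    train_set,
    num_boost_round=50,
    nfold=5,
    early_stopping_rounds=10,
    seed=0,
)
# keys follow the '<metric>-mean' / '<metric>-stdv' format described above
print(len(eval_hist['auc-mean']), eval_hist['auc-mean'][-1])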
def to_text(value):
"""Convert a DNS rdata class to text.
@param value: the rdata class value
@type value: int
@rtype: string
@raises ValueError: the rdata class value is not >= 0 and <= 65535
"""
if value < 0 or value > 65535:
raise ValueError("class must be between >= 0 and <= 65535")
text = _by_value.get(value)
if text is None:
text = 'CLASS' + repr(value)
return text
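For registered classes the `_by_value` table yields a mnemonic; anything else falls back to the generic CLASSnnn form. A usage sketch, assuming the surrounding module is dnspython's `dns.rdataclass`:

import dns.rdataclass

print(dns.rdataclass.to_text(1))      # 'IN'
print(dns.rdataclass.to_text(3))      # 'CH'
print(dns.rdataclass.to_text(65280))  # 'CLASS65280' (no registered mnemonic)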
def show_vertex(self, node_id, show_ce_ratio=True):
"""Plot the vicinity of a node and its ce_ratio.
:param node_id: Node ID of the node to be shown.
:type node_id: int
:param show_ce_ratio: If true, shows the ce_ratio of the node, too.
:type show_ce_ratio: bool, optional
"""
# Importing matplotlib takes a while, so don't do that at the header.
from matplotlib import pyplot as plt
fig = plt.figure()
ax = fig.gca()
plt.axis("equal")
# Find the edges that contain the vertex
edge_gids = numpy.where((self.edges["nodes"] == node_id).any(axis=1))[0]
# ... and plot them
for node_ids in self.edges["nodes"][edge_gids]:
x = self.node_coords[node_ids]
ax.plot(x[:, 0], x[:, 1], "k")
# Highlight ce_ratios.
if show_ce_ratio:
if self.cell_circumcenters is None:
X = self.node_coords[self.cells["nodes"]]
self.cell_circumcenters = self.compute_triangle_circumcenters(
X, self.ei_dot_ei, self.ei_dot_ej
)
# Find the cells that contain the vertex
cell_ids = numpy.where((self.cells["nodes"] == node_id).any(axis=1))[0]
for cell_id in cell_ids:
for edge_gid in self.cells["edges"][cell_id]:
if node_id not in self.edges["nodes"][edge_gid]:
continue
node_ids = self.edges["nodes"][edge_gid]
edge_midpoint = 0.5 * (
self.node_coords[node_ids[0]] + self.node_coords[node_ids[1]]
)
p = _column_stack(self.cell_circumcenters[cell_id], edge_midpoint)
q = numpy.column_stack(
[
self.cell_circumcenters[cell_id],
edge_midpoint,
self.node_coords[node_id],
]
)
ax.fill(q[0], q[1], color="0.5")
ax.plot(p[0], p[1], color="0.7")
return
def _set_ovsdb_server(self, v, load=False):
"""
Setter method for ovsdb_server, mapped from YANG variable /ovsdb_server (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_ovsdb_server is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ovsdb_server() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("name",ovsdb_server.ovsdb_server, yang_name="ovsdb-server", rest_name="ovsdb-server", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Configure OVSDB server.', u'callpoint': u'ovsdbServerConfig', u'sort-priority': u'RUNNCFG_LEVEL_OVSDB_SERVER_CONFIG', u'cli-suppress-list-no': None, u'cli-full-command': None, u'hidden': u'full', u'cli-full-no': None, u'cli-mode-name': u'config-server-$(name)'}}), is_container='list', yang_name="ovsdb-server", rest_name="ovsdb-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure OVSDB server.', u'callpoint': u'ovsdbServerConfig', u'sort-priority': u'RUNNCFG_LEVEL_OVSDB_SERVER_CONFIG', u'cli-suppress-list-no': None, u'cli-full-command': None, u'hidden': u'full', u'cli-full-no': None, u'cli-mode-name': u'config-server-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ovsdb_server must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("name",ovsdb_server.ovsdb_server, yang_name="ovsdb-server", rest_name="ovsdb-server", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'info': u'Configure OVSDB server.', u'callpoint': u'ovsdbServerConfig', u'sort-priority': u'RUNNCFG_LEVEL_OVSDB_SERVER_CONFIG', u'cli-suppress-list-no': None, u'cli-full-command': None, u'hidden': u'full', u'cli-full-no': None, u'cli-mode-name': u'config-server-$(name)'}}), is_container='list', yang_name="ovsdb-server", rest_name="ovsdb-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure OVSDB server.', u'callpoint': u'ovsdbServerConfig', u'sort-priority': u'RUNNCFG_LEVEL_OVSDB_SERVER_CONFIG', u'cli-suppress-list-no': None, u'cli-full-command': None, u'hidden': u'full', u'cli-full-no': None, u'cli-mode-name': u'config-server-$(name)'}}, namespace='urn:brocade.com:mgmt:brocade-tunnels', defining_module='brocade-tunnels', yang_type='list', is_config=True)""",
})
self.__ovsdb_server = t
if hasattr(self, '_set'):
self._set()
def priority_compare(self, other):
"""
Compares the MIME::Type based on how reliable it is before doing a
normal <=> comparison. Used by MIME::Types#[] to sort types. The
comparisons involved are:
1. self.simplified <=> other.simplified (ensures that we
don't try to compare different types)
2. IANA-registered definitions < other definitions.
3. Generic definitions < platform definitions.
4. Complete definitions < incomplete definitions.
5. Current definitions < obsolete definitions.
6. Obsolete with use-instead references < obsolete without.
7. Obsolete use-instead definitions are compared.
"""
pc = cmp(self.simplified, other.simplified)
if pc == 0:
if self.is_registered != other.is_registered:
# registered < unregistered
pc = -1 if self.is_registered else 1
elif self.platform != other.platform:
# generic < platform
pc = 1 if self.platform else -1
elif self.is_complete != other.is_complete:
# complete < incomplete
pc = -1 if self.is_complete else 1
elif self.is_obsolete != other.is_obsolete:
# current < obsolete
pc = 1 if self.is_obsolete else -1
if pc == 0 and self.is_obsolete and (self.use_instead != other.use_instead):
if self.use_instead is None:
pc = -1
elif other.use_instead is None:
pc = 1
else:
pc = cmp(self.use_instead, other.use_instead)
return pc
|
Compares the MIME::Type based on how reliable it is before doing a
normal <=> comparison. Used by MIME::Types#[] to sort types. The
comparisons involved are:
1. self.simplified <=> other.simplified (ensures that we
don't try to compare different types)
2. IANA-registered definitions < other definitions.
    3. Generic definitions < platform definitions.
    4. Complete definitions < incomplete definitions.
    5. Current definitions < obsolete definitions.
    6. Obsolete with use-instead references < obsolete without.
    7. Obsolete use-instead definitions are compared.
|
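A brief usage sketch for the comparator above: because priority_compare follows the classic three-way convention, functools.cmp_to_key can adapt it for sorting. The sort_by_reliability helper and the shape of the type objects are illustrative assumptions, not part of the original library.

from functools import cmp_to_key

def sort_by_reliability(mime_types):
    # `mime_types` is assumed to be a list of objects exposing the
    # priority_compare() method above; the most reliable entries
    # (registered, generic, complete, current) sort first.
    return sorted(mime_types,
                  key=cmp_to_key(lambda a, b: a.priority_compare(b)))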
def flash(self, partition, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK):
"""Flashes the last downloaded file to the given partition.
Args:
partition: Partition to flash.
timeout_ms: Optional timeout in milliseconds to wait for it to finish.
info_cb: See Download. Usually no messages.
Returns:
Response to a download request, normally nothing.
"""
return self._simple_command('flash', arg=partition, info_cb=info_cb,
timeout_ms=timeout_ms)
|
Flashes the last downloaded file to the given partition.
Args:
partition: Partition to flash.
timeout_ms: Optional timeout in milliseconds to wait for it to finish.
info_cb: See Download. Usually no messages.
Returns:
Response to a download request, normally nothing.
|
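A hedged usage sketch for flash(): in fastboot-style clients an image is normally staged with a separate download call before flashing. The `dev` handle and its download() method are assumptions for illustration; only flash() itself is shown above.

def flash_partition(dev, partition, image_path):
    # `dev` is assumed to be a connected fastboot client that exposes
    # a download()-style staging call alongside the flash() method above.
    with open(image_path, 'rb') as source:
        dev.download(source)                       # stage the image (assumed API)
    return dev.flash(partition, timeout_ms=60000)  # then write it out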
def _error_repr(error):
"""A compact unique representation of an error."""
error_repr = repr(error)
if len(error_repr) > 200:
error_repr = hash(type(error))
return error_repr
|
A compact unique representation of an error.
|
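A quick illustration of both branches of _error_repr; note that oversized errors collapse to an int hash of the exception type rather than a string.

assert _error_repr(ValueError('x')) == "ValueError('x')"
long_error = ValueError('y' * 500)               # repr longer than 200 chars
assert isinstance(_error_repr(long_error), int)  # falls back to hash(type)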
def flush_buffers(self):
"""Default implementation, calls Read() until it blocks."""
while True:
try:
self.read(FLUSH_READ_SIZE, timeout_ms=10)
except usb_exceptions.LibusbWrappingError as exception:
if exception.is_timeout():
break
raise
|
Default implementation, calls Read() until it blocks.
|
def get_wsgi_server(
self, sock, wsgi_app, protocol=HttpOnlyProtocol, debug=False
):
"""Get the WSGI server used to process requests."""
return wsgi.Server(
sock,
sock.getsockname(),
wsgi_app,
protocol=protocol,
debug=debug,
log=getLogger(__name__)
)
|
Get the WSGI server used to process requests.
|
def c_ideal_gas(T, k, MW):
r'''Calculates speed of sound `c` in an ideal gas at temperature T.
.. math::
c = \sqrt{kR_{specific}T}
Parameters
----------
T : float
Temperature of fluid, [K]
k : float
Isentropic exponent of fluid, [-]
MW : float
Molecular weight of fluid, [g/mol]
Returns
-------
c : float
Speed of sound in fluid, [m/s]
Notes
-----
Used in compressible flow calculations.
Note that the gas constant used is the specific gas constant:
.. math::
R_{specific} = R\frac{1000}{MW}
Examples
--------
>>> c_ideal_gas(T=303, k=1.4, MW=28.96)
348.9820953185441
References
----------
.. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
Eighth Edition. McGraw-Hill Professional, 2007.
.. [2] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and
Applications. Boston: McGraw Hill Higher Education, 2006.
'''
Rspecific = R*1000./MW
return (k*Rspecific*T)**0.5
|
r'''Calculates speed of sound `c` in an ideal gas at temperature T.
.. math::
c = \sqrt{kR_{specific}T}
Parameters
----------
T : float
Temperature of fluid, [K]
k : float
Isentropic exponent of fluid, [-]
MW : float
Molecular weight of fluid, [g/mol]
Returns
-------
c : float
Speed of sound in fluid, [m/s]
Notes
-----
Used in compressible flow calculations.
Note that the gas constant used is the specific gas constant:
.. math::
R_{specific} = R\frac{1000}{MW}
Examples
--------
>>> c_ideal_gas(T=303, k=1.4, MW=28.96)
348.9820953185441
References
----------
.. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
Eighth Edition. McGraw-Hill Professional, 2007.
.. [2] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and
Applications. Boston: McGraw Hill Higher Education, 2006.
|
def _anova(self, dv=None, between=None, detailed=False, export_filename=None):
"""Return one-way and two-way ANOVA."""
aov = anova(data=self, dv=dv, between=between, detailed=detailed,
export_filename=export_filename)
return aov
|
Return one-way and two-way ANOVA.
|
def _safemembers(members):
"""Check members of a tar archive for safety.
Ensure that they do not contain paths or links outside of where we
need them - this would only happen if the archive wasn't made by
eqcorrscan.
:type members: :class:`tarfile.TarFile`
:param members: an open tarfile.
"""
base = _resolved(".")
for finfo in members:
if _badpath(finfo.name, base):
print(finfo.name, "is blocked (illegal path)")
elif finfo.issym() and _badlink(finfo, base):
print(finfo.name, "is blocked: Hard link to", finfo.linkname)
elif finfo.islnk() and _badlink(finfo, base):
print(finfo.name, "is blocked: Symlink to", finfo.linkname)
else:
yield finfo
|
Check members of a tar archive for safety.
Ensure that they do not contain paths or links outside of where we
need them - this would only happen if the archive wasn't made by
eqcorrscan.
:type members: :class:`tarfile.TarFile`
:param members: an open tarfile.
|
def wcs_pix_transform(ct, i, format=0):
"""Computes the WCS corrected pixel value given a coordinate
transformation and the raw pixel value.
Input:
ct coordinate transformation. instance of coord_tran.
i raw pixel intensity.
format format string (optional).
Returns:
WCS corrected pixel value
"""
z1 = float(ct.z1)
z2 = float(ct.z2)
i = float(i)
yscale = 128.0 / (z2 - z1)
if (format == 'T' or format == 't'):
format = 1
if (i == 0):
t = 0.
else:
if (ct.zt == W_LINEAR):
t = ((i - 1) * (z2 - z1) / 199.0) + z1
t = max(z1, min(z2, t))
else:
t = float(i)
if (format > 1):
t = (z2 - t) * yscale
return (t)
|
Computes the WCS corrected pixel value given a coordinate
transformation and the raw pixel value.
Input:
ct coordinate transformation. instance of coord_tran.
i raw pixel intensity.
format format string (optional).
Returns:
WCS corrected pixel value
|
def serializer(self, create=False, many=False):
"""
Decorator to mark a :class:`Serializer` subclass for a specific purpose, ie,
to be used during object creation **or** for serializing lists of objects.
:param create: Whether or not this serializer is for object creation.
:param many: Whether or not this serializer is for lists of objects.
"""
if create and many:
raise Exception('Can only set one of `create` or `many` to `True`')
def wrapper(cls):
cls.__kind__ = (create and 'create'
or many and 'many'
or 'all')
return cls
return wrapper
|
Decorator to mark a :class:`Serializer` subclass for a specific purpose, ie,
to be used during object creation **or** for serializing lists of objects.
:param create: Whether or not this serializer is for object creation.
:param many: Whether or not this serializer is for lists of objects.
|
def check_payment_v2(state_engine, state_op_type, nameop, fee_block_id, token_address, burn_address, name_fee, block_id):
"""
Verify that for a version-2 namespace (burn-to-creator), the nameop paid the right amount of BTC or Stacks.
It can pay either through a preorder (for registers), or directly (for renewals)
Return {'status': True, 'tokens_paid': ..., 'token_units': ...} if so
Return {'status': False} if not.
"""
# priced in BTC only if the namespace creator can receive name fees.
# once the namespace switches over to burning, then the name creator can pay in Stacks as well.
assert name_fee is not None
assert isinstance(name_fee, (int,long))
epoch_features = get_epoch_features(block_id)
name = nameop['name']
namespace_id = get_namespace_from_name(name)
name_without_namespace = get_name_from_fq_name(name)
namespace = state_engine.get_namespace( namespace_id )
assert namespace['version'] == NAMESPACE_VERSION_PAY_TO_CREATOR
# need to be in the right epoch--i.e. pay-to-creator needs to be a feature
if EPOCH_FEATURE_NAMESPACE_BURN_TO_CREATOR not in epoch_features:
log.warning("Name '{}' was created in namespace '{}', with cversion bits 0x{:x}, which is not supported in this epoch".format(name, namespace['namespace_id'], namespace['version']))
return {'status': False}
# check burn address
receive_fees_period = get_epoch_namespace_receive_fees_period(block_id, namespace['namespace_id'])
expected_burn_address = None
tokens_allowed = None
# can only burn to namespace if the namespace is young enough (starts counting from NAMESPACE_REVEAL)
# can only pay in tokens if the register takes place after the pay-to-creator period (receive_fees_period) expires
if namespace['reveal_block'] + receive_fees_period >= block_id:
log.debug("Register must pay to v2 namespace address {}".format(namespace['address']))
expected_burn_address = namespace['address']
tokens_allowed = False
else:
log.debug("Register must pay to burn address {}".format(BLOCKSTACK_BURN_ADDRESS))
expected_burn_address = BLOCKSTACK_BURN_ADDRESS
tokens_allowed = True
if burn_address != expected_burn_address:
log.warning("Buyer of {} used the wrong burn address ({}): expected {}".format(name, burn_address, expected_burn_address))
return {'status': False}
# allowed to pay in Stacks?
if EPOCH_FEATURE_NAMEOPS_COST_TOKENS in epoch_features:
# did we pay any stacks?
res = get_stacks_payment(state_engine, nameop, state_op_type)
if res['status']:
# paid something in Stacks. Will ignore BTC.
if not tokens_allowed:
log.warning('Buyer of {} paid in Stacks, but should have paid in BTC to the namespace creator'.format(name))
return {'status': False}
res = check_payment_in_stacks(state_engine, nameop, state_op_type, fee_block_id)
if not res['status']:
log.warning("Buyer of {} paid in Stacks, but did not pay enough".format(name))
return {'status': False}
tokens_paid = res['tokens_paid']
token_units = res['token_units']
return {'status': True, 'tokens_paid': tokens_paid, 'token_units': token_units}
# did not pay in stacks tokens, or this isn't allowed yet
btc_price = price_name(name_without_namespace, namespace, fee_block_id) # price reflects namespace version
# fee must be high enough (either the preorder paid the right fee at the preorder block height,
# or the renewal paid the right fee at the renewal height)
if name_fee < btc_price:
log.warning("Name '%s' costs %s satoshis, but paid %s satoshis" % (name, btc_price, name_fee))
return {'status': False}
log.debug('Paid {} satoshis for {} to {}'.format(name_fee, name, burn_address))
return {'status': True, 'tokens_paid': name_fee, 'token_units': 'BTC'}
|
Verify that for a version-2 namespace (burn-to-creator), the nameop paid the right amount of BTC or Stacks.
It can pay either through a preorder (for registers), or directly (for renewals)
Return {'status': True, 'tokens_paid': ..., 'token_units': ...} if so
Return {'status': False} if not.
|
def ext_pillar(minion_id, # pylint: disable=W0613
pillar, # pylint: disable=W0613
command):
'''
Execute a command and read the output as YAML
'''
    output = None
    try:
        command = command.replace('%s', minion_id)
        output = __salt__['cmd.run_stdout'](command, python_shell=True)
return salt.utils.yaml.safe_load(output)
except Exception:
log.critical(
'YAML data from \'%s\' failed to parse. Command output:\n%s',
command, output
)
return {}
|
Execute a command and read the output as YAML
|
def partial_update(self, index, doc_type, id, doc=None, script=None, params=None,
upsert=None, querystring_args=None):
"""
Partially update a document with a script
"""
if querystring_args is None:
querystring_args = {}
if doc is None and script is None:
raise InvalidQuery("script or doc can not both be None")
if doc is None:
cmd = {"script": script}
if params:
cmd["params"] = params
if upsert:
cmd["upsert"] = upsert
else:
cmd = {"doc": doc }
path = make_path(index, doc_type, id, "_update")
return self._send_request('POST', path, cmd, querystring_args)
|
Partially update a document with a script
|
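A hedged usage sketch, assuming `conn` is an instance of the client class that partial_update() belongs to; the index, type, and script below are illustrative.

def bump_counter(conn):
    # Script-based partial update with an upsert fallback: if the
    # document is missing, the upsert body seeds counter at 1.
    return conn.partial_update(
        'my-index', 'my-type', 'doc-1',
        script='ctx._source.counter += count',
        params={'count': 1},
        upsert={'counter': 1})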
def import_simple_cookie(cls, simple_cookie):
""" Create cookie jar from SimpleCookie object
:param simple_cookie: cookies to import
:return: WHTTPCookieJar
"""
cookie_jar = WHTTPCookieJar()
for cookie_name in simple_cookie.keys():
cookie_attrs = {}
for attr_name in WHTTPCookie.cookie_attr_value_compliance.keys():
attr_value = simple_cookie[cookie_name][attr_name]
if attr_value != '':
cookie_attrs[attr_name] = attr_value
cookie_jar.add_cookie(WHTTPCookie(
cookie_name, simple_cookie[cookie_name].value, **cookie_attrs
))
return cookie_jar
|
Create cookie jar from SimpleCookie object
:param simple_cookie: cookies to import
:return: WHTTPCookieJar
|
def __get_view_tmpl(tag_key):
'''
    Look up the template by the 4-digit code of the category uid. If a
    4-digit template exists, use it; otherwise fall back to the parent
    category's template; failing that, use the generic template.
    Only the view needs this; edit and list use the generic template.
:return String.
'''
the_view_file_4 = './templates/tmpl_{0}/tpl_view_{1}.html'.format(
KIND_DICS['kind_' + tag_key.split('_')[-1]],
tag_key.split('_')[1]
)
the_view_file_2 = './templates/tmpl_{0}/tpl_view_{1}.html'.format(
KIND_DICS['kind_' + tag_key.split('_')[-1]],
tag_key.split('_')[1][:2]
)
if os.path.exists(the_view_file_4):
the_view_sig_str = '_{0}'.format(tag_key.split('_')[1])
elif os.path.exists(the_view_file_2):
the_view_sig_str = '_{0}'.format(tag_key.split('_')[1][:2])
else:
the_view_sig_str = ''
return the_view_sig_str
|
Look up the template by the 4-digit code of the category uid. If a
4-digit template exists, use it; otherwise fall back to the parent
category's template; failing that, use the generic template.
Only the view needs this; edit and list use the generic template.
:return String.
|
def transition(self, duration, is_on=None, **kwargs):
"""
        Transition to the specified state of the LED.
If another transition is already running, it is aborted.
:param duration: The duration of the transition.
:param is_on: The on-off state to transition to.
:param kwargs: The state to transition to.
"""
self._cancel_active_transition()
dest_state = self._prepare_transition(is_on, **kwargs)
total_steps = self._transition_steps(**dest_state)
state_stages = [self._transition_stage(step, total_steps, **dest_state)
for step in range(total_steps)]
pwm_stages = [self._get_pwm_values(**stage)
for stage in state_stages]
callback = partial(self._transition_callback, is_on)
self._active_transition = Transition(self._driver, duration,
state_stages, pwm_stages,
callback)
TransitionManager().execute(self._active_transition)
return self._active_transition
|
Transition to the specified state of the LED.
If another transition is already running, it is aborted.
:param duration: The duration of the transition.
:param is_on: The on-off state to transition to.
:param kwargs: The state to transition to.
|
def pad(data, blocksize=16):
"""
Pads data to blocksize according to RFC 4303. Pad length field is included in output.
"""
padlen = blocksize - len(data) % blocksize
return bytes(data + bytearray(range(1, padlen)) + bytearray((padlen - 1,)))
|
Pads data to blocksize according to RFC 4303. Pad length field is included in output.
|
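A small worked example of the layout pad() produces: for 3 bytes of input and the default 16-byte block, 12 monotonic pad bytes (1..12) are appended, followed by the pad-length byte (12).

padded = pad(b'abc')                        # blocksize defaults to 16
assert len(padded) == 16
assert padded[3:15] == bytes(range(1, 13))  # pad bytes 1, 2, ..., 12
assert padded[15] == 12                     # pad length field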
def get_unique_gene_psms(self, genetable, fields, firstjoin):
"""Uniques the results from get_proteins_psms so each PSM as defined
by gene ID / setname / psm_id will only occur once"""
lastgene = None
gpsms_out, gp_ids = [], []
for gpsm in self.get_proteins_psms(genetable, fields, firstjoin):
if gpsm[0] != lastgene:
for outpsm in gpsms_out:
yield outpsm
lastgene = gpsm[0]
gpsms_out, gp_ids = [], []
gp_id = gpsm[0] + gpsm[1] + gpsm[3]
if gp_id not in gp_ids:
gp_ids.append(gp_id)
gpsms_out.append(gpsm)
for outpsm in gpsms_out:
yield outpsm
|
Uniques the results from get_proteins_psms so each PSM as defined
by gene ID / setname / psm_id will only occur once
|
def prepare_output(d):
"""Clean pre-existing links in output directory."""
outDir = os.path.join(d, 'inorder')
if not os.path.exists(outDir):
os.mkdir(outDir)
for f in os.listdir(outDir):
f = os.path.join(outDir, f)
if os.path.islink(f):
os.remove(f)
return outDir
|
Clean pre-existing links in output directory.
|
def remove_reader(self, fd):
" Stop watching the file descriptor for read availability. "
h = msvcrt.get_osfhandle(fd)
if h in self._read_fds:
del self._read_fds[h]
|
Stop watching the file descriptor for read availability.
|
def get_slab_regions(slab, blength=3.5):
"""
Function to get the ranges of the slab regions. Useful for discerning where
the slab ends and vacuum begins if the slab is not fully within the cell
Args:
slab (Structure): Structure object modelling the surface
blength (float, Ang): The bondlength between atoms. You generally
want this value to be larger than the actual bondlengths in
order to find atoms that are part of the slab
"""
fcoords, indices, all_indices = [], [], []
for site in slab:
# find sites with c < 0 (noncontiguous)
neighbors = slab.get_neighbors(site, blength, include_index=True,
include_image=True)
for nn in neighbors:
if nn[0].frac_coords[2] < 0:
# sites are noncontiguous within cell
fcoords.append(nn[0].frac_coords[2])
indices.append(nn[-2])
if nn[-2] not in all_indices:
all_indices.append(nn[-2])
if fcoords:
# If slab is noncontiguous, locate the lowest
# site within the upper region of the slab
while fcoords:
last_fcoords = copy.copy(fcoords)
last_indices = copy.copy(indices)
site = slab[indices[fcoords.index(min(fcoords))]]
neighbors = slab.get_neighbors(site, blength, include_index=True,
include_image=True)
fcoords, indices = [], []
for nn in neighbors:
if 1 > nn[0].frac_coords[2] > 0 and \
nn[0].frac_coords[2] < site.frac_coords[2]:
# sites are noncontiguous within cell
fcoords.append(nn[0].frac_coords[2])
indices.append(nn[-2])
if nn[-2] not in all_indices:
all_indices.append(nn[-2])
# Now locate the highest site within the lower region of the slab
upper_fcoords = []
for site in slab:
if all([nn[-1] not in all_indices for nn in
slab.get_neighbors(site, blength,
include_index=True)]):
upper_fcoords.append(site.frac_coords[2])
coords = copy.copy(last_fcoords) if not fcoords else copy.copy(fcoords)
min_top = slab[last_indices[coords.index(min(coords))]].frac_coords[2]
ranges = [[0, max(upper_fcoords)], [min_top, 1]]
else:
# If the entire slab region is within the slab cell, just
# set the range as the highest and lowest site in the slab
sorted_sites = sorted(slab, key=lambda site: site.frac_coords[2])
ranges = [[sorted_sites[0].frac_coords[2],
sorted_sites[-1].frac_coords[2]]]
return ranges
|
Function to get the ranges of the slab regions. Useful for discerning where
the slab ends and vacuum begins if the slab is not fully within the cell
Args:
slab (Structure): Structure object modelling the surface
blength (float, Ang): The bondlength between atoms. You generally
want this value to be larger than the actual bondlengths in
order to find atoms that are part of the slab
|
def get_older_backup(self, encrypted=None, compressed=None,
content_type=None, database=None, servername=None):
"""
Return the older backup's file name.
:param encrypted: Filter by encrypted or not
:type encrypted: ``bool`` or ``None``
:param compressed: Filter by compressed or not
:type compressed: ``bool`` or ``None``
:param content_type: Filter by media or database backup, must be
``'db'`` or ``'media'``
:type content_type: ``str`` or ``None``
:param database: Filter by source database's name
:type: ``str`` or ``None``
:param servername: Filter by source server's name
:type: ``str`` or ``None``
:returns: Older file
:rtype: ``str``
:raises: FileNotFound: If no backup file is found
"""
files = self.list_backups(encrypted=encrypted, compressed=compressed,
content_type=content_type, database=database,
servername=servername)
if not files:
raise FileNotFound("There's no backup file available.")
return min(files, key=utils.filename_to_date)
|
Return the older backup's file name.
:param encrypted: Filter by encrypted or not
:type encrypted: ``bool`` or ``None``
:param compressed: Filter by compressed or not
:type compressed: ``bool`` or ``None``
:param content_type: Filter by media or database backup, must be
``'db'`` or ``'media'``
:type content_type: ``str`` or ``None``
:param database: Filter by source database's name
:type: ``str`` or ``None``
:param servername: Filter by source server's name
:type: ``str`` or ``None``
:returns: Older file
:rtype: ``str``
:raises: FileNotFound: If no backup file is found
|
def parse(cls, string):
"""
Parse a string and create a metric
"""
        match = re.match(r'^(?P<name>[A-Za-z0-9\.\-_]+)\s+' +
                         r'(?P<value>[0-9\.]+)\s+' +
                         r'(?P<timestamp>[0-9\.]+)(\n?)$',
                         string)
try:
groups = match.groupdict()
# TODO: get precision from value string
return Metric(groups['name'],
groups['value'],
float(groups['timestamp']))
        except Exception:
raise DiamondException(
"Metric could not be parsed from string: %s." % string)
|
Parse a string and create a metric
|
def make_command(self, ctx, name, info):
"""
make click sub-command from command info
gotten from xbahn engineer
"""
@self.command()
@click.option("--debug/--no-debug", default=False, help="Show debug information")
@doc(info.get("description"))
def func(*args, **kwargs):
if "debug" in kwargs:
del kwargs["debug"]
fn = getattr(ctx.widget, name)
result = fn(*args, **kwargs)
click.echo("%s: %s> %s" % (ctx.params["host"],name,result))
ctx.conn.close()
ctx.info_name = "%s %s" % (ctx.info_name , ctx.params["host"])
for a in info.get("arguments",[]):
deco = click.argument(*a["args"], **a["kwargs"])
func = deco(func)
for o in info.get("options",[]):
deco = click.option(*o["args"], **o["kwargs"])
func = deco(func)
return func
|
make click sub-command from command info
gotten from xbahn engineer
|
def create_connection(self, alias='default', **kwargs):
"""
Construct an instance of ``elasticsearch.Elasticsearch`` and register
it under given alias.
"""
kwargs.setdefault('serializer', serializer)
conn = self._conns[alias] = Elasticsearch(**kwargs)
return conn
|
Construct an instance of ``elasticsearch.Elasticsearch`` and register
it under given alias.
|
def authenticated_users(func):
"""
This decorator is used to abstract common authentication checking functionality
out of permission checks. It determines which parameter is the request based on name.
"""
is_object_permission = "has_object" in func.__name__
@wraps(func)
def func_wrapper(*args, **kwargs):
request = args[0]
# use second parameter if object permission
if is_object_permission:
request = args[1]
if not(request.user and request.user.is_authenticated):
return False
return func(*args, **kwargs)
return func_wrapper
|
This decorator is used to abstract common authentication checking functionality
out of permission checks. It determines which parameter is the request based on name.
|
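A hedged sketch of the intended decoration targets, modelled on dry-rest-permissions-style hooks (the Article model and its fields are illustrative): class-level checks receive the request first, while object-level checks are instance methods, so the wrapper pulls the request from args[1].

class Article:
    """Hypothetical model guarded by the decorator above."""
    def __init__(self, author):
        self.author = author

    @staticmethod
    @authenticated_users
    def has_read_permission(request):
        return True                          # request is args[0] here

    @authenticated_users
    def has_object_write_permission(self, request):
        return request.user == self.author   # request is args[1] here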
def version(self) -> str:
'''Show the version number of Android Debug Bridge.'''
output, _ = self._execute('version')
return output.splitlines()[0].split()[-1]
|
Show the version number of Android Debug Bridge.
|
def get_orientation(width, height):
# type: (int, int) -> Orientation
"""Get viewport orientation from given width and height.
:type width: int
:type height: int
:return: viewport orientation enum
:rtype: Orientation
"""
if width > height:
return Orientation.LANDSCAPE
elif width < height:
return Orientation.PORTRAIT
else:
return Orientation.EQUAL
|
Get viewport orientation from given width and height.
:type width: int
:type height: int
:return: viewport orientation enum
:rtype: Orientation
|
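A minimal check of the three branches (Orientation is the enum the snippet already references):

assert get_orientation(1920, 1080) is Orientation.LANDSCAPE
assert get_orientation(1080, 1920) is Orientation.PORTRAIT
assert get_orientation(512, 512) is Orientation.EQUAL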
def _Rforce(self,R,phi=0.,t=0.):
"""
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2017-10-16 - Written - Bovy (UofT)
"""
return -R*(1.+R*numpy.sin(3.*phi))
|
NAME:
_Rforce
PURPOSE:
evaluate the radial force for this potential
INPUT:
R - Galactocentric cylindrical radius
phi - azimuth
t - time
OUTPUT:
the radial force
HISTORY:
2017-10-16 - Written - Bovy (UofT)
|
def _forbidden_attributes(obj):
"""Return the object without the forbidden attributes."""
for key in list(obj.data.keys()):
if key in list(obj.reserved_keys.keys()):
obj.data.pop(key)
return obj
|
Return the object without the forbidden attributes.
|
def hms2frame(hms, fps):
"""
:param hms: a string, e.g. "01:23:15" for one hour, 23 minutes 15 seconds
:param fps: framerate
:return: frame number
"""
import time
t = time.strptime(hms, "%H:%M:%S")
return (t.tm_hour * 60 * 60 + t.tm_min * 60 + t.tm_sec) * fps
|
:param hms: a string, e.g. "01:23:15" for one hour, 23 minutes 15 seconds
:param fps: framerate
:return: frame number
|
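A quick worked example at a common 25 fps rate: 01:23:15 is 3600 + 1380 + 15 = 4995 seconds, hence frame 124875.

assert hms2frame("01:23:15", fps=25) == 124875
assert hms2frame("00:00:01", fps=30) == 30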
def _hash_pair(first: bytes, second: bytes) -> bytes:
""" Computes the hash of the items in lexicographic order """
if first is None:
return second
if second is None:
return first
if first > second:
return keccak(second + first)
else:
return keccak(first + second)
|
Computes the hash of the items in lexicographic order
|
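A minimal sketch of pairing leaves Merkle-style. The keccak import from eth_utils is an assumption (the snippet does not show where keccak comes from); the key property is that the hash is order-independent and a missing sibling passes through.

from eth_utils import keccak   # assumed source of keccak()

left = keccak(b'left')
right = keccak(b'right')
assert _hash_pair(left, right) == _hash_pair(right, left)  # order-independent
assert _hash_pair(left, None) == left                      # lone node bubbles up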
def is_classvar(tp):
"""Test if the type represents a class variable. Examples::
is_classvar(int) == False
is_classvar(ClassVar) == True
is_classvar(ClassVar[int]) == True
is_classvar(ClassVar[List[T]]) == True
"""
if NEW_TYPING:
return (tp is ClassVar or
isinstance(tp, _GenericAlias) and tp.__origin__ is ClassVar)
try:
from typing import _ClassVar
return type(tp) is _ClassVar
    except ImportError:
# SMA: support for very old typing module <=3.5.3
return False
|
Test if the type represents a class variable. Examples::
is_classvar(int) == False
is_classvar(ClassVar) == True
is_classvar(ClassVar[int]) == True
is_classvar(ClassVar[List[T]]) == True
|
def min_row_dist_sum_idx(dists):
"""Find the index of the row with the minimum row distance sum
This should return the index of the row index with the least distance overall
to all other rows.
Args:
dists (np.array): must be square distance matrix
Returns:
int: index of row with min dist row sum
"""
row_sums = np.apply_along_axis(arr=dists, axis=0, func1d=np.sum)
return row_sums.argmin()
|
Find the index of the row with the minimum row distance sum
This should return the index of the row index with the least distance overall
to all other rows.
Args:
dists (np.array): must be square distance matrix
Returns:
int: index of row with min dist row sum
|
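A small worked example; note the implementation sums along axis 0 (columns), which equals the row sums for the symmetric distance matrices the docstring requires.

import numpy as np

dists = np.array([[0., 1., 4.],
                  [1., 0., 2.],
                  [4., 2., 0.]])
# Row sums are [5, 3, 6]; row 1 is closest to all others overall.
assert min_row_dist_sum_idx(dists) == 1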
def generate_batch(cls, strategy, size, **kwargs):
"""Generate a batch of instances.
The instances will be created with the given strategy (one of
BUILD_STRATEGY, CREATE_STRATEGY, STUB_STRATEGY).
Args:
strategy (str): the strategy to use for generating the instance.
size (int): the number of instances to generate
Returns:
object list: the generated instances
"""
assert strategy in (enums.STUB_STRATEGY, enums.BUILD_STRATEGY, enums.CREATE_STRATEGY)
batch_action = getattr(cls, '%s_batch' % strategy)
return batch_action(size, **kwargs)
|
Generate a batch of instances.
The instances will be created with the given strategy (one of
BUILD_STRATEGY, CREATE_STRATEGY, STUB_STRATEGY).
Args:
strategy (str): the strategy to use for generating the instance.
size (int): the number of instances to generate
Returns:
object list: the generated instances
|
def evaluate(self, path):
""" This method evaluates attributes of the path.
Returns the cause and result of matching.
Both cause and result are returned from filters
that this object contains.
``path`` specifies the path.
"""
result = False
cause = None
for f in self.filters:
cause, result = f.evaluate(path)
if not result:
break
return cause, result
|
This method evaluates attributes of the path.
Returns the cause and result of matching.
Both cause and result are returned from filters
that this object contains.
``path`` specifies the path.
|
def replace_video(api_key, api_secret, local_video_path, video_key, **kwargs):
"""
    Function that replaces the content of an EXISTING video object.
:param api_key: <string> JWPlatform api-key
:param api_secret: <string> JWPlatform shared-secret
:param local_video_path: <string> Path to media on local machine.
:param video_key: <string> Video's object ID. Can be found within JWPlayer Dashboard.
:param kwargs: Arguments conforming to standards found @ https://developer.jwplayer.com/jw-platform/reference/v1/methods/videos/create.html
:return:
"""
filename = os.path.basename(local_video_path)
# Setup API client
jwplatform_client = jwplatform.Client(api_key, api_secret)
logging.info("Updating Video")
try:
response = jwplatform_client.videos.update(
video_key=video_key,
upload_method='s3',
update_file='True',
**kwargs)
except jwplatform.errors.JWPlatformError as e:
logging.error("Encountered an error updating the video\n{}".format(e))
sys.exit(e.message)
logging.info(response)
# Construct base url for upload
upload_url = '{}://{}{}'.format(
response['link']['protocol'],
response['link']['address'],
response['link']['path']
)
# Query parameters for the upload
query_parameters = response['link']['query']
# HTTP PUT upload using requests
headers = {'Content-Disposition': 'attachment; filename="{}"'.format(filename)}
with open(local_video_path, 'rb') as f:
r = requests.put(upload_url, params=query_parameters, headers=headers, data=f)
logging.info('uploading file {} to url {}'.format(local_video_path, r.url))
logging.info('upload response: {}'.format(r.text))
logging.info(r)
|
Function that replaces the content of an EXISTING video object.
:param api_key: <string> JWPlatform api-key
:param api_secret: <string> JWPlatform shared-secret
:param local_video_path: <string> Path to media on local machine.
:param video_key: <string> Video's object ID. Can be found within JWPlayer Dashboard.
:param kwargs: Arguments conforming to standards found @ https://developer.jwplayer.com/jw-platform/reference/v1/methods/videos/create.html
:return:
|
def _message_to_entity(msg, modelclass):
"""Recursive helper for _to_base_type() to convert a message to an entity.
Args:
msg: A Message instance.
modelclass: A Model subclass.
Returns:
An instance of modelclass.
"""
ent = modelclass()
for prop_name, prop in modelclass._properties.iteritems():
if prop._code_name == 'blob_': # TODO: Devise a cleaner test.
continue # That's taken care of later.
value = getattr(msg, prop_name)
if value is not None and isinstance(prop, model.StructuredProperty):
if prop._repeated:
value = [_message_to_entity(v, prop._modelclass) for v in value]
else:
value = _message_to_entity(value, prop._modelclass)
setattr(ent, prop_name, value)
return ent
|
Recursive helper for _to_base_type() to convert a message to an entity.
Args:
msg: A Message instance.
modelclass: A Model subclass.
Returns:
An instance of modelclass.
|
def ReleaseClick(cls):
    ''' Release the held mouse press. '''
element = cls._element()
action = ActionChains(Web.driver)
action.release(element)
action.perform()
|
Release the held mouse press.
|
def hard_reset(self):
"""Resets the iterator and ignore roll over data"""
if self.seq is not None and self.shuffle:
random.shuffle(self.seq)
if self.imgrec is not None:
self.imgrec.reset()
self.cur = 0
self._allow_read = True
self._cache_data = None
self._cache_label = None
self._cache_idx = None
|
Resets the iterator and ignore roll over data
|
def resolveFilenameConflicts(self, dialog=True):
"""Goes through list of DPs to make sure that their destination names
do not clash. Applies new names. Returns True if some conflicts were resolved.
        If dialog is True, shows confirmation dialog."""
resolved = self.wdplv.resolveFilenameConflicts()
if resolved and dialog:
QMessageBox.warning(self, "Filename conflicts", """<P>
<NOBR>PURR has found duplicate destination filenames among your data products.</NOBR>
This is not allowed, so some filenames have been adjusted to avoid name clashes.
Please review the changes before saving this entry.
</P>""",
QMessageBox.Ok, 0)
return resolved
|
Goes through list of DPs to make sure that their destination names
do not clash. Applies new names. Returns True if some conflicts were resolved.
If dialog is True, shows confirmation dialog.
|
def filehandles(path, openers_list=openers, pattern='', verbose=False):
"""Main function that iterates over list of openers and decides which opener to use.
:param str path: Path.
:param list openers_list: List of openers.
:param str pattern: Regular expression pattern.
:param verbose: Print additional information.
:type verbose: :py:obj:`True` or :py:obj:`False`
:return: Filehandle(s).
"""
if not verbose:
logging.disable(logging.VERBOSE)
for opener in openers_list:
try:
for filehandle in opener(path=path, pattern=pattern, verbose=verbose):
with closing(filehandle):
yield filehandle
break # use the first successful opener function
except (zipfile.BadZipfile, tarfile.ReadError, GZValidationError,
BZ2ValidationError, IOError, NotADirectoryError):
continue
else:
logger.verbose('No opener found for path: "{}"'.format(path))
yield None
|
Main function that iterates over list of openers and decides which opener to use.
:param str path: Path.
:param list openers_list: List of openers.
:param str pattern: Regular expression pattern.
:param verbose: Print additional information.
:type verbose: :py:obj:`True` or :py:obj:`False`
:return: Filehandle(s).
|
def resize_img(fname, targ, path, new_path, fn=None):
"""
Enlarge or shrink a single image to scale, such that the smaller of the height or width dimension is equal to targ.
"""
if fn is None:
fn = resize_fn(targ)
dest = os.path.join(path_for(path, new_path, targ), fname)
if os.path.exists(dest): return
im = Image.open(os.path.join(path, fname)).convert('RGB')
os.makedirs(os.path.split(dest)[0], exist_ok=True)
fn(im).save(dest)
|
Enlarge or shrink a single image to scale, such that the smaller of the height or width dimension is equal to targ.
|
def write_long(self, n, pack=Struct('>I').pack):
"""
Write an integer as an unsigned 32-bit value.
"""
if 0 <= n <= 0xFFFFFFFF:
self._output_buffer.extend(pack(n))
else:
            raise ValueError('Long %d out of range 0..0xFFFFFFFF' % n)
return self
|
Write an integer as an unsigned 32-bit value.
|
def get_file_to_text(
self, share_name, directory_name, file_name, encoding='utf-8',
start_range=None, end_range=None, range_get_content_md5=None,
progress_callback=None, max_connections=1, max_retries=5,
retry_wait=1.0, timeout=None):
'''
Downloads a file as unicode text, with automatic chunking and progress
notifications. Returns an instance of :class:`File` with properties,
metadata, and content.
:param str share_name:
Name of existing share.
:param str directory_name:
The path to the directory.
:param str file_name:
Name of existing file.
:param str encoding:
Python encoding to use when decoding the file data.
:param int start_range:
Start of byte range to use for downloading a section of the file.
If no end_range is given, all bytes after the start_range will be downloaded.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param int end_range:
End of byte range to use for downloading a section of the file.
If end_range is given, start_range must be provided.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param bool range_get_content_md5:
When this header is set to True and specified together
with the Range header, the service returns the MD5 hash for the
range, as long as the range is less than or equal to 4 MB in size.
:param progress_callback:
Callback for progress with signature function(current, total)
            where current is the number of bytes transferred so far, and total is
the size of the file if known.
:type progress_callback: callback function in format of func(current, total)
:param int max_connections:
Set to 1 to download the file sequentially.
Set to 2 or greater if you want to download a file larger than 64MB in chunks.
If the file size does not exceed 64MB it will be downloaded in one chunk.
:param int max_retries:
Number of times to retry download of file chunk if an error occurs.
:param int retry_wait:
Sleep time in secs between retries.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: A File with properties, content, and metadata.
:rtype: :class:`~azure.storage.file.models.File`
'''
_validate_not_none('share_name', share_name)
_validate_not_none('file_name', file_name)
_validate_not_none('encoding', encoding)
file = self.get_file_to_bytes(
share_name,
directory_name,
file_name,
start_range,
end_range,
range_get_content_md5,
progress_callback,
max_connections,
max_retries,
retry_wait,
timeout)
file.content = file.content.decode(encoding)
return file
|
Downloads a file as unicode text, with automatic chunking and progress
notifications. Returns an instance of :class:`File` with properties,
metadata, and content.
:param str share_name:
Name of existing share.
:param str directory_name:
The path to the directory.
:param str file_name:
Name of existing file.
:param str encoding:
Python encoding to use when decoding the file data.
:param int start_range:
Start of byte range to use for downloading a section of the file.
If no end_range is given, all bytes after the start_range will be downloaded.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param int end_range:
End of byte range to use for downloading a section of the file.
If end_range is given, start_range must be provided.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param bool range_get_content_md5:
When this header is set to True and specified together
with the Range header, the service returns the MD5 hash for the
range, as long as the range is less than or equal to 4 MB in size.
:param progress_callback:
Callback for progress with signature function(current, total)
where current is the number of bytes transferred so far, and total is
the size of the file if known.
:type progress_callback: callback function in format of func(current, total)
:param int max_connections:
Set to 1 to download the file sequentially.
Set to 2 or greater if you want to download a file larger than 64MB in chunks.
If the file size does not exceed 64MB it will be downloaded in one chunk.
:param int max_retries:
Number of times to retry download of file chunk if an error occurs.
:param int retry_wait:
Sleep time in secs between retries.
:param int timeout:
The timeout parameter is expressed in seconds. This method may make
multiple calls to the Azure service and the timeout will apply to
each call individually.
:return: A File with properties, content, and metadata.
:rtype: :class:`~azure.storage.file.models.File`
|
def _get_prediction_feature_weights(lgb, X, n_targets):
"""
Return a list of {feat_id: value} dicts with feature weights,
following ideas from http://blog.datadive.net/interpreting-random-forests/
"""
if n_targets == 2:
n_targets = 1
dump = lgb.booster_.dump_model()
tree_info = dump['tree_info']
_compute_node_values(tree_info)
pred_leafs = lgb.booster_.predict(X, pred_leaf=True).reshape(-1, n_targets)
tree_info = np.array(tree_info).reshape(-1, n_targets)
assert pred_leafs.shape == tree_info.shape
res = []
for target in range(n_targets):
feature_weights = defaultdict(float) # type: DefaultDict[Optional[str], float]
for info, leaf_id in zip(tree_info[:, target], pred_leafs[:, target]):
leaf_index, split_index = _get_leaf_split_indices(
info['tree_structure']
)
bias, path = _get_decision_path(leaf_index, split_index, leaf_id)
feature_weights[None] += bias
for feat, value in path:
feature_weights[feat] += value
res.append(dict(feature_weights))
return res
|
Return a list of {feat_id: value} dicts with feature weights,
following ideas from http://blog.datadive.net/interpreting-random-forests/
|
def json_dumps(self, data):
"""
Standardized json.dumps function with separators and sorted keys set
Args:
data (dict or list): data to be dumped
        Returns:
            bytes: UTF-8 encoded JSON
"""
return json.dumps(
data,
separators=(',', ':'),
sort_keys=True,
cls=self.json_encoder,
ensure_ascii=False
).encode('utf8')
|
Standardized json.dumps function with separators and sorted keys set
Args:
data (dict or list): data to be dumped
Returns:
    bytes: UTF-8 encoded JSON
|
def encode(cls, line):
"""Backslash escape line.value."""
if not line.encoded:
encoding = getattr(line, 'encoding_param', None)
if encoding and encoding.upper() == cls.base64string:
line.value = b64encode(line.value).decode('utf-8')
else:
line.value = backslashEscape(str_(line.value))
line.encoded = True
|
Backslash escape line.value.
|
def utf8(value):
"""Converts a string argument to a byte string.
If the argument is already a byte string or None, it is returned unchanged.
Otherwise it must be a unicode string and is encoded as utf8.
"""
if isinstance(value, _UTF8_TYPES):
return value
elif isinstance(value, unicode_type):
return value.encode("utf-8")
else:
return str(value)
|
Converts a string argument to a byte string.
If the argument is already a byte string or None, it is returned unchanged.
Otherwise it must be a unicode string and is encoded as utf8.
|
def save(self, fp=None):
"""Save to file.
Parameters
----------
fp : `file`, optional
Output file.
"""
self.storage.settings['markov'] = self.get_settings_json()
self.storage.save(fp)
|
Save to file.
Parameters
----------
fp : `file`, optional
Output file.
|
def get_overs_summary(self, match_key):
"""
Calling Overs Summary API
Arg:
match_key: key of the match
Return:
json data
"""
overs_summary_url = self.api_path + "match/" + match_key + "/overs_summary/"
response = self.get_response(overs_summary_url)
return response
|
Calling Overs Summary API
Arg:
match_key: key of the match
Return:
json data
|
def create_pull(self, *args, **kwds):
"""
:calls: `POST /repos/:owner/:repo/pulls <http://developer.github.com/v3/pulls>`_
:param title: string
:param body: string
:param issue: :class:`github.Issue.Issue`
:param base: string
:param head: string
:param maintainer_can_modify: bool
:rtype: :class:`github.PullRequest.PullRequest`
"""
if len(args) + len(kwds) >= 4:
return self.__create_pull_1(*args, **kwds)
else:
return self.__create_pull_2(*args, **kwds)
|
:calls: `POST /repos/:owner/:repo/pulls <http://developer.github.com/v3/pulls>`_
:param title: string
:param body: string
:param issue: :class:`github.Issue.Issue`
:param base: string
:param head: string
:param maintainer_can_modify: bool
:rtype: :class:`github.PullRequest.PullRequest`
|
def match(self, other, psd=None,
low_frequency_cutoff=None, high_frequency_cutoff=None):
""" Return the match between the two TimeSeries or FrequencySeries.
        Return the match between two waveforms. This is equivalent to the overlap
maximized over time and phase. By default, the other vector will be
resized to match self. This may remove high frequency content or the
end of the vector.
Parameters
----------
other : TimeSeries or FrequencySeries
The input vector containing a waveform.
psd : Frequency Series
A power spectral density to weight the overlap.
low_frequency_cutoff : {None, float}, optional
The frequency to begin the match.
high_frequency_cutoff : {None, float}, optional
The frequency to stop the match.
Returns
-------
match: float
index: int
The number of samples to shift to get the match.
"""
return self.to_frequencyseries().match(other, psd=psd,
low_frequency_cutoff=low_frequency_cutoff,
high_frequency_cutoff=high_frequency_cutoff)
|
Return the match between the two TimeSeries or FrequencySeries.
Return the match between two waveforms. This is equivalent to the overlap
maximized over time and phase. By default, the other vector will be
resized to match self. This may remove high frequency content or the
end of the vector.
Parameters
----------
other : TimeSeries or FrequencySeries
The input vector containing a waveform.
psd : Frequency Series
A power spectral density to weight the overlap.
low_frequency_cutoff : {None, float}, optional
The frequency to begin the match.
high_frequency_cutoff : {None, float}, optional
The frequency to stop the match.
Returns
-------
match: float
index: int
The number of samples to shift to get the match.
|
def render(self):
"""
Creates a composite ``struct`` format and the data to render with it.
The format and data are prefixed with a 32-bit integer denoting the
number of elements, after which each of the items in the array value
are ``render()``-ed and added to the format and data as well.
"""
value = self.value
if value is None:
value = []
fmt = [Int.fmt]
data = [len(value)]
for item_value in value:
if issubclass(self.item_class, Primitive):
item = self.item_class(item_value)
else:
item = item_value
item_format, item_data = item.render()
fmt.extend(item_format)
data.extend(item_data)
return "".join(fmt), data
|
Creates a composite ``struct`` format and the data to render with it.
The format and data are prefixed with a 32-bit integer denoting the
number of elements, after which each of the items in the array value
are ``render()``-ed and added to the format and data as well.
|
def bellman_segmentation(self, x, states):
"""
        Divide a univariate time series, x, into `states` contiguous segments, using the Bellman k-segmentation algorithm on the peak prominences of the data.
:param x: The time series to assess freeze of gait on. This could be x, y, z or mag_sum_acc.
:type x: pandas.Series
        :param states: Number of contiguous segments.
:type states: int
:return peaks: The peaks in our data_frame.
:rtype peaks: list
:return prominences: Peaks prominences.
:rtype prominences: list
:return bellman_idx: The indices of the segments.
:rtype bellman_idx: list
"""
peaks, prominences = get_signal_peaks_and_prominences(x)
bellman_idx = BellmanKSegment(prominences, states)
return peaks, prominences, bellman_idx
|
Divide a univariate time series, x, into `states` contiguous segments, using the Bellman k-segmentation algorithm on the peak prominences of the data.
:param x: The time series to assess freeze of gait on. This could be x, y, z or mag_sum_acc.
:type x: pandas.Series
:param states: Number of contiguous segments.
:type states: int
:return peaks: The peaks in our data_frame.
:rtype peaks: list
:return prominences: Peaks prominences.
:rtype prominences: list
:return bellman_idx: The indices of the segments.
:rtype bellman_idx: list
|
def _stats(arr, percentiles=(2, 98), **kwargs):
"""
Calculate array statistics.
    Parameters
----------
    arr: numpy masked array
Input array data to get the stats from.
percentiles: tuple, optional
Tuple of Min/Max percentiles to compute.
kwargs: dict, optional
These will be passed to the numpy.histogram function.
Returns
-------
dict
numpy array statistics: percentiles, min, max, stdev, histogram
e.g.
{
'pc': [38, 147],
'min': 20,
'max': 180,
'std': 28.123562304138662,
'histogram': [
[1625, 219241, 28344, 15808, 12325, 10687, 8535, 7348, 4656, 1208],
[20.0, 36.0, 52.0, 68.0, 84.0, 100.0, 116.0, 132.0, 148.0, 164.0, 180.0]
]
}
"""
sample, edges = np.histogram(arr[~arr.mask], **kwargs)
return {
"pc": np.percentile(arr[~arr.mask], percentiles).astype(arr.dtype).tolist(),
"min": arr.min().item(),
"max": arr.max().item(),
"std": arr.std().item(),
"histogram": [sample.tolist(), edges.tolist()],
}
|
Calculate array statistics.
Parameters
----------
arr: numpy masked array
Input array data to get the stats from.
percentiles: tuple, optional
Tuple of Min/Max percentiles to compute.
kwargs: dict, optional
These will be passed to the numpy.histogram function.
Returns
-------
dict
numpy array statistics: percentiles, min, max, stdev, histogram
e.g.
{
'pc': [38, 147],
'min': 20,
'max': 180,
'std': 28.123562304138662,
'histogram': [
[1625, 219241, 28344, 15808, 12325, 10687, 8535, 7348, 4656, 1208],
[20.0, 36.0, 52.0, 68.0, 84.0, 100.0, 116.0, 132.0, 148.0, 164.0, 180.0]
]
}
|
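A hedged usage sketch: the function indexes arr[~arr.mask], so it expects a numpy masked array rather than a plain ndarray; bins below is simply one of the numpy.histogram kwargs being forwarded.

import numpy as np

data = np.ma.masked_invalid(np.array([20., np.nan, 60., 100., 140., 180.]))
stats = _stats(data, percentiles=(2, 98), bins=5)
assert stats["min"] == 20.0 and stats["max"] == 180.0
counts, edges = stats["histogram"]   # straight from numpy.histogram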
def set_writer_position(self, name, timestamp):
"""Insert a timestamp to keep track of the current writer position"""
execute = self.cursor.execute
execute('DELETE FROM gauged_writer_history WHERE id = %s', (name,))
execute('INSERT INTO gauged_writer_history (id, timestamp) '
'VALUES (%s, %s)', (name, timestamp,))
|
Insert a timestamp to keep track of the current writer position
|
def load_dataloader(self):
"""
        Description : Set up the dataloader
"""
input_transform = transforms.Compose([transforms.ToTensor(), \
transforms.Normalize((0.7136, 0.4906, 0.3283), \
(0.1138, 0.1078, 0.0917))])
training_dataset = LipsDataset(self.image_path,
self.align_path,
mode='train',
transform=input_transform,
seq_len=self.seq_len)
self.train_dataloader = mx.gluon.data.DataLoader(training_dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=self.num_workers)
valid_dataset = LipsDataset(self.image_path,
self.align_path,
mode='valid',
transform=input_transform,
seq_len=self.seq_len)
self.valid_dataloader = mx.gluon.data.DataLoader(valid_dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=self.num_workers)
|
Description : Set up the dataloader
|
def add_rectangle(self,
north=None,
west=None,
south=None,
east=None,
**kwargs):
""" Adds a rectangle dict to the Map.rectangles attribute
The Google Maps API describes a rectangle using the LatLngBounds
object, which defines the bounds to be drawn. The bounds use the
concept of 2 delimiting points, a northwest and a southeast points,
        where each coordinate is set by the corresponding parameter.
It accepts a rectangle dict representation as well.
Args:
north (float): The north latitude
west (float): The west longitude
south (float): The south latitude
east (float): The east longitude
.. _LatLngBoundsLiteral:
https://developers.google.com/maps/documen
tation/javascript/reference#LatLngBoundsLiteral
.. _Rectangles:
https://developers.google.com/maps/documen
tation/javascript/shapes#rectangles
"""
kwargs.setdefault('bounds', {})
if north:
kwargs['bounds']['north'] = north
if west:
kwargs['bounds']['west'] = west
if south:
kwargs['bounds']['south'] = south
if east:
kwargs['bounds']['east'] = east
if set(
('north', 'east', 'south', 'west')
) != set(kwargs['bounds'].keys()):
            raise AttributeError('rectangle bounds are required for rectangles')
kwargs.setdefault('stroke_color', '#FF0000')
kwargs.setdefault('stroke_opacity', .8)
kwargs.setdefault('stroke_weight', 2)
kwargs.setdefault('fill_color', '#FF0000')
kwargs.setdefault('fill_opacity', .3)
self.rectangles.append(kwargs)
|
Adds a rectangle dict to the Map.rectangles attribute
The Google Maps API describes a rectangle using the LatLngBounds
object, which defines the bounds to be drawn. The bounds use the
concept of 2 delimiting points, a northwest and a southeast points,
where each coordinate is set by the corresponding parameter.
It accepts a rectangle dict representation as well.
Args:
north (float): The north latitude
west (float): The west longitude
south (float): The south latitude
east (float): The east longitude
.. _LatLngBoundsLiteral:
https://developers.google.com/maps/documen
tation/javascript/reference#LatLngBoundsLiteral
.. _Rectangles:
https://developers.google.com/maps/documen
tation/javascript/shapes#rectangles
|
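A hedged usage sketch, assuming `gmap` is an instance of the Map class this method belongs to; the coordinates are illustrative.

def add_demo_box(gmap):
    # All four bounds must be supplied or the method raises AttributeError;
    # unspecified styling falls back to the red semi-transparent defaults.
    gmap.add_rectangle(north=40.8, west=-74.1, south=40.7, east=-73.9,
                       fill_color='#00FF00')
    return gmap.rectangles[-1]   # the dict just appended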
def get_messages(self,
statuses=DEFAULT_MESSAGE_STATUSES,
order="sent_at desc",
offset=None,
count=None,
content=False):
"""Returns a list of messages your account sent.
Messages are sorted by ``order``, starting at an optional integer ``offset``, and optionally limited to the first ``count`` items (in sorted order).
Returned data includes various statistics about each message, e.g., ``total_opens``, ``open_rate``, ``total_clicks``, ``unsubs``, ``soft_bounces``. If ``content=True``, the returned data will also include HTML content of each message.
"""
req_data = [ { "status": statuses }, order, fmt_paging(offset, count) ]
service = "query:Message.stats"
if content: service += ", Message.content"
return self.request(service, req_data)
|
Returns a list of messages your account sent.
Messages are sorted by ``order``, starting at an optional integer ``offset``, and optionally limited to the first ``count`` items (in sorted order).
Returned data includes various statistics about each message, e.g., ``total_opens``, ``open_rate``, ``total_clicks``, ``unsubs``, ``soft_bounces``. If ``content=True``, the returned data will also include HTML content of each message.
|