def _load_user_dn(self):
    """
    Populates self._user_dn with the distinguished name of our user.

    This will either construct the DN from a template in
    AUTH_LDAP_USER_DN_TEMPLATE or connect to the server and search for it.
    If we have to search, we'll cache the DN.
    """
    if self._using_simple_bind_mode():
        self._user_dn = self._construct_simple_user_dn()
    else:
        if self.settings.CACHE_TIMEOUT > 0:
            cache_key = valid_cache_key(
                "django_auth_ldap.user_dn.{}".format(self._username)
            )
            self._user_dn = cache.get_or_set(
                cache_key, self._search_for_user_dn, self.settings.CACHE_TIMEOUT
            )
        else:
            self._user_dn = self._search_for_user_dn()
def dict_of_lists_add(dictionary, key, value):
    # type: (DictUpperBound, Any, Any) -> None
    """Add value to a list in a dictionary by key

    Args:
        dictionary (DictUpperBound): Dictionary to which to add values
        key (Any): Key within dictionary
        value (Any): Value to add to list in dictionary

    Returns:
        None
    """
    list_objs = dictionary.get(key, list())
    list_objs.append(value)
    dictionary[key] = list_objs
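A minimal usage sketch showing how repeated keys accumulate values (a plain dict stands in for DictUpperBound; the data is hypothetical):

# Usage sketch, not from the original source.
groups = {}
dict_of_lists_add(groups, "fruit", "apple")
dict_of_lists_add(groups, "fruit", "banana")
dict_of_lists_add(groups, "veg", "carrot")
print(groups)  # {'fruit': ['apple', 'banana'], 'veg': ['carrot']}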
def check(self, func=None, name=None):
    """
    A decorator to register a new Dockerflow check to be run
    when the /__heartbeat__ endpoint is called, e.g.::

        from dockerflow.flask import checks

        @dockerflow.check
        def storage_reachable():
            try:
                acme.storage.ping()
            except SlowConnectionException as exc:
                return [checks.Warning(exc.msg, id='acme.health.0002')]
            except StorageException as exc:
                return [checks.Error(exc.msg, id='acme.health.0001')]

    or using a custom name::

        @dockerflow.check(name='acme-storage-check')
        def storage_reachable():
            # ...
    """
    if func is None:
        return functools.partial(self.check, name=name)

    if name is None:
        name = func.__name__

    self.logger.info('Registered Dockerflow check %s', name)

    @functools.wraps(func)
    def decorated_function(*args, **kwargs):
        self.logger.info('Called Dockerflow check %s', name)
        return func(*args, **kwargs)

    self.checks[name] = decorated_function
    return decorated_function
def generate_daterange(report):
    """
    Creates a date_range timestamp with format YYYY-MM-DDTHH:MM:SS
    based on begin and end dates for easier parsing in Kibana.

    Move to utils to avoid duplication w/ elastic?
    """
    metadata = report["report_metadata"]
    begin_date = human_timestamp_to_datetime(metadata["begin_date"])
    end_date = human_timestamp_to_datetime(metadata["end_date"])
    begin_date_human = begin_date.strftime("%Y-%m-%dT%H:%M:%S")
    end_date_human = end_date.strftime("%Y-%m-%dT%H:%M:%S")
    date_range = [begin_date_human, end_date_human]
    logger.debug("date_range is {}".format(date_range))
    return date_range
def metastable_sets(self):
    """
    Crisp clustering using PCCA. This is only recommended for visualization
    purposes. You *cannot* compute any actual quantity of the coarse-grained
    kinetics without employing the fuzzy memberships!

    Returns
    -------
    A list of length equal to the number of metastable states. Each element
    is an array with the microstate indexes contained in it.
    """
    res = []
    assignment = self.metastable_assignment
    for i in range(self.m):
        res.append(np.where(assignment == i)[0])
    return res
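The grouping logic reduces to plain numpy; an illustrative sketch with a hypothetical assignment vector:

import numpy as np

# Hypothetical assignment: microstate index -> metastable state.
assignment = np.array([0, 1, 0, 2, 1, 0])
sets = [np.where(assignment == i)[0] for i in range(3)]
print(sets)  # [array([0, 2, 5]), array([1, 4]), array([3])]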
async def dict(self, full):
    '''
    Open a HiveDict at the given full path.
    '''
    node = await self.open(full)
    return await HiveDict.anit(self, node)
def retry(self):
    """Retry connecting to deCONZ."""
    self.state = STATE_STARTING
    self.loop.call_later(RETRY_TIMER, self.start)
    _LOGGER.debug('Reconnecting to deCONZ in %i.', RETRY_TIMER)
def get_histories_over_repetitions(self, exp, tags, aggregate):
    """ This function gets all histories of all repetitions using get_history() on the given
    tag(s), and then applies the function given by 'aggregate' to all corresponding values
    in each history over all iterations. Typical aggregate functions could be 'mean' or
    'max'.
    """
    params = self.get_params(exp)

    # explicitly make tags list in case of 'all'
    if tags == 'all':
        tags = self.get_history(exp, 0, 'all').keys()

    # make list of tags if it is just a string
    if not hasattr(tags, '__iter__'):
        tags = [tags]

    results = {}
    for tag in tags:
        # get all histories
        histories = zeros((params['repetitions'], params['iterations']))
        skipped = []
        for i in range(params['repetitions']):
            try:
                histories[i, :] = self.get_history(exp, i, tag)
            except ValueError:
                h = self.get_history(exp, i, tag)
                if len(h) == 0:
                    # history not existent, skip it
                    print('warning: history %i has length 0 (expected: %i). it will be skipped.' % (i, params['iterations']))
                    skipped.append(i)
                elif len(h) > params['iterations']:
                    # if history too long, crop it
                    print('warning: history %i has length %i (expected: %i). it will be truncated.' % (i, len(h), params['iterations']))
                    h = h[:params['iterations']]
                    histories[i, :] = h
                elif len(h) < params['iterations']:
                    # if history too short, crop everything else
                    print('warning: history %i has length %i (expected: %i). all other histories will be truncated.' % (i, len(h), params['iterations']))
                    params['iterations'] = len(h)
                    histories = histories[:, :params['iterations']]
                    histories[i, :] = h

        # remove all rows that have been skipped
        histories = delete(histories, skipped, axis=0)
        params['repetitions'] -= len(skipped)

        # calculate result from each column with aggregation function
        aggregated = zeros(params['iterations'])
        for i in range(params['iterations']):
            aggregated[i] = aggregate(histories[:, i])

        # if only one tag is requested, return list immediately, otherwise append to dictionary
        if len(tags) == 1:
            return aggregated
        else:
            results[tag] = aggregated

    return results
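A hedged usage sketch; `suite` is assumed to be an instance of the class defining this method, and the experiment path and tag are hypothetical:

from numpy import mean

# Average the 'loss' history across repetitions, one value per iteration.
avg_loss = suite.get_histories_over_repetitions('exp1', 'loss', mean)
# Any callable over a 1-D array works as the aggregate, e.g. max.
max_loss = suite.get_histories_over_repetitions('exp1', 'loss', max)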
def parse(self):
    """ Parse metafile and check pre-conditions.
    """
    try:
        if not os.path.getsize(self.ns.pathname):
            # Ignore 0-byte dummy files (Firefox creates these while downloading)
            self.job.LOG.warn("Ignoring 0-byte metafile '%s'" % (self.ns.pathname,))
            return
        self.metadata = metafile.checked_open(self.ns.pathname)
    except EnvironmentError as exc:
        self.job.LOG.error("Can't read metafile '%s' (%s)" % (
            self.ns.pathname, str(exc).replace(": '%s'" % self.ns.pathname, ""),
        ))
        return
    except ValueError as exc:
        self.job.LOG.error("Invalid metafile '%s': %s" % (self.ns.pathname, exc))
        return

    self.ns.info_hash = metafile.info_hash(self.metadata)
    self.ns.info_name = self.metadata["info"]["name"]
    self.job.LOG.info("Loaded '%s' from metafile '%s'" % (self.ns.info_name, self.ns.pathname))

    # Check whether item is already loaded
    try:
        name = self.job.proxy.d.name(self.ns.info_hash, fail_silently=True)
    except xmlrpc.HashNotFound:
        pass
    except xmlrpc.ERRORS as exc:
        if exc.faultString != "Could not find info-hash.":
            self.job.LOG.error("While checking for #%s: %s" % (self.ns.info_hash, exc))
            return
    else:
        self.job.LOG.warn("Item #%s '%s' already added to client" % (self.ns.info_hash, name))
        return

    return True
def from_keras_log(csv_path, output_dir_path, **kwargs):
    """Plot accuracy and loss from a Keras CSV log.

    Args:
        csv_path: The path to the CSV log with the actual data.
        output_dir_path: The path to the directory where the resulting plots
            should end up.
    """
    # automatically get separator by using Python's CSV parser
    data = pd.read_csv(csv_path, sep=None, engine='python')
    _from_keras_log_format(data, output_dir_path=output_dir_path, **kwargs)
def content_written(generator, content):
    """
    Create a URL and call make_posts (which has less information).
    """
    url = "%s/%s" % (generator.settings.get('SITEURL', 'http://localhost:8000'), content.url)
    make_posts(generator, content.metadata, url)
def client_config(path, env_var='SALT_CLIENT_CONFIG', defaults=None):
    '''
    Load Master configuration data

    Usage:

    .. code-block:: python

        import salt.config
        master_opts = salt.config.client_config('/etc/salt/master')

    Returns a dictionary of the Salt Master configuration file with necessary
    options needed to communicate with a locally-running Salt Master daemon.
    This function searches for client specific configurations and adds them to
    the data from the master configuration.

    This is useful for master-side operations like
    :py:class:`~salt.client.LocalClient`.
    '''
    if defaults is None:
        defaults = DEFAULT_MASTER_OPTS.copy()

    xdg_dir = salt.utils.xdg.xdg_config_dir()
    if os.path.isdir(xdg_dir):
        client_config_dir = xdg_dir
        saltrc_config_file = 'saltrc'
    else:
        client_config_dir = os.path.expanduser('~')
        saltrc_config_file = '.saltrc'

    # Get the token file path from the provided defaults. If not found, specify
    # our own, sane, default
    opts = {
        'token_file': defaults.get(
            'token_file',
            os.path.join(client_config_dir, 'salt_token')
        )
    }
    # Update options with the master configuration, either from the provided
    # path, salt's defaults or provided defaults
    opts.update(
        master_config(path, defaults=defaults)
    )
    # Update with the user's salt dot file or with the environment variable
    saltrc_config = os.path.join(client_config_dir, saltrc_config_file)
    opts.update(
        load_config(
            saltrc_config,
            env_var,
            saltrc_config
        )
    )
    # Make sure we have a proper and absolute path to the token file
    if 'token_file' in opts:
        opts['token_file'] = os.path.abspath(
            os.path.expanduser(
                opts['token_file']
            )
        )
    # If the token file exists, read and store the contained token
    if os.path.isfile(opts['token_file']):
        # Make sure token is still valid
        expire = opts.get('token_expire', 43200)
        if os.stat(opts['token_file']).st_mtime + expire > time.mktime(time.localtime()):
            with salt.utils.files.fopen(opts['token_file']) as fp_:
                opts['token'] = fp_.read().strip()
    # On some platforms, like OpenBSD, 0.0.0.0 won't catch a master running on localhost
    if opts['interface'] == '0.0.0.0':
        opts['interface'] = '127.0.0.1'
    # Make sure the master_uri is set
    if 'master_uri' not in opts:
        opts['master_uri'] = 'tcp://{ip}:{port}'.format(
            ip=salt.utils.zeromq.ip_bracket(opts['interface']),
            port=opts['ret_port']
        )
    # Return the client options
    _validate_opts(opts)
    return opts
def find_or_create_role(self, name, **kwargs):
    """Returns a role matching the given name or creates it with any
    additionally provided parameters.
    """
    kwargs["name"] = name
    return self.find_role(name) or self.create_role(**kwargs)
def _populate_alternate_kwargs(kwargs):
    """ Translates the parsed arguments into a format used by generic ARM commands
    such as the resource and lock commands.
    """
    resource_namespace = kwargs['namespace']
    resource_type = kwargs.get('child_type_{}'.format(kwargs['last_child_num'])) or kwargs['type']
    resource_name = kwargs.get('child_name_{}'.format(kwargs['last_child_num'])) or kwargs['name']

    _get_parents_from_parts(kwargs)
    kwargs['resource_namespace'] = resource_namespace
    kwargs['resource_type'] = resource_type
    kwargs['resource_name'] = resource_name
    return kwargs
def organizations(self, user, include=None):
    """
    Retrieve the organizations for this user.

    :param include: list of objects to sideload. `Side-loading API Docs
        <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
    :param user: User object or id
    """
    return self._query_zendesk(self.endpoint.organizations, 'organization', id=user, include=include)
def incoming(self, packet):
    """
    Callback for data received from the copter.
    """
    # This might be done prettier ;-)
    console_text = packet.data.decode('UTF-8')
    self.receivedChar.call(console_text)
async def info(self, token):
    """Queries the policy of a given token.

    Parameters:
        token (ObjectID): Token ID
    Returns:
        ObjectMeta: where value is token
    Raises:
        NotFound: the token was not found

    It returns a body like this::

        {
            "CreateIndex": 3,
            "ModifyIndex": 3,
            "ID": "8f246b77-f3e1-ff88-5b48-8ec93abf3e05",
            "Name": "Client Token",
            "Type": "client",
            "Rules": {
                "key": {
                    "": {
                        "policy": "read"
                    },
                    "private/": {
                        "policy": "deny"
                    }
                }
            }
        }
    """
    token_id = extract_attr(token, keys=["ID"])
    response = await self._api.get("/v1/acl/info", token_id)
    meta = extract_meta(response.headers)
    try:
        result = decode_token(response.body[0])
    except IndexError:
        raise NotFound(response.body, meta=meta)
    return consul(result, meta=meta)
def check_status(self, **kwargs):
    """
    Check the status of the works in self.

    Args:
        show: True to show the status of the flow.
        kwargs: keyword arguments passed to show_status
    """
    for work in self:
        work.check_status()

    if kwargs.pop("show", False):
        self.show_status(**kwargs)
def tags(self, resource_id=None):
    """Tag endpoint for this resource with optional tag name.

    This method will set the resource endpoint for working with Tags. The
    HTTP GET method will return all tags applied to this resource or if a
    resource id (tag name) is provided it will return the provided tag if
    it has been applied, which could be useful to verify a tag is applied.

    The provided resource_id (tag) can be applied to this resource using
    the HTTP POST method. The HTTP DELETE method will remove the provided
    tag from this resource.

    **Example Endpoint URIs**

    +--------------+------------------------------------------------------------+
    | HTTP Method  | API Endpoint URIs                                          |
    +==============+============================================================+
    | GET          | /v2/groups/{resourceType}/{uniqueId}/tags                 |
    +--------------+------------------------------------------------------------+
    | GET          | /v2/groups/{resourceType}/{uniqueId}/tags/{resourceId}    |
    +--------------+------------------------------------------------------------+
    | GET          | /v2/indicators/{resourceType}/{uniqueId}/tags             |
    +--------------+------------------------------------------------------------+
    | GET          | /v2/indicators/{resourceType}/{uniqueId}/tags/{resourceId}|
    +--------------+------------------------------------------------------------+
    | DELETE       | /v2/groups/{resourceType}/{uniqueId}/tags/{resourceId}    |
    +--------------+------------------------------------------------------------+
    | DELETE       | /v2/indicators/{resourceType}/{uniqueId}/tags/{resourceId}|
    +--------------+------------------------------------------------------------+
    | POST         | /v2/groups/{resourceType}/{uniqueId}/tags/{resourceId}    |
    +--------------+------------------------------------------------------------+
    | POST         | /v2/indicators/{resourceType}/{uniqueId}/tags/{resourceId}|
    +--------------+------------------------------------------------------------+

    Args:
        resource_id (Optional [string]): The resource id (tag name).
    """
    resource = self.copy()
    resource._request_entity = 'tag'
    resource._request_uri = '{}/tags'.format(resource._request_uri)
    if resource_id is not None:
        resource._request_uri = '{}/{}'.format(
            resource._request_uri, self.tcex.safetag(resource_id)
        )
    return resource
def local_services(self):
    """Get a list of id, name pairs for all of the known synced services.

    This method is safe to call outside of the background event loop
    without any race condition. Internally it uses a thread-safe mutex to
    protect the local copies of supervisor data and ensure that it cannot
    change while this method is iterating over it.

    Returns:
        list (id, name): A list of tuples with id and service name sorted by id
            from low to high
    """
    if not self._loop.inside_loop():
        self._state_lock.acquire()

    try:
        return sorted([(index, name) for index, name in self._name_map.items()], key=lambda element: element[0])
    finally:
        if not self._loop.inside_loop():
            self._state_lock.release()
def check(projects):
    """Check the specified projects for Python 3 compatibility."""
    log = logging.getLogger('ciu')
    log.info('{0} top-level projects to check'.format(len(projects)))
    print('Finding and checking dependencies ...')
    blockers = dependencies.blockers(projects)
    print('')
    for line in message(blockers):
        print(line)
    print('')
    for line in pprint_blockers(blockers):
        print(' ', line)
    return len(blockers) == 0
def data_from_file(self, file, apple_fix=False):
    """
    Read iCal data from file.

    :param file: file to read
    :param apple_fix: fix wrong Apple tzdata in iCal
    :return: decoded (and fixed) iCal data
    """
    with open(file, mode='rb') as f:
        content = f.read()
    if not content:
        raise IOError("File %s is not readable or is empty!" % file)
    return self.decode(content, apple_fix=apple_fix)
def unwrap(self, dt):
    """
    Get the cached value.

    Returns
    -------
    value : object
        The cached value.

    Raises
    ------
    Expired
        Raised when `dt` is greater than self.expires.
    """
    expires = self._expires
    if expires is AlwaysExpired or expires < dt:
        raise Expired(self._expires)
    return self._value
def receive_message(self, msg):
    """Receive a message sent to this device."""
    _LOGGER.debug('Starting X10Device.receive_message')
    if hasattr(msg, 'isack') and msg.isack:
        _LOGGER.debug('Got Message ACK')
        if self._send_msg_lock.locked():
            self._send_msg_lock.release()
    callbacks = self._message_callbacks.get_callbacks_from_message(msg)
    _LOGGER.debug('Found %d callbacks for msg %s', len(callbacks), msg)
    for callback in callbacks:
        _LOGGER.debug('Scheduling msg callback: %s', callback)
        self._plm.loop.call_soon(callback, msg)
    self._last_communication_received = datetime.datetime.now()
    _LOGGER.debug('Ending Device.receive_message')
def good(txt):
    """Print, emphasized 'good', the given 'txt' message"""
    print("%s# %s%s%s" % (PR_GOOD_CC, get_time_stamp(), txt, PR_NC))
    sys.stdout.flush()
def fitted(self, fid=0):
    """Test if enough Levenberg-Marquardt loops have been done.

    It returns True if no improvement possible.

    :param fid: the id of the sub-fitter (numerical)
    """
    self._checkid(fid)
    return not (self._fitids[fid]["fit"] > 0
                or self._fitids[fid]["fit"] < -0.001)
def update_policy(self, defaultHeaders):
    """ Rewrite update policy so that additional pins are added and not overwritten """
    if self.inputs is not None:
        for k, v in defaultHeaders.items():
            if k not in self.inputs:
                self.inputs[k] = v
            if k == 'pins':
                self.inputs[k] = self.inputs[k] + defaultHeaders[k]
        return self.inputs
    else:
        return self.inputs
def copyh5(inh5, outh5):
    """Recursively copy all hdf5 data from one group to another

    Data from links is copied.

    Parameters
    ----------
    inh5: str, h5py.File, or h5py.Group
        The input hdf5 data. This can be either a file name or
        an hdf5 object.
    outh5: str, h5py.File, h5py.Group, or None
        The output hdf5 data. This can be either a file name or
        an hdf5 object. If set to `None`, a new hdf5 object is
        created in memory.

    Notes
    -----
    All data in outh5 are overridden by the inh5 data.
    """
    if not isinstance(inh5, h5py.Group):
        inh5 = h5py.File(inh5, mode="r")
    if outh5 is None:
        # create file in memory
        h5kwargs = {"name": "qpimage{}.h5".format(QPImage._instances),
                    "driver": "core",
                    "backing_store": False,
                    "mode": "a"}
        outh5 = h5py.File(**h5kwargs)
        return_h5obj = True
        QPImage._instances += 1
    elif not isinstance(outh5, h5py.Group):
        # create new file
        outh5 = h5py.File(outh5, mode="w")
        return_h5obj = False
    else:
        return_h5obj = True
    # begin iteration
    for key in inh5:
        if key in outh5:
            del outh5[key]
        if isinstance(inh5[key], h5py.Group):
            outh5.create_group(key)
            copyh5(inh5[key], outh5[key])
        else:
            dset = write_image_dataset(group=outh5,
                                       key=key,
                                       data=inh5[key][:],
                                       h5dtype=inh5[key].dtype)
            dset.attrs.update(inh5[key].attrs)
    outh5.attrs.update(inh5.attrs)
    if return_h5obj:
        # in-memory or previously created instance of h5py.File
        return outh5
    else:
        # properly close the file and return its name
        fn = outh5.filename
        outh5.flush()
        outh5.close()
        return fn
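A hedged usage sketch with hypothetical file names, showing the two calling conventions described in the docstring:

# Copy between files on disk: the output file is closed and its name returned.
out_name = copyh5("input.h5", "output.h5")

# Copy into a fresh in-memory HDF5 object: the open h5py.File is returned.
mem_h5 = copyh5("input.h5", None)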
def issue_type_by_name(self, name):
    """
    :param name: Name of the issue type
    :type name: str
    :rtype: IssueType
    """
    issue_types = self.issue_types()
    try:
        issue_type = [it for it in issue_types if it.name == name][0]
    except IndexError:
        raise KeyError("Issue type '%s' is unknown." % name)
    return issue_type
def add_waveform(self, waveform):
    """
    Add a waveform to the plot.

    :param waveform: the waveform to be added
    :type waveform: :class:`~aeneas.plotter.PlotWaveform`
    :raises TypeError: if ``waveform`` is not an instance of :class:`~aeneas.plotter.PlotWaveform`
    """
    if not isinstance(waveform, PlotWaveform):
        self.log_exc(u"waveform must be an instance of PlotWaveform", None, True, TypeError)
    self.waveform = waveform
    self.log(u"Added waveform")
def get_value(self, key):
    """Extract a value for a given key."""
    for title in _TITLES.get(key, ()) + (key,):
        try:
            value = [entry['lastMeasurement']['value'] for entry in
                     self.data['sensors'] if entry['title'] == title][0]
            return value
        except IndexError:
            pass
    return None
def tip_zscores(a):
    """
    Calculates the "target identification from profiles" (TIP) zscores
    from Cheng et al. 2011, Bioinformatics 27(23):3221-3227.

    :param a: NumPy array, where each row is the signal for a feature.
    """
    weighted = a * a.mean(axis=0)
    scores = weighted.sum(axis=1)
    zscores = (scores - scores.mean()) / scores.std()
    return zscores
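An illustrative sketch with random data (shapes only; not from the original source):

import numpy as np

signal = np.random.rand(100, 50)  # 100 features x 50 signal bins
z = tip_zscores(signal)
print(z.shape)  # (100,); z has zero mean and unit variance by construction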
def mbar_W_nk(u_kn, N_k, f_k):
    """Calculate the weight matrix.

    Parameters
    ----------
    u_kn : np.ndarray, shape=(n_states, n_samples), dtype='float'
        The reduced potential energies, i.e. -log unnormalized probabilities
    N_k : np.ndarray, shape=(n_states), dtype='int'
        The number of samples in each state
    f_k : np.ndarray, shape=(n_states), dtype='float'
        The reduced free energies of each state

    Returns
    -------
    W_nk : np.ndarray, dtype='float', shape=(n_samples, n_states)
        The normalized weights.

    Notes
    -----
    Equation (9) in JCP MBAR paper.
    """
    return np.exp(mbar_log_W_nk(u_kn, N_k, f_k))
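For reference, a hedged transcription of the standard MBAR weight (Eq. (9)) in the docstring's indexing; this restates the well-known formula rather than anything verified against this codebase:

$$W_{nk} = \frac{\exp\left(f_k - u_{kn}\right)}{\sum_{l=1}^{K} N_l \exp\left(f_l - u_{ln}\right)}$$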
def current_line_num(self):
    '''Get current line number as an integer (1-based)

    Translated from PyFrame_GetLineNumber and PyCode_Addr2Line
    See Objects/lnotab_notes.txt
    '''
    if self.is_optimized_out():
        return None
    f_trace = self.field('f_trace')
    if long(f_trace) != 0:
        # we have a non-NULL f_trace:
        return self.f_lineno
    else:
        return self.co.addr2line(self.f_lasti)
def run_object_query(client, base_object_query, start_record, limit_to,
                     verbose=False):
    """inline method to take advantage of retry"""
    if verbose:
        print("[start: %d limit: %d]" % (start_record, limit_to))
    start = datetime.datetime.now()
    result = client.execute_object_query(
        object_query=base_object_query,
        start_record=start_record,
        limit_to=limit_to)
    end = datetime.datetime.now()
    if verbose:
        print("[%s - %s]" % (start, end))
    return result
def _raise_missing_antenna_errors(ant_uvw, max_err):
    """ Raises an informative error for missing antennas """
    # Find antenna uvw coordinates where any UVW component was nan
    # nan + real == nan
    problems = np.nonzero(np.add.reduce(np.isnan(ant_uvw), axis=2))
    problem_str = []

    for c, a in zip(*problems):
        problem_str.append("[chunk %d antenna %d]" % (c, a))
        # Exit early
        if len(problem_str) >= max_err:
            break

    # Return early if nothing was wrong
    if len(problem_str) == 0:
        return

    # Add a preamble and raise exception
    problem_str = ["Antenna were missing"] + problem_str
    raise AntennaMissingError('\n'.join(problem_str))
@contextlib.contextmanager  # assumed: the original source decorates this generator so it can wrap a code block
def time_logger(name):
    """This logs the time usage of a code block"""
    start_time = time.time()
    yield
    end_time = time.time()
    total_time = end_time - start_time
    logging.info("%s; time: %ss", name, total_time)
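A minimal usage sketch (hypothetical block name), assuming the context-manager decoration noted above:

import time

with time_logger("demo block"):
    time.sleep(0.25)  # on exit, logs something like "demo block; time: 0.25...s"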
def init_params(self, initializer=Uniform(0.01), arg_params=None, aux_params=None,
                allow_missing=False, force_init=False, allow_extra=False):
    """Initializes parameters.

    Parameters
    ----------
    initializer : Initializer
    arg_params : dict
        Defaults to ``None``. Existing parameters. This has higher priority
        than `initializer`.
    aux_params : dict
        Defaults to ``None``. Existing auxiliary states. This has higher priority
        than `initializer`.
    allow_missing : bool
        Allow missing values in `arg_params` and `aux_params` (if not ``None``).
        In this case, missing values will be filled with `initializer`.
    force_init : bool
        Defaults to ``False``.
    allow_extra : boolean, optional
        Whether to allow extra parameters that are not needed by the symbol.
        If this is True, no error will be thrown when `arg_params` or `aux_params`
        contain extra parameters that are not needed by the executor.
    """
    if self.params_initialized and not force_init:
        return

    assert self.binded, 'call bind before initializing the parameters'
    self._curr_module.init_params(initializer=initializer, arg_params=arg_params,
                                  aux_params=aux_params, allow_missing=allow_missing,
                                  force_init=force_init, allow_extra=allow_extra)
    self._params_dirty = False
    self.params_initialized = True
def add_to_configs(self, configs):
    """Add one or more measurement configurations to the stored
    configurations

    Parameters
    ----------
    configs: list or numpy.ndarray
        list or array of configurations

    Returns
    -------
    configs: Kx4 numpy.ndarray
        array holding all configurations of this instance
    """
    if len(configs) == 0:
        return None

    if self.configs is None:
        self.configs = np.atleast_2d(configs)
    else:
        configs = np.atleast_2d(configs)
        self.configs = np.vstack((self.configs, configs))
    return self.configs
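The stacking behaviour reduces to plain numpy; an illustrative sketch with hypothetical four-electrode (ABMN) configurations:

import numpy as np

existing = np.array([[1, 2, 3, 4]])
new = [[1, 2, 4, 3], [2, 3, 4, 1]]
stacked = np.vstack((existing, np.atleast_2d(new)))
print(stacked.shape)  # (3, 4): one row per measurement configuration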
def weight_statistics(self):
    """
    Extract a statistical summary of edge weights present in
    the graph.

    :return: A dict with an 'all_weights' list and 'min', 'max',
        'mean', 'variance' summary statistics
    """
    all_weights = [d.get('weight', None) for u, v, d
                   in self.graph.edges(data=True)]
    stats = describe(all_weights, nan_policy='omit')
    return {
        'all_weights': all_weights,
        'min': stats.minmax[0],
        'max': stats.minmax[1],
        'mean': stats.mean,
        'variance': stats.variance
    }
def purge_module(self, module_name):
    """
    A module has been removed, e.g. a module that had an error.
    We need to find any containers and remove the module from them.
    """
    containers = self.config["py3_config"][".module_groups"]
    containers_to_update = set()
    if module_name in containers:
        containers_to_update.update(set(containers[module_name]))
    for container in containers_to_update:
        try:
            self.modules[container].module_class.items.remove(module_name)
        except ValueError:
            pass
def generate(self, z_mu=None):
    """ Generate data by sampling from latent space.

    If z_mu is not None, data for this point in latent space is
    generated. Otherwise, z_mu is drawn from prior in latent
    space.
    """
    if z_mu is None:
        z_mu = np.random.normal(size=self.network_architecture["n_z"])
    # Note: This maps to mean of distribution, we could alternatively
    # sample from Gaussian distribution
    return self.sess.run(self.x_reconstr_mean,
                         feed_dict={self.z: z_mu})
def choi_matrix(pauli_tm, basis):
    """
    Compute the Choi matrix for a quantum process from its Pauli Transfer Matrix.

    This agrees with the definition in
    `Chow et al. <https://doi.org/10.1103/PhysRevLett.109.060501>`_
    except for a different overall normalization.
    Our normalization agrees with that of qutip.

    :param numpy.ndarray pauli_tm: The Pauli Transfer Matrix as 2d-array.
    :param OperatorBasis basis: The operator basis, typically products of normalized Paulis.
    :return: The Choi matrix as qutip.Qobj.
    :rtype: qutip.Qobj
    """
    if not basis.is_orthonormal():  # pragma no coverage
        raise ValueError("Need an orthonormal operator basis.")
    if not all((is_hermitian(op) for op in basis.ops)):  # pragma no coverage
        raise ValueError("Need an operator basis of hermitian operators.")

    sbasis = basis.super_basis()
    D = basis.dim
    choi = sum((pauli_tm[jj, kk] * sbasis.ops[jj + kk * D]
                for jj in range(D) for kk in range(D)))
    choi.superrep = CHOI
    return choi
def validate(self, data):
    """
    Validate data using the defined regex.

    :param data: data to be validated
    :return: the validated data.
    """
    e = self._error
    try:
        if self._pattern.search(data):
            return data
        else:
            raise SchemaError("%r does not match %r" % (self, data), e)
    except TypeError:
        raise SchemaError("%r is not string nor buffer" % data, e)
def get_dataframe_from_data(data):
    """
    Parameters
    ----------
    data : string or pandas dataframe.
        If string, data should be an absolute or relative path to a CSV file
        containing the long format data for this choice model. Note long format
        has one row per available alternative for each observation. If pandas
        dataframe, the dataframe should be the long format data for the choice
        model.

    Returns
    -------
    dataframe : pandas dataframe of the long format data for the choice model.
    """
    if isinstance(data, str):
        if data.endswith(".csv"):
            dataframe = pd.read_csv(data)
        else:
            msg_1 = "data = {} is of unknown file type."
            msg_2 = " Please pass path to csv."
            raise ValueError(msg_1.format(data) + msg_2)
    elif isinstance(data, pd.DataFrame):
        dataframe = data
    else:
        msg_1 = "type(data) = {} is an invalid type."
        msg_2 = " Please pass pandas dataframe or path to csv."
        raise TypeError(msg_1.format(type(data)) + msg_2)

    return dataframe
def _onArgument(self, name, annotation):
    """Memorizes a function argument"""
    self.objectsStack[-1].arguments.append(Argument(name, annotation))
def update_state(self,
                 slots: Union[List[Tuple[str, Any]], Dict[str, Any]]) -> 'Tracker':
    """
    Updates dialogue state with new ``slots``, calculates features.

    Returns:
        Tracker: the updated tracker.
    """
    pass
def set_data(self, data=None, **kwargs):
    """Set the line data

    Parameters
    ----------
    data : array-like
        The data.
    **kwargs : dict
        Keyword arguments to pass to MarkerVisual and LineVisual.
    """
    if data is None:
        pos = None
    else:
        if isinstance(data, tuple):
            pos = np.array(data).T.astype(np.float32)
        else:
            pos = np.atleast_1d(data).astype(np.float32)
        if pos.ndim == 1:
            pos = pos[:, np.newaxis]
        elif pos.ndim > 2:
            raise ValueError('data must have at most two dimensions')

        if pos.size == 0:
            pos = self._line.pos
            # if both args and keywords are zero, then there is no
            # point in calling this function.
            if len(kwargs) == 0:
                raise TypeError("neither line points nor line properties "
                                "are provided")
        elif pos.shape[1] == 1:
            x = np.arange(pos.shape[0], dtype=np.float32)[:, np.newaxis]
            pos = np.concatenate((x, pos), axis=1)
        # if args are empty, don't modify position
        elif pos.shape[1] > 3:
            raise TypeError("Too many coordinates given (%s; max is 3)."
                            % pos.shape[1])

    # todo: have both sub-visuals share the same buffers.
    line_kwargs = {}
    for k in self._line_kwargs:
        if k in kwargs:
            k_ = self._kw_trans[k] if k in self._kw_trans else k
            line_kwargs[k] = kwargs.pop(k_)

    if pos is not None or len(line_kwargs) > 0:
        self._line.set_data(pos=pos, **line_kwargs)

    marker_kwargs = {}
    for k in self._marker_kwargs:
        if k in kwargs:
            k_ = self._kw_trans[k] if k in self._kw_trans else k
            marker_kwargs[k_] = kwargs.pop(k)

    if pos is not None or len(marker_kwargs) > 0:
        self._markers.set_data(pos=pos, **marker_kwargs)

    if len(kwargs) > 0:
        raise TypeError("Invalid keyword arguments: %s" % kwargs.keys())
def _read(self, command, future):
    """Invoked when a command is executed to read and parse its results.

    It will loop on the IOLoop until the response is complete and then
    set the value of the response in the execution future.

    :param command: The command that was being executed
    :type command: tredis.client.Command
    :param future: The execution future
    :type future: tornado.concurrent.Future
    """
    response = self._reader.gets()
    if response is not False:
        if isinstance(response, hiredis.ReplyError):
            if response.args[0].startswith('MOVED '):
                self._on_cluster_data_moved(response.args[0], command,
                                            future)
            elif response.args[0].startswith('READONLY '):
                self._on_read_only_error(command, future)
            else:
                future.set_exception(exceptions.RedisError(response))
        elif command.callback is not None:
            future.set_result(command.callback(response))
        elif command.expectation is not None:
            self._eval_expectation(command, response, future)
        else:
            future.set_result(response)
    else:
        def on_data(data):
            # LOGGER.debug('Read %r', data)
            self._reader.feed(data)
            self._read(command, future)
        command.connection.read(on_data)
def _try_assign_utc_time(self, raw_time, time_base):
    """Try to assign a UTC time to this reading."""
    # Check if the raw time is encoded UTC since y2k or just uptime
    if raw_time != IOTileEvent.InvalidRawTime and (raw_time & (1 << 31)):
        y2k_offset = self.raw_time ^ (1 << 31)
        return self._Y2KReference + datetime.timedelta(seconds=y2k_offset)

    if time_base is not None:
        return time_base + datetime.timedelta(seconds=raw_time)

    return None
def set_obs_angle(self, theta_rad):
    """Set the observer angle relative to the field.

    **Call signature**

    *theta_rad*
        The angle between the ray path and the local magnetic field,
        in radians.

    Returns *self* for convenience in chaining.
    """
    self.in_vals[IN_VAL_THETA] = theta_rad * 180 / np.pi  # rad => deg
    return self
def _get_last_worker_died(self):
    """Return the last died worker information or None"""
    for service_id in list(self._running_services.keys()):
        # We copy the list to clean the original one
        processes = list(self._running_services[service_id].items())
        for process, worker_id in processes:
            if not process.is_alive():
                self._run_hooks('dead_worker', service_id, worker_id,
                                process.exitcode)
                if process.exitcode < 0:
                    sig = _utils.signal_to_name(process.exitcode)
                    LOG.info('Child %(pid)d killed by signal %(sig)s',
                             dict(pid=process.pid, sig=sig))
                else:
                    LOG.info('Child %(pid)d exited with status %(code)d',
                             dict(pid=process.pid, code=process.exitcode))
                del self._running_services[service_id][process]
                return service_id, worker_id
def match_date(self, value, strict=False):
    """Check if value is a date."""
    value = stringify(value)
    try:
        parse(value)
    except Exception:
        self.shout('Value %r is not a valid date', strict, value)
def get_symlink_luid():
    """
    Get the LUID for the SeCreateSymbolicLinkPrivilege
    """
    symlink_luid = privilege.LUID()
    res = privilege.LookupPrivilegeValue(
        None, "SeCreateSymbolicLinkPrivilege", symlink_luid)
    if not res > 0:
        raise RuntimeError("Couldn't lookup privilege value")
    return symlink_luid
def safe_type(self, data, tree):
    """
    Make sure that the incoming data complies with the class type we
    are expecting it to be. In this case, classes that inherit from this
    base class expect data to be of type ``list``.
    """
    if not isinstance(data, list):
        name = self.__class__.__name__
        msg = "did not pass validation against callable: %s" % name
        reason = 'expected a list but got %s' % safe_repr(data)
        raise Invalid(self.schema, tree, reason=reason, pair='value', msg=msg)
def lstlti(x, n, array):
    """
    Given a number x and an array of non-decreasing ints,
    find the index of the largest array element less than x.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/lstlti_c.html

    :param x: Value to search against
    :type x: int
    :param n: Number of elements in array
    :type n: int
    :param array: Array of possible lower bounds
    :type array: list
    :return: index of the last element of array that is less than x.
    :rtype: int
    """
    array = stypes.toIntVector(array)
    x = ctypes.c_int(x)
    n = ctypes.c_int(n)
    return libspice.lstlti_c(x, n, array)
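A hedged usage sketch, assuming the usual spiceypy packaging of this wrapper:

import spiceypy

idx = spiceypy.lstlti(3, 5, [1, 2, 3, 4, 5])
print(idx)  # 1: array[1] == 2 is the last element less than 3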
def runningMedian(seq, M):
    """
    Purpose: Find the median for the points in a sliding window (odd number in size)
             as it is moved from left to right by one point at a time.
    Inputs:
        seq -- list containing items for which a running median (in a sliding window)
               is to be calculated
        M -- number of items in window (window size) -- must be an integer > 1
    Outputs:
        medians -- list of medians with size N - M + 1
    Note:
        1. The median of a finite list of numbers is the "center" value when this list
           is sorted in ascending order.
        2. If M is an even number the two elements in the window that
           are close to the center are averaged to give the median (this
           is not by definition)
    """
    seq = iter(seq)
    s = []
    m = M // 2  # // does a truncated division like integer division in Python 2

    # Set up list s (to be sorted) and load deque with first window of seq
    s = [item for item in islice(seq, M)]
    d = deque(s)

    # Simple lambda function to handle even/odd window sizes
    median = lambda: s[m] if bool(M & 1) else (s[m - 1] + s[m]) * 0.5

    # Sort it in increasing order and extract the median ("center" of the sorted window)
    s.sort()
    medians = [median()]

    # Now slide the window by one point to the right for each new position (each pass through
    # the loop). Stop when the item in the right end of the deque contains the last item in seq
    for item in seq:
        old = d.popleft()           # pop oldest from left
        d.append(item)              # push newest in from right
        del s[bisect_left(s, old)]  # locate insertion point and then remove old
        insort(s, item)             # insert newest such that new sort is not required
        medians.append(median())

    return medians
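A minimal usage sketch, with the imports the function assumes:

from bisect import bisect_left, insort
from collections import deque
from itertools import islice

# Windows of 3: [2,9,4] [9,4,7] [4,7,1] [7,1,8]
print(runningMedian([2, 9, 4, 7, 1, 8], 3))  # [4, 7, 4, 7]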
def update_checkplot_objectinfo(cpf,
                                fast_mode=False,
                                findercmap='gray_r',
                                finderconvolve=None,
                                deredden_object=True,
                                custom_bandpasses=None,
                                gaia_submit_timeout=10.0,
                                gaia_submit_tries=3,
                                gaia_max_timeout=180.0,
                                gaia_mirror=None,
                                complete_query_later=True,
                                lclistpkl=None,
                                nbrradiusarcsec=60.0,
                                maxnumneighbors=5,
                                plotdpi=100,
                                findercachedir='~/.astrobase/stamp-cache',
                                verbose=True):
    '''This updates a checkplot objectinfo dict.

    Useful in cases where a previous round of GAIA/finderchart/external catalog
    acquisition failed. This will preserve the following keys in the checkplot
    if they exist::

        comments
        varinfo
        objectinfo.objecttags

    Parameters
    ----------
    cpf : str
        The path to the checkplot pickle to update.
    fast_mode : bool or float
        This runs the external catalog operations in a "fast" mode, with short
        timeouts and not trying to hit external catalogs that take a long time
        to respond. See the docstring for
        :py:func:`astrobase.checkplot.pkl_utils._pkl_finder_objectinfo` for
        details on how this works. If this is True, will run in "fast" mode with
        default timeouts (5 seconds in most cases). If this is a float, will run
        in "fast" mode with the provided timeout value in seconds.
    findercmap : str or matplotlib.cm.ColorMap object
        The Colormap object to use for the finder chart image.
    finderconvolve : astropy.convolution.Kernel object or None
        If not None, the Kernel object to use for convolving the finder image.
    deredden_object : bool
        If this is True, will use the 2MASS DUST service to get extinction
        coefficients in various bands, and then try to deredden the magnitudes
        and colors of the object already present in the checkplot's objectinfo
        dict.
    custom_bandpasses : dict
        This is a dict used to provide custom bandpass definitions for any
        magnitude measurements in the objectinfo dict that are not automatically
        recognized by the `varclass.starfeatures.color_features` function. See
        its docstring for details on the required format.
    gaia_submit_timeout : float
        Sets the timeout in seconds to use when submitting a request to look up
        the object's information to the GAIA service. Note that if `fast_mode`
        is set, this is ignored.
    gaia_submit_tries : int
        Sets the maximum number of times the GAIA services will be contacted to
        obtain this object's information. If `fast_mode` is set, this is
        ignored, and the services will be contacted only once (meaning that a
        failure to respond will be silently ignored and no GAIA data will be
        added to the checkplot's objectinfo dict).
    gaia_max_timeout : float
        Sets the timeout in seconds to use when waiting for the GAIA service to
        respond to our request for the object's information. Note that if
        `fast_mode` is set, this is ignored.
    gaia_mirror : str
        This sets the GAIA mirror to use. This is a key in the
        :py:data:`astrobase.services.gaia.GAIA_URLS` dict which defines the URLs
        to hit for each mirror.
    complete_query_later : bool
        If this is True, saves the state of GAIA queries that are not yet
        complete when `gaia_max_timeout` is reached while waiting for the GAIA
        service to respond to our request. A later call for GAIA info on the
        same object will attempt to pick up the results from the existing query
        if it's completed. If `fast_mode` is True, this is ignored.
    lclistpkl : dict or str
        If this is provided, must be a dict resulting from reading a catalog
        produced by the `lcproc.catalogs.make_lclist` function or a str path
        pointing to the pickle file produced by that function. This catalog is
        used to find neighbors of the current object in the current light curve
        collection. Looking at neighbors of the object within the radius
        specified by `nbrradiusarcsec` is useful for light curves produced by
        instruments that have a large pixel scale, so are susceptible to
        blending of variability and potential confusion of neighbor variability
        with that of the actual object being looked at. If this is None, no
        neighbor lookups will be performed.
    nbrradiusarcsec : float
        The radius in arcseconds to use for a search conducted around the
        coordinates of this object to look for any potential confusion and
        blending of variability amplitude caused by their proximity.
    maxnumneighbors : int
        The maximum number of neighbors that will have their light curves and
        magnitudes noted in this checkplot as potential blends with the target
        object.
    plotdpi : int
        The resolution in DPI of the plots to generate in this function
        (e.g. the finder chart, etc.)
    findercachedir : str
        The path to the astrobase cache directory for finder chart downloads
        from the NASA SkyView service.
    verbose : bool
        If True, will indicate progress and warn about potential problems.

    Returns
    -------
    str
        Path to the updated checkplot pickle file.
    '''
    cpd = _read_checkplot_picklefile(cpf)

    if cpd['objectinfo']['objecttags'] is not None:
        objecttags = cpd['objectinfo']['objecttags'][::]
    else:
        objecttags = None

    varinfo = deepcopy(cpd['varinfo'])

    if 'comments' in cpd and cpd['comments'] is not None:
        comments = cpd['comments'][::]
    else:
        comments = None

    newcpd = _pkl_finder_objectinfo(cpd['objectinfo'],
                                    varinfo,
                                    findercmap,
                                    finderconvolve,
                                    cpd['sigclip'],
                                    cpd['normto'],
                                    cpd['normmingap'],
                                    fast_mode=fast_mode,
                                    deredden_object=deredden_object,
                                    custom_bandpasses=custom_bandpasses,
                                    gaia_submit_timeout=gaia_submit_timeout,
                                    gaia_submit_tries=gaia_submit_tries,
                                    gaia_max_timeout=gaia_max_timeout,
                                    gaia_mirror=gaia_mirror,
                                    complete_query_later=complete_query_later,
                                    lclistpkl=lclistpkl,
                                    nbrradiusarcsec=nbrradiusarcsec,
                                    maxnumneighbors=maxnumneighbors,
                                    plotdpi=plotdpi,
                                    findercachedir=findercachedir,
                                    verbose=verbose)

    #
    # don't update neighbors or finder chart if the new one is bad
    #
    if (newcpd['finderchart'] is None and
            cpd['finderchart'] is not None):
        newcpd['finderchart'] = deepcopy(cpd['finderchart'])

    if (newcpd['neighbors'] is None and
            cpd['neighbors'] is not None):
        newcpd['neighbors'] = deepcopy(cpd['neighbors'])

    #
    # if there's existing GAIA info, don't overwrite if the new objectinfo dict
    # doesn't have any
    #
    if (('failed' in newcpd['objectinfo']['gaia_status'] or
         ('gaiaid' in newcpd['objectinfo'] and
          newcpd['objectinfo']['gaiaid'] is None)) and
            'ok' in cpd['objectinfo']['gaia_status']):

        newcpd['objectinfo']['gaia_status'] = deepcopy(
            cpd['objectinfo']['gaia_status']
        )
        if 'gaiaid' in cpd['objectinfo']:
            newcpd['objectinfo']['gaiaid'] = deepcopy(
                cpd['objectinfo']['gaiaid']
            )

        # single-valued GAIA measurements for the target object
        for key in ('gaiamag',
                    'gaia_absmag',
                    'gaia_parallax',
                    'gaia_parallax_err',
                    'gaia_pmra',
                    'gaia_pmra_err',
                    'gaia_pmdecl',
                    'gaia_pmdecl_err'):
            newcpd['objectinfo'][key] = deepcopy(cpd['objectinfo'][key])

    # scalar GAIA neighbor stats: keep the old value if the new one is not
    # finite but the old one is
    for key in ('gaia_neighbors',
                'gaia_closest_distarcsec',
                'gaia_closest_gmagdiff'):
        if (not np.isfinite(newcpd['objectinfo'][key]) and
                np.isfinite(cpd['objectinfo'][key])):
            newcpd['objectinfo'][key] = deepcopy(cpd['objectinfo'][key])

    # per-neighbor GAIA arrays: keep the old value if the new one is None
    for key in ('gaia_ids',
                'gaia_xypos',
                'gaia_mags',
                'gaia_parallaxes',
                'gaia_parallax_errs',
                'gaia_pmras',
                'gaia_pmra_errs',
                'gaia_pmdecls',
                'gaia_pmdecl_errs',
                'gaia_absolute_mags',
                'gaiak_colors',
                'gaia_dists'):
        if (newcpd['objectinfo'][key] is None and
                cpd['objectinfo'][key] is not None):
            newcpd['objectinfo'][key] = deepcopy(cpd['objectinfo'][key])

    #
    # don't overwrite good SIMBAD info with bad
    #
    if ('failed' in newcpd['objectinfo']['simbad_status'] and
            'ok' in cpd['objectinfo']['simbad_status']):
        newcpd['objectinfo']['simbad_status'] = deepcopy(
            cpd['objectinfo']['simbad_status']
        )

    for key in ('simbad_nmatches',
                'simbad_mainid',
                'simbad_objtype',
                'simbad_allids',
                'simbad_distarcsec',
                'simbad_best_mainid',
                'simbad_best_objtype',
                'simbad_best_allids',
                'simbad_best_distarcsec'):
        if (newcpd['objectinfo'][key] is None and
                cpd['objectinfo'][key] is not None):
            newcpd['objectinfo'][key] = deepcopy(cpd['objectinfo'][key])

    #
    # update the objectinfo dict
    #
    cpd.update(newcpd)
cpd['objectinfo']['objecttags'] = objecttags
cpd['comments'] = comments
newcpf = _write_checkplot_picklefile(cpd, outfile=cpf)
return newcpf
|
This updates a checkplot objectinfo dict.
Useful in cases where a previous round of GAIA/finderchart/external catalog
acquisition failed. This will preserve the following keys in the checkplot
if they exist::
comments
varinfo
objectinfo.objecttags
Parameters
----------
cpf : str
The path to the checkplot pickle to update.
fast_mode : bool or float
This runs the external catalog operations in a "fast" mode, with short
timeouts and not trying to hit external catalogs that take a long time
to respond. See the docstring for
:py:func:`astrobase.checkplot.pkl_utils._pkl_finder_objectinfo` for
details on how this works. If this is True, will run in "fast" mode with
default timeouts (5 seconds in most cases). If this is a float, will run
in "fast" mode with the provided timeout value in seconds.
findercmap : str or matplotlib.cm.ColorMap object
The Colormap object to use for the finder chart image.
finderconvolve : astropy.convolution.Kernel object or None
If not None, the Kernel object to use for convolving the finder image.
deredden_object : bool
If this is True, will use the 2MASS DUST service to get extinction
coefficients in various bands, and then try to deredden the magnitudes
and colors of the object already present in the checkplot's objectinfo
dict.
custom_bandpasses : dict
This is a dict used to provide custom bandpass definitions for any
magnitude measurements in the objectinfo dict that are not automatically
recognized by the `varclass.starfeatures.color_features` function. See
its docstring for details on the required format.
gaia_submit_timeout : float
Sets the timeout in seconds to use when submitting a request to look up
the object's information to the GAIA service. Note that if `fast_mode`
is set, this is ignored.
gaia_submit_tries : int
Sets the maximum number of times the GAIA services will be contacted to
obtain this object's information. If `fast_mode` is set, this is
ignored, and the services will be contacted only once (meaning that a
failure to respond will be silently ignored and no GAIA data will be
added to the checkplot's objectinfo dict).
gaia_max_timeout : float
Sets the timeout in seconds to use when waiting for the GAIA service to
respond to our request for the object's information. Note that if
`fast_mode` is set, this is ignored.
gaia_mirror : str
This sets the GAIA mirror to use. This is a key in the
:py:data:`astrobase.services.gaia.GAIA_URLS` dict which defines the URLs
to hit for each mirror.
complete_query_later : bool
If this is True, saves the state of GAIA queries that are not yet
complete when `gaia_max_timeout` is reached while waiting for the GAIA
service to respond to our request. A later call for GAIA info on the
same object will attempt to pick up the results from the existing query
if it's completed. If `fast_mode` is True, this is ignored.
lclistpkl : dict or str
If this is provided, must be a dict resulting from reading a catalog
produced by the `lcproc.catalogs.make_lclist` function or a str path
pointing to the pickle file produced by that function. This catalog is
used to find neighbors of the current object in the current light curve
collection. Looking at neighbors of the object within the radius
specified by `nbrradiusarcsec` is useful for light curves produced by
instruments that have a large pixel scale, so are susceptible to
blending of variability and potential confusion of neighbor variability
with that of the actual object being looked at. If this is None, no
neighbor lookups will be performed.
nbrradiusarcsec : float
The radius in arcseconds to use for a search conducted around the
coordinates of this object to look for any potential confusion and
blending of variability amplitude caused by their proximity.
maxnumneighbors : int
The maximum number of neighbors that will have their light curves and
magnitudes noted in this checkplot as potential blends with the target
object.
plotdpi : int
The resolution in DPI of the plots to generate in this function
(e.g. the finder chart, etc.)
findercachedir : str
The path to the astrobase cache directory for finder chart downloads
from the NASA SkyView service.
verbose : bool
If True, will indicate progress and warn about potential problems.
Returns
-------
str
Path to the updated checkplot pickle file.
|
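A minimal call sketch for the updater above. The def line falls outside this excerpt, so the function name and import path below are assumed (astrobase's checkplot module), and every argument value is illustrative only.
# assumed name/import path; the checkplot path and settings are hypothetical
from astrobase.checkplot.pkl import update_checkplot_objectinfo

updated_cpf = update_checkplot_objectinfo(
    'checkplot-object1.pkl',  # cpf: an existing checkplot pickle
    fast_mode=10.0,           # "fast" mode with 10 second timeouts
    verbose=True,
)
print(updated_cpf)  # path to the updated pickle, per the Returns section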
def extract_links(bs4):
"""Extracting links from BeautifulSoup object
:param bs4: `BeautifulSoup`
:return: `list` List of links
"""
unique_links = list(set(anchor['href'] for anchor in bs4.select('a[href]')))
# remove irrelevant fragment-only links
unique_links = [link for link in unique_links if link != '#']
# fix invalid links by adding an 'http' scheme
return [convert_invalid_url(link) for link in unique_links]
|
Extract links from a BeautifulSoup object
:param bs4: `BeautifulSoup`
:return: `list` List of links
|
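A quick usage sketch for extract_links. The convert_invalid_url helper is defined elsewhere in the source, so a plausible stand-in is assumed here.
from bs4 import BeautifulSoup

def convert_invalid_url(link):
    # stand-in for the real helper: prefix schemeless links with http
    return link if link.startswith(('http://', 'https://')) else 'http://' + link

html = '<a href="https://example.com/a">a</a> <a href="#">skip</a> <a href="example.org">b</a>'
soup = BeautifulSoup(html, 'html.parser')
print(extract_links(soup))
# e.g. ['https://example.com/a', 'http://example.org'] (set order varies)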
def filter_none(list_of_points):
"""
:param list_of_points:
:return: list_of_points with None's removed
"""
remove_elementnone = filter(lambda p: p is not None, list_of_points)
remove_sublistnone = filter(lambda p: not contains_none(p), remove_elementnone)
return list(remove_sublistnone)
|
:param list_of_points:
:return: list_of_points with None's removed
|
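filter_none also relies on a contains_none helper defined elsewhere; with a plausible stand-in, the two-stage filtering is easy to see.
def contains_none(point):
    # assumed behavior of the external helper: any None coordinate
    return any(coord is None for coord in point)

points = [(1, 2), None, (3, None), (4, 5)]
print(filter_none(points))  # [(1, 2), (4, 5)]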
def formatmany(self, sql, many_params):
"""
Formats the SQL query to use ordinal parameters instead of named
parameters.
*sql* (|string|) is the SQL query.
*many_params* (|iterable|) contains each *params* to format.
- *params* (|dict|) maps each named parameter (|string|) to value
(|object|). If |self.named| is "numeric", then *params* can be
simply a |sequence| of values mapped by index.
Returns a 2-|tuple| containing: the formatted SQL query (|string|),
and a |list| containing each |list| of ordinal parameters.
"""
if isinstance(sql, unicode):
string_type = unicode
elif isinstance(sql, bytes):
string_type = bytes
sql = sql.decode(_BYTES_ENCODING)
else:
raise TypeError("sql:{!r} is not a unicode or byte string.".format(sql))
if not isinstance(many_params, collections.Iterable) or isinstance(many_params, (unicode, bytes)):
raise TypeError("many_params:{!r} is not iterable.".format(many_params))
# Find named parameters.
names = self.match.findall(sql)
name_set = set(names)
# Map named parameters to ordinals.
many_ord_params = []
name_to_ords = {}
name_to_len = {}
repl_str = self.replace
repl_tuple = (repl_str,)
for i, params in enumerate(many_params):
if self.named == 'numeric':
if isinstance(params, collections.Mapping):
params = {string_type(idx): val for idx, val in iteritems(params)}
elif isinstance(params, collections.Sequence) and not isinstance(params, (unicode, bytes)):
params = {string_type(idx): val for idx, val in enumerate(params, 1)}
if not isinstance(params, collections.Mapping):
raise TypeError("many_params[{}]:{!r} is not a dict.".format(i, params))
if not i: # first
# Map names to ordinals, and determine what names are tuples and
# what their lengths are.
for name in name_set:
value = params[name]
if isinstance(value, tuple):
tuple_len = len(value)
name_to_ords[name] = '(' + ','.join(repl_tuple * tuple_len) + ')'
name_to_len[name] = tuple_len
else:
name_to_ords[name] = repl_str
name_to_len[name] = None
# Make sure tuples match up and collapse tuples into ordinals.
ord_params = []
for name in names:
value = params[name]
tuple_len = name_to_len[name]
if tuple_len is not None:
if not isinstance(value, tuple):
raise TypeError("many_params[{}][{!r}]:{!r} was expected to be a tuple.".format(i, name, value))
elif len(value) != tuple_len:
raise ValueError("many_params[{}][{!r}]:{!r} length was expected to be {}.".format(i, name, value, tuple_len))
ord_params.extend(value)
else:
ord_params.append(value)
many_ord_params.append(ord_params)
# Replace named parameters with ordinals.
sql = self.match.sub(lambda m: name_to_ords[m.group(1)], sql)
# Make sure the query is returned as the proper string type.
if string_type is bytes:
sql = sql.encode(_BYTES_ENCODING)
# Return formatted SQL and new ordinal parameters.
return sql, many_ord_params
|
Formats the SQL query to use ordinal parameters instead of named
parameters.
*sql* (|string|) is the SQL query.
*many_params* (|iterable|) contains each *params* to format.
- *params* (|dict|) maps each named parameter (|string|) to value
(|object|). If |self.named| is "numeric", then *params* can be
simply a |sequence| of values mapped by index.
Returns a 2-|tuple| containing: the formatted SQL query (|string|),
and a |list| containing each |list| of ordinal parameters.
|
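The same named-to-ordinal rewrite can be sketched without the class state (self.match, self.replace, self.named). This standalone version assumes ':name' placeholders and qmark-style output.
import re

def to_qmark(sql, params):
    # rewrite ':name' placeholders to '?' and collect values in order
    pattern = re.compile(r':(\w+)')
    ordinals = [params[name] for name in pattern.findall(sql)]
    return pattern.sub('?', sql), ordinals

sql, ordinals = to_qmark('SELECT * FROM t WHERE a = :a AND b = :b',
                         {'a': 1, 'b': 2})
print(sql)       # SELECT * FROM t WHERE a = ? AND b = ?
print(ordinals)  # [1, 2]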
def register_hooks(self, field):
"""Register a field on its target hooks."""
for hook, subhooks in field.register_hooks():
self.hooks[hook].append(field)
self.subhooks[hook] |= set(subhooks)
|
Register a field on its target hooks.
|
def pick_up_tip(self, location=None, presses=None, increment=None):
"""
Pick up a tip for the Pipette to run liquid-handling commands with
Notes
-----
A tip can be manually set by passing a `location`. If no location
is passed, the Pipette will pick up the next available tip in
its `tip_racks` list (see :any:`Pipette`)
Parameters
----------
location : :any:`Placeable` or tuple(:any:`Placeable`, :any:`Vector`)
The :any:`Placeable` (:any:`Well`) to perform the pick_up_tip.
Can also be a tuple with first item :any:`Placeable`,
second item relative :any:`Vector`
presses : int
The number of times to lower and then raise the pipette when
picking up a tip, to ensure a good seal (0 [zero] will result in
the pipette hovering over the tip but not picking it up--generally
not desirable, but could be used for a dry run). Default: 3 presses
increment : int
The additional distance to travel on each successive press (e.g.:
if presses=3 and increment=1, then the first press will travel down
into the tip by 3.5mm, the second by 4.5mm, and the third by 5.5mm.
Default: 1mm
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
..
>>> from opentrons import instruments, labware, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> tiprack = labware.load('GEB-tiprack-300', '2') # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='left',
... tip_racks=[tiprack]) # doctest: +SKIP
>>> p300.pick_up_tip(tiprack[0]) # doctest: +SKIP
>>> p300.return_tip() # doctest: +SKIP
# `pick_up_tip` will automatically go to tiprack[1]
>>> p300.pick_up_tip() # doctest: +SKIP
>>> p300.return_tip() # doctest: +SKIP
"""
if self.tip_attached:
log.warning("There is already a tip attached to this pipette.")
if not location:
location = self.get_next_tip()
self.current_tip(None)
if location:
placeable, _ = unpack_location(location)
self.current_tip(placeable)
presses = (self._pick_up_presses
if not helpers.is_number(presses)
else presses)
increment = (self._pick_up_increment
if not helpers.is_number(increment)
else increment)
def _pick_up_tip(
self, location, presses, increment):
self.instrument_actuator.set_active_current(self._plunger_current)
self.robot.poses = self.instrument_actuator.move(
self.robot.poses,
x=self._get_plunger_position('bottom')
)
self.current_volume = 0
self.move_to(self.current_tip().top(0))
for i in range(int(presses)):
# move nozzle down into the tip
self.instrument_mover.push_speed()
self.instrument_mover.push_active_current()
self.instrument_mover.set_active_current(self._pick_up_current)
self.instrument_mover.set_speed(self._pick_up_speed)
dist = (-1 * self._pick_up_distance) + (-1 * increment * i)
self.move_to(
self.current_tip().top(dist),
strategy='direct')
# move nozzle back up
self.instrument_mover.pop_active_current()
self.instrument_mover.pop_speed()
self.move_to(
self.current_tip().top(0),
strategy='direct')
self._add_tip(
length=self._tip_length
)
# neighboring tips tend to get stuck in the space between
# the volume chamber and the drop-tip sleeve on p1000.
# This extra shake ensures those tips are removed
if 'needs-pickup-shake' in self.quirks:
self._shake_off_tips(location)
self._shake_off_tips(location)
self.previous_placeable = None # no longer inside a placeable
self.robot.poses = self.instrument_mover.fast_home(
self.robot.poses, self._pick_up_distance)
return self
do_publish(self.broker, commands.pick_up_tip, self.pick_up_tip,
'before', None, None, self, location, presses, increment)
_pick_up_tip(
self, location=location, presses=presses, increment=increment)
do_publish(self.broker, commands.pick_up_tip, self.pick_up_tip,
'after', self, None, self, location, presses, increment)
return self
|
Pick up a tip for the Pipette to run liquid-handling commands with
Notes
-----
A tip can be manually set by passing a `location`. If no location
is passed, the Pipette will pick up the next available tip in
its `tip_racks` list (see :any:`Pipette`)
Parameters
----------
location : :any:`Placeable` or tuple(:any:`Placeable`, :any:`Vector`)
The :any:`Placeable` (:any:`Well`) to perform the pick_up_tip.
Can also be a tuple with first item :any:`Placeable`,
second item relative :any:`Vector`
presses : int
The number of times to lower and then raise the pipette when
picking up a tip, to ensure a good seal (0 [zero] will result in
the pipette hovering over the tip but not picking it up--generally
not desirable, but could be used for a dry run). Default: 3 presses
increment : int
The additional distance to travel on each successive press (e.g.:
if presses=3 and increment=1, then the first press will travel down
into the tip by 3.5mm, the second by 4.5mm, and the third by 5.5mm.
Default: 1mm
Returns
-------
This instance of :class:`Pipette`.
Examples
--------
..
>>> from opentrons import instruments, labware, robot # doctest: +SKIP
>>> robot.reset() # doctest: +SKIP
>>> tiprack = labware.load('GEB-tiprack-300', '2') # doctest: +SKIP
>>> p300 = instruments.P300_Single(mount='left',
... tip_racks=[tiprack]) # doctest: +SKIP
>>> p300.pick_up_tip(tiprack[0]) # doctest: +SKIP
>>> p300.return_tip() # doctest: +SKIP
# `pick_up_tip` will automatically go to tiprack[1]
>>> p300.pick_up_tip() # doctest: +SKIP
>>> p300.return_tip() # doctest: +SKIP
|
def bulk_csv_import_mongo(csvfile, database_name, collection_name,
delete_collection_before_import=False):
"""return a response_dict with a list of search results"""
"""method can be insert or update"""
l = []
response_dict = {}
try:
mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
'mongodb://localhost:27017/')
mc = MongoClient(mongodb_client_url, document_class=OrderedDict)
db = mc[database_name]
collection = db[collection_name]
if delete_collection_before_import:
    collection.remove({})
# open the csv file.
csvhandle = csv.reader(open(csvfile._get_path(), 'rb'), delimiter=',')
rowindex = 0
errors = 0
error_list = []
success = 0
for row in csvhandle:
if rowindex == 0:
column_headers = row
cleaned_headers = []
for c in column_headers:
c = c.replace(".", "")
c = c.replace("$", "-")
c = c.replace(" ", "_")
cleaned_headers.append(c)
else:
record = OrderedDict(zip(cleaned_headers, row))
# Only populate fields that have non-blank values.
kwargs = OrderedDict()
for k, v in record.items():
if v:
if v.isdigit():
kwargs[k] = int(v)
else:
kwargs[k] = v
try:
myobjectid = collection.insert(kwargs)
success += 1
except Exception:
    error_message = ("Error on row " + str(rowindex) + ". " +
                     str(sys.exc_info()))
    error_list.append(error_message)
rowindex += 1
if error_list:
response_dict = {}
response_dict['num_rows_imported'] = rowindex
response_dict['num_rows_errors'] = len(error_list)
response_dict['errors'] = error_list
response_dict['code'] = 400
response_dict['message'] = "Completed with errors"
else:
response_dict = {}
response_dict['num_rows_imported'] = success
response_dict['code'] = 200
response_dict['message'] = "Completed."
return response_dict
except Exception:
    # reading from Mongo failed; report the error in the response
response_dict['num_results'] = 0
response_dict['code'] = 400
response_dict['type'] = "Error"
response_dict['results'] = []
response_dict['message'] = str(sys.exc_info())
return response_dict
|
Bulk-import a CSV file into MongoDB and return a response_dict summarizing the import.
|
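The header-sanitization step is worth seeing in isolation: MongoDB field names historically could not contain '.' or start with '$', hence the replacements.
raw_headers = ['first.name', '$cost', 'zip code']
cleaned = [h.replace('.', '').replace('$', '-').replace(' ', '_')
           for h in raw_headers]
print(cleaned)  # ['firstname', '-cost', 'zip_code']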
def resolve_variable(var_name, var_def, provided_variable, blueprint_name):
"""Resolve a provided variable value against the variable definition.
Args:
var_name (str): The name of the defined variable on a blueprint.
var_def (dict): A dictionary representing the defined variable's
attributes.
provided_variable (:class:`stacker.variables.Variable`): The variable
value provided to the blueprint.
blueprint_name (str): The name of the blueprint that the variable is
being applied to.
Returns:
object: The resolved variable value, could be any python object.
Raises:
MissingVariable: Raised when a variable with no default is not
provided a value.
UnresolvedVariable: Raised when the provided variable is not already
resolved.
ValueError: Raised when the value is not the right type and cannot be
cast as the correct type. Raised by
:func:`stacker.blueprints.base.validate_variable_type`
ValidatorError: Raised when a validator raises an exception. Wraps the
original exception.
"""
try:
var_type = var_def["type"]
except KeyError:
raise VariableTypeRequired(blueprint_name, var_name)
if provided_variable:
if not provided_variable.resolved:
raise UnresolvedVariable(blueprint_name, provided_variable)
value = provided_variable.value
else:
# Variable value not provided, try using the default, if it exists
# in the definition
try:
value = var_def["default"]
except KeyError:
raise MissingVariable(blueprint_name, var_name)
# If no validator, return the value as is, otherwise apply validator
validator = var_def.get("validator", lambda v: v)
try:
value = validator(value)
except Exception as exc:
raise ValidatorError(var_name, validator.__name__, value, exc)
# Ensure that the resulting value is the correct type
value = validate_variable_type(var_name, var_type, value)
allowed_values = var_def.get("allowed_values")
if not validate_allowed_values(allowed_values, value):
message = (
"Invalid value passed to '%s' in blueprint: %s. Got: '%s', "
"expected one of %s"
) % (var_name, blueprint_name, value, allowed_values)
raise ValueError(message)
return value
|
Resolve a provided variable value against the variable definition.
Args:
var_name (str): The name of the defined variable on a blueprint.
var_def (dict): A dictionary representing the defined variable's
attributes.
provided_variable (:class:`stacker.variables.Variable`): The variable
value provided to the blueprint.
blueprint_name (str): The name of the blueprint that the variable is
being applied to.
Returns:
object: The resolved variable value, could be any python object.
Raises:
MissingVariable: Raised when a variable with no default is not
provided a value.
UnresolvedVariable: Raised when the provided variable is not already
resolved.
ValueError: Raised when the value is not the right type and cannot be
cast as the correct type. Raised by
:func:`stacker.blueprints.base.validate_variable_type`
ValidatorError: Raised when a validator raises an exception. Wraps the
original exception.
|
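The validator and allowed_values steps, in isolation, with a plain ValueError standing in for stacker's own error classes:
def check_value(var_def, value):
    # identity validator by default, mirroring resolve_variable() above
    validator = var_def.get('validator', lambda v: v)
    value = validator(value)
    allowed = var_def.get('allowed_values')
    if allowed is not None and value not in allowed:
        raise ValueError('{!r} not in allowed values {!r}'.format(value, allowed))
    return value

print(check_value({'validator': int, 'allowed_values': [1, 2, 3]}, '2'))  # 2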
def extract(self, feature, remove_subfeatures=False):
'''Extract a feature from the sequence. This operation is complementary
to the .excise() method.
:param feature: Feature object.
:type feature: coral.sequence.Feature
:param remove_subfeatures: Remove all features in the extracted
sequence aside from the input feature.
:type remove_subfeatures: bool
:returns: A subsequence from start to stop of the feature.
'''
extracted = self[feature.start:feature.stop]
# Turn gaps into Ns or Xs
for gap in feature.gaps:
for i in range(*gap):
extracted[i] = self._any_char
if remove_subfeatures:
# Keep only the feature specified
extracted.features = [feature]
# Update feature locations
# copy them
for feature in extracted.features:
feature.move(-feature.start)
return extracted
|
Extract a feature from the sequence. This operation is complementary
to the .excise() method.
:param feature: Feature object.
:type feature: coral.sequence.Feature
:param remove_subfeatures: Remove all features in the extracted
sequence aside from the input feature.
:type remove_subfeatures: bool
:returns: A subsequence from start to stop of the feature.
|
def run(self, module, options):
"""
Run the operator.
:param module: The target module path.
:type module: ``str``
:param options: Any runtime options.
:type options: ``dict``
:return: The operator results.
:rtype: ``dict``
"""
logger.debug("Running maintainability harvester")
return dict(self.harvester.results)
|
Run the operator.
:param module: The target module path.
:type module: ``str``
:param options: Any runtime options.
:type options: ``dict``
:return: The operator results.
:rtype: ``dict``
|
def _bin_update_items(self, items, replace_at_most_one,
replacements, leftovers):
"""
Subclassed from omdict._bin_update_items() to make update() and
updateall() process lists of values as multiple values.
<replacements> and <leftovers> are modified directly, a la pass by
reference.
"""
for key, values in items:
# Wrap <values> in a list if it is not list-like or is an empty list.
like_list_not_str = self._quacks_like_a_list_but_not_str(values)
if not like_list_not_str or not values:
    values = [values]
for value in values:
# If the value is [], remove any existing leftovers with
# key <key> and set the list of values itself to [],
# which in turn will later delete <key> when [] is
# passed to omdict.setlist() in
# omdict._update_updateall().
if value == []:
replacements[key] = []
leftovers[:] = [l for l in leftovers if key != l[0]]
continue
# If there are existing items with key <key> that have
# yet to be marked for replacement, mark that item's
# value to be replaced by <value> by appending it to
# <replacements>. TODO: Refactor for clarity
if (key in self and
(key not in replacements or
(key in replacements and
replacements[key] == []))):
replacements[key] = [value]
elif (key in self and not replace_at_most_one and
len(replacements[key]) < len(self.values(key))):
replacements[key].append(value)
else:
if replace_at_most_one:
replacements[key] = [value]
else:
leftovers.append((key, value))
|
Subclassed from omdict._bin_update_items() to make update() and
updateall() process lists of values as multiple values.
<replacements> and <leftovers> are modified directly, a la pass by
reference.
|
def set_row(self, index, values):
"""
Sets the values of the columns in a single row.
:param index: index value
:param values: dict mapping column names to the values to set in this row
:return: nothing
"""
if self._sort:
exists, i = sorted_exists(self._index, index)
if not exists:
self._insert_row(i, index)
else:
try:
i = self._index.index(index)
except ValueError: # new row
i = len(self._index)
self._add_row(index)
if isinstance(values, dict):
if not (set(values.keys()).issubset(self._columns)):
raise ValueError('keys of values are not all in existing columns')
for c, column in enumerate(self._columns):
self._data[c][i] = values.get(column, self._data[c][i])
else:
raise TypeError('cannot handle values of this type.')
|
Sets the values of the columns in a single row.
:param index: index value
:param values: dict mapping column names to the values to set in this row
:return: nothing
|
def _rename(self):
"""
Called during a PUT request where the action specifies
a rename operation. Returns resource URI of the renamed file.
"""
newname = self.action['newname']
try:
newpath = self.fs.rename(self.fp, newname)
except OSError:
raise tornado.web.HTTPError(400)
return newpath
|
Called during a PUT request where the action specifies
a rename operation. Returns resource URI of the renamed file.
|
def fetch_attacks_data(self):
"""Initializes data necessary to execute attacks.
This method can be called multiple times; only the first call performs
initialization, subsequent calls are no-ops.
"""
if self.attacks_data_initialized:
return
# init data from datastore
self.submissions.init_from_datastore()
self.dataset_batches.init_from_datastore()
self.adv_batches.init_from_datastore()
# copy dataset locally
if not os.path.exists(LOCAL_DATASET_DIR):
os.makedirs(LOCAL_DATASET_DIR)
eval_lib.download_dataset(self.storage_client, self.dataset_batches,
LOCAL_DATASET_DIR,
os.path.join(LOCAL_DATASET_COPY,
self.dataset_name, 'images'))
# download dataset metadata
self.read_dataset_metadata()
# mark as initialized
self.attacks_data_initialized = True
|
Initializes data necessary to execute attacks.
This method can be called multiple times; only the first call performs
initialization, subsequent calls are no-ops.
|
def sorted_for_ner(crf_classes):
"""
Return labels sorted in a default order suitable for NER tasks:
>>> sorted_for_ner(['B-ORG', 'B-PER', 'O', 'I-PER'])
['O', 'B-ORG', 'B-PER', 'I-PER']
"""
def key(cls):
if len(cls) > 2 and cls[1] == '-':
# group names like B-ORG and I-ORG together
return cls.split('-', 1)[1], cls
return '', cls
return sorted(crf_classes, key=key)
|
Return labels sorted in a default order suitable for NER tasks:
>>> sorted_for_ner(['B-ORG', 'B-PER', 'O', 'I-PER'])
['O', 'B-ORG', 'B-PER', 'I-PER']
|
def save(self):
"""Save the changes to the instance and any related objects."""
# first call save with commit=False for all Forms
for form in self._forms:
if isinstance(form, BaseForm):
form.save(commit=False)
# call save on the instance
self.instance.save()
# call any post-commit hooks that have been stashed on Forms
for form in self.forms:
if isinstance(form, BaseForm):
if hasattr(form, 'save_m2m'):
form.save_m2m()
if hasattr(form, 'save_related'):
form.save_related()
# call save on any formsets
for form in self._forms:
if isinstance(form, BaseFormSet):
form.save(commit=True)
return self.instance
|
Save the changes to the instance and any related objects.
|
def get_credentials(username=None, password=None, netrc=None, use_keyring=False):
"""
Return valid username, password tuple.
Raises CredentialsError if username or password is missing.
"""
if netrc:
path = None if netrc is True else netrc
return authenticate_through_netrc(path)
if not username:
raise CredentialsError(
'Please provide a username with the -u option, '
'or a .netrc file with the -n option.')
if not password and use_keyring:
password = keyring.get_password(KEYRING_SERVICE_NAME, username)
if not password:
password = getpass.getpass('Coursera password for {0}: '.format(username))
if use_keyring:
keyring.set_password(KEYRING_SERVICE_NAME, username, password)
return username, password
|
Return valid username, password tuple.
Raises CredentialsError if username or password is missing.
|
def dataframe(self):
"""
Returns a pandas DataFrame containing all other class properties and
values. The index for the DataFrame is the string URI that is used to
instantiate the class, such as '201802040nwe'.
"""
if self._away_points is None and self._home_points is None:
return None
fields_to_include = {
'attendance': self.attendance,
'away_first_downs': self.away_first_downs,
'away_fourth_down_attempts': self.away_fourth_down_attempts,
'away_fourth_down_conversions': self.away_fourth_down_conversions,
'away_fumbles': self.away_fumbles,
'away_fumbles_lost': self.away_fumbles_lost,
'away_interceptions': self.away_interceptions,
'away_net_pass_yards': self.away_net_pass_yards,
'away_pass_attempts': self.away_pass_attempts,
'away_pass_completions': self.away_pass_completions,
'away_pass_touchdowns': self.away_pass_touchdowns,
'away_pass_yards': self.away_pass_yards,
'away_penalties': self.away_penalties,
'away_points': self.away_points,
'away_rush_attempts': self.away_rush_attempts,
'away_rush_touchdowns': self.away_rush_touchdowns,
'away_rush_yards': self.away_rush_yards,
'away_third_down_attempts': self.away_third_down_attempts,
'away_third_down_conversions': self.away_third_down_conversions,
'away_time_of_possession': self.away_time_of_possession,
'away_times_sacked': self.away_times_sacked,
'away_total_yards': self.away_total_yards,
'away_turnovers': self.away_turnovers,
'away_yards_from_penalties': self.away_yards_from_penalties,
'away_yards_lost_from_sacks': self.away_yards_lost_from_sacks,
'date': self.date,
'duration': self.duration,
'home_first_downs': self.home_first_downs,
'home_fourth_down_attempts': self.home_fourth_down_attempts,
'home_fourth_down_conversions': self.home_fourth_down_conversions,
'home_fumbles': self.home_fumbles,
'home_fumbles_lost': self.home_fumbles_lost,
'home_interceptions': self.home_interceptions,
'home_net_pass_yards': self.home_net_pass_yards,
'home_pass_attempts': self.home_pass_attempts,
'home_pass_completions': self.home_pass_completions,
'home_pass_touchdowns': self.home_pass_touchdowns,
'home_pass_yards': self.home_pass_yards,
'home_penalties': self.home_penalties,
'home_points': self.home_points,
'home_rush_attempts': self.home_rush_attempts,
'home_rush_touchdowns': self.home_rush_touchdowns,
'home_rush_yards': self.home_rush_yards,
'home_third_down_attempts': self.home_third_down_attempts,
'home_third_down_conversions': self.home_third_down_conversions,
'home_time_of_possession': self.home_time_of_possession,
'home_times_sacked': self.home_times_sacked,
'home_total_yards': self.home_total_yards,
'home_turnovers': self.home_turnovers,
'home_yards_from_penalties': self.home_yards_from_penalties,
'home_yards_lost_from_sacks': self.home_yards_lost_from_sacks,
'losing_abbr': self.losing_abbr,
'losing_name': self.losing_name,
'stadium': self.stadium,
'time': self.time,
'winner': self.winner,
'winning_abbr': self.winning_abbr,
'winning_name': self.winning_name
}
return pd.DataFrame([fields_to_include], index=[self._uri])
|
Returns a pandas DataFrame containing all other class properties and
values. The index for the DataFrame is the string URI that is used to
instantiate the class, such as '201802040nwe'.
|
def get_layout(self, page):
""" Get PDFMiner Layout object for given page object or page number. """
if isinstance(page, int):
page = self.get_page(page)
self.interpreter.process_page(page)
layout = self.device.get_result()
layout = self._add_annots(layout, page.annots)
return layout
|
Get PDFMiner Layout object for given page object or page number.
|
def set(self, key, value, expire=0, noreply=None):
"""
The memcached "set" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
If no exception is raised, always returns True. If an exception is
raised, the set may or may not have occurred. If noreply is True,
then a successful return does not guarantee a successful set.
"""
if noreply is None:
noreply = self.default_noreply
return self._store_cmd(b'set', {key: value}, expire, noreply)[key]
|
The memcached "set" command.
Args:
key: str, see class docs for details.
value: str, see class docs for details.
expire: optional int, number of seconds until the item is expired
from the cache, or zero for no expiry (the default).
noreply: optional bool, True to not wait for the reply (defaults to
self.default_noreply).
Returns:
If no exception is raised, always returns True. If an exception is
raised, the set may or may not have occurred. If noreply is True,
then a successful return does not guarantee a successful set.
|
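The signature matches pymemcache's base Client, so a minimal usage sketch (assuming a memcached server on the default local port) looks like:
from pymemcache.client.base import Client

client = Client(('localhost', 11211))        # assumes a local memcached
client.set('greeting', 'hello', expire=60)   # returns True on success
print(client.get('greeting'))                # b'hello'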
def get(cls, uni_char):
"""Return the general category code (as Unicode string) for the given Unicode character"""
uni_char = unicod(uni_char) # Force to Unicode
return unicod(unicodedata.category(uni_char))
|
Return the general category code (as Unicode string) for the given Unicode character
|
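The heavy lifting is unicodedata.category from the standard library; a few sample category codes:
import unicodedata

print(unicodedata.category('A'))  # Lu -- letter, uppercase
print(unicodedata.category('9'))  # Nd -- number, decimal digit
print(unicodedata.category(','))  # Po -- punctuation, other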
def request_get_variable_json(self, py_db, request, thread_id):
'''
:param VariablesRequest request:
'''
py_db.post_method_as_internal_command(
thread_id, internal_get_variable_json, request)
|
:param VariablesRequest request:
|
def list_functions(*args, **kwargs): # pylint: disable=unused-argument
'''
List the functions for all modules. Optionally, specify a module or modules
from which to list.
CLI Example:
.. code-block:: bash
salt '*' sys.list_functions
salt '*' sys.list_functions sys
salt '*' sys.list_functions sys user
Function names can be specified as globs.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' sys.list_functions 'sys.list_*'
.. versionadded:: ?
.. code-block:: bash
salt '*' sys.list_functions 'module.specific_function'
'''
# ## NOTE: **kwargs is used here to prevent a traceback when garbage
# ## arguments are tacked on to the end.
if not args:
# We're being asked for all functions
return sorted(__salt__)
names = set()
for module in args:
if '*' in module or '.' in module:
for func in fnmatch.filter(__salt__, module):
names.add(func)
else:
# "sys" should just match sys without also matching sysctl
moduledot = module + '.'
for func in __salt__:
if func.startswith(moduledot):
names.add(func)
return sorted(names)
|
List the functions for all modules. Optionally, specify a module or modules
from which to list.
CLI Example:
.. code-block:: bash
salt '*' sys.list_functions
salt '*' sys.list_functions sys
salt '*' sys.list_functions sys user
Function names can be specified as globs.
.. versionadded:: 2015.5.0
.. code-block:: bash
salt '*' sys.list_functions 'sys.list_*'
.. versionadded:: ?
.. code-block:: bash
salt '*' sys.list_functions 'module.specific_function'
|
def get_access_token(self, code=None, **params):
"""
Return the memoized access token or go out and fetch one.
"""
if self._access_token is None:
if code is None:
raise ValueError(_('Invalid code.'))
self.access_token_dict = self._get_access_token(code, **params)
try:
self._access_token = self.access_token_dict['access_token']
except KeyError:
raise OAuthError("Credentials could not be validated, the provider returned no access token.")
return self._access_token
|
Return the memoized access token or go out and fetch one.
|
def get_recurrence(self, config):
'''
Calculates the recurrence model for the given settings as
an instance of the openquake.hmtk.models.IncrementalMFD
:param dict config:
Configuration settings of the magnitude frequency distribution.
'''
model = MFD_MAP[config['Model_Name']]()
model.setUp(config)
model.get_mmax(config, self.msr, self.rake, self.area)
model.mmax = model.mmax + (self.msr_sigma * model.mmax_sigma)
# As the Anderson & Luco arbitrary model requires the input of the
# displacement to length ratio
if 'AndersonLucoAreaMmax' in config['Model_Name']:
if not self.disp_length_ratio:
# If not defined then default to 1.25E-5
self.disp_length_ratio = 1.25E-5
min_mag, bin_width, occur_rates = model.get_mfd(
self.slip,
self.area, self.shear_modulus, self.disp_length_ratio)
else:
min_mag, bin_width, occur_rates = model.get_mfd(self.slip,
self.area,
self.shear_modulus)
self.recurrence = IncrementalMFD(min_mag, bin_width, occur_rates)
self.magnitudes = min_mag + np.cumsum(
bin_width *
np.ones(len(occur_rates), dtype=float)) - bin_width
self.max_mag = np.max(self.magnitudes)
|
Calculates the recurrence model for the given settings as
an instance of the openquake.hmtk.models.IncrementalMFD
:param dict config:
Configuration settings of the magnitude frequency distribution.
|
def _GetContainingRange(self, partition_key):
"""Gets the containing range based on the partition key.
"""
for keyrange in self.partition_map.keys():
if keyrange.Contains(partition_key):
return keyrange
return None
|
Gets the containing range based on the partition key.
|
def to_latlon(easting, northing, zone_number, zone_letter=None, northern=None, strict=True):
"""This function convert an UTM coordinate into Latitude and Longitude
Parameters
----------
easting: int
Easting value of UTM coordinate
northing: int
Northing value of UTM coordinate
zone_number: int
Zone number, as shown on a global UTM zone numbers map. For more
information see utmzones [1]_
zone_letter: str
Zone Letter can be represented as string values. Where UTM Zone
Designators can be accessed in [1]_
northern: bool
Set True for the northern hemisphere, False for the southern.
Default is None
.. _[1]: http://www.jaworski.ca/utmzones.htm
"""
if not zone_letter and northern is None:
raise ValueError('either zone_letter or northern needs to be set')
elif zone_letter and northern is not None:
raise ValueError('set either zone_letter or northern, but not both')
if strict:
if not in_bounds(easting, 100000, 1000000, upper_strict=True):
raise OutOfRangeError('easting out of range (must be between 100.000 m and 999.999 m)')
if not in_bounds(northing, 0, 10000000):
raise OutOfRangeError('northing out of range (must be between 0 m and 10.000.000 m)')
check_valid_zone(zone_number, zone_letter)
if zone_letter:
zone_letter = zone_letter.upper()
northern = (zone_letter >= 'N')
x = easting - 500000
y = northing
if not northern:
y -= 10000000
m = y / K0
mu = m / (R * M1)
p_rad = (mu +
P2 * mathlib.sin(2 * mu) +
P3 * mathlib.sin(4 * mu) +
P4 * mathlib.sin(6 * mu) +
P5 * mathlib.sin(8 * mu))
p_sin = mathlib.sin(p_rad)
p_sin2 = p_sin * p_sin
p_cos = mathlib.cos(p_rad)
p_tan = p_sin / p_cos
p_tan2 = p_tan * p_tan
p_tan4 = p_tan2 * p_tan2
ep_sin = 1 - E * p_sin2
ep_sin_sqrt = mathlib.sqrt(1 - E * p_sin2)
n = R / ep_sin_sqrt
r = (1 - E) / ep_sin
c = _E * p_cos**2
c2 = c * c
d = x / (n * K0)
d2 = d * d
d3 = d2 * d
d4 = d3 * d
d5 = d4 * d
d6 = d5 * d
latitude = (p_rad - (p_tan / r) *
(d2 / 2 -
d4 / 24 * (5 + 3 * p_tan2 + 10 * c - 4 * c2 - 9 * E_P2)) +
d6 / 720 * (61 + 90 * p_tan2 + 298 * c + 45 * p_tan4 - 252 * E_P2 - 3 * c2))
longitude = (d -
d3 / 6 * (1 + 2 * p_tan2 + c) +
d5 / 120 * (5 - 2 * c + 28 * p_tan2 - 3 * c2 + 8 * E_P2 + 24 * p_tan4)) / p_cos
return (mathlib.degrees(latitude),
mathlib.degrees(longitude) + zone_number_to_central_longitude(zone_number))
|
This function converts a UTM coordinate into latitude and longitude
Parameters
----------
easting: int
Easting value of UTM coordinate
northing: int
Northing value of UTM coordinate
zone_number: int
Zone number, as shown on a global UTM zone numbers map. For more
information see utmzones [1]_
zone_letter: str
Zone Letter can be represented as string values. Where UTM Zone
Designators can be accessed in [1]_
northern: bool
Set True for the northern hemisphere, False for the southern.
Default is None
.. _[1]: http://www.jaworski.ca/utmzones.htm
|
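This appears to be the utm package on PyPI; a round trip is the easiest way to exercise to_latlon:
import utm

# lat/lon -> UTM -> lat/lon round trip
easting, northing, zone_number, zone_letter = utm.from_latlon(51.2, 7.5)
print(utm.to_latlon(easting, northing, zone_number, zone_letter))
# approximately (51.2, 7.5)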
def normalize_pdf(mu, pofmu):
"""
Takes a function pofmu defined at rate sample values mu and
normalizes it to be a suitable pdf. Both mu and pofmu must be
arrays or lists of the same length.
"""
if min(pofmu) < 0:
raise ValueError("Probabilities cannot be negative, don't ask me to "
"normalize a function with negative values!")
if min(mu) < 0:
raise ValueError("Rates cannot be negative, don't ask me to "
"normalize a function over a negative domain!")
dp = integral_element(mu, pofmu)
return mu, pofmu/sum(dp)
|
Takes a function pofmu defined at rate sample values mu and
normalizes it to be a suitable pdf. Both mu and pofmu must be
arrays or lists of the same length.
|
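A self-contained numpy equivalent, with np.trapz standing in for the integral_element() helper used above:
import numpy as np

def normalize_pdf_sketch(mu, pofmu):
    mu, pofmu = np.asarray(mu, float), np.asarray(pofmu, float)
    total = np.trapz(pofmu, mu)  # area under the sampled curve
    return mu, pofmu / total

mu, p = normalize_pdf_sketch([0.0, 1.0, 2.0], [1.0, 2.0, 1.0])
print(np.trapz(p, mu))  # 1.0 -- integrates to one, as a pdf should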
def create_ui(self):
'''
.. versionchanged:: 0.20
Debounce window expose and resize handlers to improve
responsiveness.
.. versionchanged:: X.X.X
Call debounced `_on_expose_event` handler on _leading_ edge to make
UI update more responsive when, e.g., changing window focus.
Decrease debounce time to 250 ms.
'''
super(GtkShapesCanvasView, self).create_ui()
self.widget.set_events(gtk.gdk.BUTTON_PRESS |
gtk.gdk.BUTTON_RELEASE |
gtk.gdk.BUTTON_MOTION_MASK |
gtk.gdk.BUTTON_PRESS_MASK |
gtk.gdk.BUTTON_RELEASE_MASK |
gtk.gdk.POINTER_MOTION_HINT_MASK)
self._dirty_check_timeout_id = gtk.timeout_add(30, self.check_dirty)
self.resize = Debounce(self._resize, wait=250)
debounced_on_expose_event = Debounce(self._on_expose_event, wait=250,
leading=True, trailing=True)
self.widget.connect('expose-event', debounced_on_expose_event)
|
.. versionchanged:: 0.20
Debounce window expose and resize handlers to improve
responsiveness.
.. versionchanged:: X.X.X
Call debounced `_on_expose_event` handler on _leading_ edge to make
UI update more responsive when, e.g., changing window focus.
Decrease debounce time to 250 ms.
|
def split_namespace(clarkName):
"""Return (namespace, localname) tuple for a property name in Clark Notation.
Namespace defaults to ''.
Example:
'{DAV:}foo' -> ('DAV:', 'foo')
'bar' -> ('', 'bar')
"""
if clarkName.startswith("{") and "}" in clarkName:
ns, localname = clarkName.split("}", 1)
return (ns[1:], localname)
return ("", clarkName)
|
Return (namespace, localname) tuple for a property name in Clark Notation.
Namespace defaults to ''.
Example:
'{DAV:}foo' -> ('DAV:', 'foo')
'bar' -> ('', 'bar')
|
def merge(self, other):
"""
Merges the two values
"""
other = self.coerce(other)
if list_diff(self.domain, other.domain) != []:
raise Exception("Incomparable orderings. Different domains")
if self.is_equal(other):
# pick among dependencies
return self
elif other.is_entailed_by(self):
return self
elif self.is_entailed_by(other):
self.low, self.high = other.low, other.high
elif self.is_contradictory(other):
raise Contradiction("Cannot merge %s and %s" % (self, other))
else:
# information in both
to_i = self.to_i
self.low = self.domain[max(map(to_i, [self.low, other.low]))]
self.high = self.domain[min(map(to_i, [self.high, other.high]))]
return self
|
Merges the two values
|
def _set_key(self):
'''
sets the final key to be used currently
'''
if self.roll:
self.date = time.strftime(self.date_format,
time.gmtime(self.start_time))
self.final_key = '{}:{}'.format(self.key, self.date)
else:
self.final_key = self.key
|
sets the final key to be used currently
|
def unpin_chat_message(self, *args, **kwargs):
"""See :func:`unpin_chat_message`"""
return unpin_chat_message(*args, **self._merge_overrides(**kwargs)).run()
|
See :func:`unpin_chat_message`
|
def parse_bitcode(bitcode, context=None):
"""
Create Module from a LLVM *bitcode* (a bytes object).
"""
if context is None:
context = get_global_context()
buf = c_char_p(bitcode)
bufsize = len(bitcode)
with ffi.OutputString() as errmsg:
mod = ModuleRef(ffi.lib.LLVMPY_ParseBitcode(
context, buf, bufsize, errmsg), context)
if errmsg:
mod.close()
raise RuntimeError(
"LLVM bitcode parsing error\n{0}".format(errmsg))
return mod
|
Create Module from a LLVM *bitcode* (a bytes object).
|
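A round trip through llvmlite's binding layer exercises parse_bitcode: parse textual IR, serialize it to bitcode, then parse the bytes back into a module.
import llvmlite.binding as llvm

llvm.initialize()
llvm.initialize_native_target()
llvm.initialize_native_asmprinter()

mod = llvm.parse_assembly("define i32 @answer() {\n  ret i32 42\n}\n")
bitcode = mod.as_bitcode()          # a bytes object
print(llvm.parse_bitcode(bitcode))  # prints the module's IR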
def unsubscribe(self):
"""Unsubscribes this subscriber from the associated list."""
body = {
"EmailAddress": self.email_address}
response = self._post("/subscribers/%s/unsubscribe.json" %
self.list_id, json.dumps(body))
|
Unsubscribes this subscriber from the associated list.
|
def add(self, tipo_opcao, nome_opcao):
"""Inserts a new Option Pool and returns its identifier.
:param tipo_opcao: Type. String with a maximum of 50 characters and respect [a-zA-Z\_-]
:param nome_opcao: Name Option. String with a maximum of 50 characters and respect [a-zA-Z\_-]
:return: Following dictionary:
::
{'id': < id > , 'type':<type>, 'name':<name>}
:raise InvalidParameterError: The value of tipo_opcao or nome_opcao_txt is invalid.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
url = 'api/pools/options/save/'
return self.post(url, {'type': tipo_opcao, 'name': nome_opcao})
|
Inserts a new Option Pool and returns its identifier.
:param tipo_opcao: Type. String with a maximum of 50 characters and respect [a-zA-Z\_-]
:param nome_opcao: Name Option. String with a maximum of 50 characters and respect [a-zA-Z\_-]
:return: Following dictionary:
::
{'id': < id > , 'type':<type>, 'name':<name>}
:raise InvalidParameterError: The value of tipo_opcao or nome_opcao_txt is invalid.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
|
def write_short(self, number):
""" Writes a short integer to the underlying output file as a 2-byte value. """
buf = pack(self.byte_order + "h", number)
self.write(buf)
|
Writes a short integer to the underlying output file as a 2-byte value.
|
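byte_order here is a struct format prefix ('<', '>', '=', or '!'); its effect on the 2-byte value:
from struct import pack

print(pack('>h', 1))  # b'\x00\x01'  big-endian short
print(pack('<h', 1))  # b'\x01\x00'  little-endian short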
def find_ctrlpts_surface(t_u, t_v, surf, **kwargs):
""" Finds the control points involved in the evaluation of the surface point defined by the input parameter pair.
This function uses a modified version of the algorithm *A3.5 SurfacePoint* from The NURBS Book by Piegl & Tiller.
:param t_u: parameter on the u-direction
:type t_u: float
:param t_v: parameter on the v-direction
:type t_v: float
:param surf: input surface
:type surf: abstract.Surface
:return: 2-dimensional control points array
:rtype: list
"""
# Get keyword arguments
span_func = kwargs.get('find_span_func', helpers.find_span_linear)
# Find spans
span_u = span_func(surf.degree_u, surf.knotvector_u, surf.ctrlpts_size_u, t_u)
span_v = span_func(surf.degree_v, surf.knotvector_v, surf.ctrlpts_size_v, t_v)
# Constant indices
idx_u = span_u - surf.degree_u
idx_v = span_v - surf.degree_v
# Find control points involved in evaluation of the surface point at the input parameter pair (u, v)
surf_ctrlpts = [[] for _ in range(surf.degree_u + 1)]
for k in range(surf.degree_u + 1):
temp = [() for _ in range(surf.degree_v + 1)]
for l in range(surf.degree_v + 1):
temp[l] = surf.ctrlpts2d[idx_u + k][idx_v + l]
surf_ctrlpts[k] = temp
# Return 2-dimensional control points array
return surf_ctrlpts
|
Finds the control points involved in the evaluation of the surface point defined by the input parameter pair.
This function uses a modified version of the algorithm *A3.5 SurfacePoint* from The NURBS Book by Piegl & Tiller.
:param t_u: parameter on the u-direction
:type t_u: float
:param t_v: parameter on the v-direction
:type t_v: float
:param surf: input surface
:type surf: abstract.Surface
:return: 2-dimensional control points array
:rtype: list
|
def format_npm_command_for_logging(command):
"""Convert npm command list to string for display to user."""
if platform.system().lower() == 'windows':
if command[0] == 'npx.cmd' and command[1] == '-c':
return "npx.cmd -c \"%s\"" % " ".join(command[2:])
return " ".join(command)
# Strip out redundant npx quotes not needed when executing the command
# directly
return " ".join(command).replace('\'\'', '\'')
|
Convert npm command list to string for display to user.
|
def update_unit(self, unit_id, unit_dict):
"""
Updates an unit
:param unit_id: the unit id
:param unit_dict: dict
:return: dict
"""
return self._create_put_request(resource=UNITS, billomat_id=unit_id, send_data=unit_dict)
|
Updates an unit
:param unit_id: the unit id
:param unit_dict: dict
:return: dict
|
def scopusRecordParser(record, header = None):
"""The parser [ScopusRecords](../classes/ScopusRecord.html#metaknowledge.scopus.ScopusRecord) use. This takes a line from [scopusParser()](#metaknowledge.scopus.scopusHandlers.scopusParser) and parses it as a part of the creation of a `ScopusRecord`.
**Note** this is for csv files downloaded from scopus _not_ the text records as those are less complete. Also, Scopus uses double quotes (`"`) to quote strings, such as abstracts, in the csv so double quotes in the string must be escaped. For reasons not fully understandable by mortals they choose to use two double quotes in a row (`""`) to represent an escaped double quote. This parser does not unescape these quotes, but it does correctly handle their interaction with the outer double quotes.
# Parameters
_record_ : `str`
> string ending with a newline containing the record's entry
# Returns
`dict`
> A dictionary of the key-value pairs in the entry
"""
if header is None:
header = scopusHeader
splitRecord = record[:-1].split(',')
tagDict = {}
quoted = False
for key in reversed(header):
currentVal = splitRecord.pop()
if currentVal == '':
pass
elif currentVal[-1] == '"':
if re.match(firstQuotingRegex, currentVal) is None:
valString = ',' + currentVal[:-1]
currentVal = splitRecord.pop()
#double quotes (") are escaped by proceeding them with another double quote
#So an entry containing:
#',"stuff,""quoted"",more stuff,""more quoted""",'
#would be a single string belonging to 1 column that looks like:
#'stuff,"quoted",more stuff,"more quoted"'
#We are not going to unescape the quotation marks but we do have to deal with them
while re.match(innerQuotingRegex, currentVal) is None:
valString = ',' + currentVal + valString
currentVal = splitRecord.pop()
valString = currentVal[1:] + valString
else:
valString = currentVal[1:-1]
tagDict[key] = valString
else:
tagDict[key] = currentVal
return tagDict
|
The parser [ScopusRecords](../classes/ScopusRecord.html#metaknowledge.scopus.ScopusRecord) use. This takes a line from [scopusParser()](#metaknowledge.scopus.scopusHandlers.scopusParser) and parses it as a part of the creation of a `ScopusRecord`.
**Note** this is for csv files downloaded from scopus _not_ the text records as those are less complete. Also, Scopus uses double quotes (`"`) to quote strings, such as abstracts, in the csv so double quotes in the string must be escaped. For reasons not fully understandable by mortals they choose to use two double quotes in a row (`""`) to represent an escaped double quote. This parser does not unescape these quotes, but it does correctly handle their interaction with the outer double quotes.
# Parameters
_record_ : `str`
> string ending with a newline containing the record's entry
# Returns
`dict`
> A dictionary of the key-value pairs in the entry
|
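Python's own csv module illustrates the doubled-quote escaping described above, except that csv.reader unescapes the quotes while scopusRecordParser deliberately does not:
import csv, io

row = next(csv.reader(io.StringIO('a,"stuff,""quoted"",more",b')))
print(row)  # ['a', 'stuff,"quoted",more', 'b']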
def run(self, host='localhost', port=1234):
"""
Launch the server. Will run forever accepting connections until interrupted.
Parameters:
* host: The host to listen on
* port: The port to listen on
"""
# Setup loop
loop = asyncio.get_event_loop()
coro = asyncio.start_server(self._handle, host, port, loop=loop)
try:
server = loop.run_until_complete(coro)
except Exception as e:
self._logger.error('Could not launch server: {}'.format(e))
return
# Run the server
self._logger.info('Serving on {}'.format(server.sockets[0].getsockname()))
try:
loop.run_forever()
except KeyboardInterrupt:
pass
# Close the server
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
|
Launch the server. Will run forever accepting connections until interrupted.
Parameters:
* host: The host to listen on
* port: The port to listen on
|
def has_delete_permission(self, request, obj=None):
"""
Returns True if the given request has permission to delete the given
Django model instance; the default implementation doesn't examine the
`obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to delete the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to delete *any* object of the given type.
"""
opts = self.opts
return request.user.has_perm(opts.app_label + '.' + opts.get_delete_permission(), obj)
|
Returns True if the given request has permission to delete the given
Django model instance; the default implementation doesn't examine the
`obj` parameter.
Can be overridden by the user in subclasses. In such case it should
return True if the given request has permission to delete the `obj`
model instance. If `obj` is None, this should return True if the given
request has permission to delete *any* object of the given type.
|