def set_id(self, my_id):
    """
    Sets the opinion identifier
    @type my_id: string
    @param my_id: the opinion identifier
    """
    if self.type == 'NAF':
        self.node.set('id', my_id)
    elif self.type == 'KAF':
        self.node.set('oid', my_id)
def save_global(self, obj, name=None, pack=struct.pack):
    """
    Save a "global".
    The name of this method is somewhat misleading: all types get
    dispatched here.
    """
    if obj is type(None):
        return self.save_reduce(type, (None,), obj=obj)
    elif obj is type(Ellipsis):
        return self.save_reduce(type, (Ellipsis,), obj=obj)
    elif obj is type(NotImplemented):
        return self.save_reduce(type, (NotImplemented,), obj=obj)

    if obj.__module__ == "__main__":
        return self.save_dynamic_class(obj)

    try:
        return Pickler.save_global(self, obj, name=name)
    except Exception:
        if obj.__module__ == "__builtin__" or obj.__module__ == "builtins":
            if obj in _BUILTIN_TYPE_NAMES:
                return self.save_reduce(
                    _builtin_type, (_BUILTIN_TYPE_NAMES[obj],), obj=obj)
        typ = type(obj)
        if typ is not obj and isinstance(obj, (type, types.ClassType)):
            return self.save_dynamic_class(obj)
        raise
def Polygon(pos=(0, 0, 0), normal=(0, 0, 1), nsides=6, r=1, c="coral",
            bc="darkgreen", lw=1, alpha=1, followcam=False):
    """
    Build a 2D polygon of `nsides` of radius `r` oriented as `normal`.

    :param followcam: if `True` the polygon will auto-orient itself to the active camera.
        A ``vtkCamera`` object can also be passed.
    :type followcam: bool, vtkCamera

    |Polygon|
    """
    ps = vtk.vtkRegularPolygonSource()
    ps.SetNumberOfSides(nsides)
    ps.SetRadius(r)
    ps.SetNormal(-np.array(normal))
    ps.Update()

    tf = vtk.vtkTriangleFilter()
    tf.SetInputConnection(ps.GetOutputPort())
    tf.Update()

    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(tf.GetOutputPort())
    if followcam:
        actor = vtk.vtkFollower()
        if isinstance(followcam, vtk.vtkCamera):
            actor.SetCamera(followcam)
        else:
            actor.SetCamera(settings.plotter_instance.camera)
    else:
        actor = Actor()

    actor.SetMapper(mapper)
    actor.GetProperty().SetColor(colors.getColor(c))
    actor.GetProperty().SetOpacity(alpha)
    actor.GetProperty().SetLineWidth(lw)
    actor.GetProperty().SetInterpolationToFlat()
    if bc:  # defines a specific color for the backface
        backProp = vtk.vtkProperty()
        backProp.SetDiffuseColor(colors.getColor(bc))
        backProp.SetOpacity(alpha)
        actor.SetBackfaceProperty(backProp)
    actor.SetPosition(pos)
    settings.collectable_actors.append(actor)
    return actor
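A minimal usage sketch for Polygon (hypothetical values; assumes the vtkplotter-style module this function comes from, with vtk, settings and colors available):

# Hypothetical example: a violet pentagon of radius 2 at the origin,
# facing +z and auto-orienting toward the active camera.
pentagon = Polygon(pos=(0, 0, 0), normal=(0, 0, 1), nsides=5, r=2,
                   c="violet", alpha=0.8, followcam=True)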
def _extended_lookup(
    datastore_api,
    project,
    key_pbs,
    missing=None,
    deferred=None,
    eventual=False,
    transaction_id=None,
):
    """Repeat lookup until all keys found (unless stop requested).

    Helper function for :meth:`Client.get_multi`.

    :type datastore_api:
        :class:`google.cloud.datastore._http.HTTPDatastoreAPI`
        or :class:`google.cloud.datastore_v1.gapic.DatastoreClient`
    :param datastore_api: The datastore API object used to connect
                          to datastore.

    :type project: str
    :param project: The project to make the request for.

    :type key_pbs: list of :class:`.entity_pb2.Key`
    :param key_pbs: The keys to retrieve from the datastore.

    :type missing: list
    :param missing: (Optional) If a list is passed, the key-only entity
                    protobufs returned by the backend as "missing" will be
                    copied into it.

    :type deferred: list
    :param deferred: (Optional) If a list is passed, the key protobufs returned
                     by the backend as "deferred" will be copied into it.

    :type eventual: bool
    :param eventual: If False (the default), request ``STRONG`` read
                     consistency. If True, request ``EVENTUAL`` read
                     consistency.

    :type transaction_id: str
    :param transaction_id: If passed, make the request in the scope of
                           the given transaction. Incompatible with
                           ``eventual==True``.

    :rtype: list of :class:`.entity_pb2.Entity`
    :returns: The requested entities.
    :raises: :class:`ValueError` if missing / deferred are not null or
             empty list.
    """
    if missing is not None and missing != []:
        raise ValueError("missing must be None or an empty list")

    if deferred is not None and deferred != []:
        raise ValueError("deferred must be None or an empty list")

    results = []

    loop_num = 0
    read_options = helpers.get_read_options(eventual, transaction_id)
    while loop_num < _MAX_LOOPS:  # loop against possible deferred.
        loop_num += 1
        lookup_response = datastore_api.lookup(
            project, key_pbs, read_options=read_options
        )

        # Accumulate the new results.
        results.extend(result.entity for result in lookup_response.found)

        if missing is not None:
            missing.extend(result.entity for result in lookup_response.missing)

        if deferred is not None:
            deferred.extend(lookup_response.deferred)
            break

        if len(lookup_response.deferred) == 0:
            break

        # We have deferred keys, and the user didn't ask to know about
        # them, so retry (but only with the deferred ones).
        key_pbs = lookup_response.deferred

    return results
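A sketch of how a caller can collect the "missing" results (hypothetical API object and keys; mirrors the contract documented above). Passing an empty list for ``missing`` receives key-only entities, while leaving ``deferred`` as None lets the helper retry deferred keys internally:

missing = []  # must be empty (or None), per the ValueError checks
entities = _extended_lookup(datastore_api, 'my-project', key_pbs,
                            missing=missing)
# entities: found entity protobufs; missing: key-only "missing" protobufs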
def _parse_volumes(volume_values: list) -> list:
    """Parse volumes key.

    Args:
        volume_values (list): list of volume configuration dicts with
            'source' and 'target' keys

    Returns:
        list, volume specification with mount source and container path
    """
    for v_values in volume_values:
        for v_key, v_value in v_values.items():
            if v_key == 'source':
                if v_value == '.':
                    source = os.path.dirname(
                        os.path.abspath(__file__))
                else:
                    source = v_value
            if v_key == 'target':
                target = v_value
        volume_spec = [source + ':' + target]
    return volume_spec
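For illustration, the loop implies the input is a list of per-volume dicts (hypothetical values):

volume_values = [{'source': '.', 'target': '/app'}]
spec = _parse_volumes(volume_values)
# spec -> ['<directory containing this module>:/app']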
def libvlc_media_get_user_data(p_md):
    '''Get media descriptor's user_data. user_data is specialized data
    accessed by the host application, VLC.framework uses it as a pointer to
    a native object that references a L{Media} pointer.
    @param p_md: media descriptor object.
    '''
    f = _Cfunctions.get('libvlc_media_get_user_data', None) or \
        _Cfunction('libvlc_media_get_user_data', ((1,),), None,
                   ctypes.c_void_p, Media)
    return f(p_md)
def _get_current_names(current, dsn, pc):
    """
    Get the table name and variable name from the given time series entry
    :param dict current: Time series entry
    :param str dsn: Dataset name (used for error reporting)
    :param str pc: paleoData or chronData
    :return str _table_name:
    :return str _variable_name:
    """
    _table_name = ""
    _variable_name = ""
    # Get key info
    try:
        _table_name = current['{}_tableName'.format(pc)]
        _variable_name = current['{}_variableName'.format(pc)]
    except Exception as e:
        print("Error: Unable to collapse time series: {}, {}".format(dsn, e))
        logger_ts.error("get_current: {}, {}".format(dsn, e))
    return _table_name, _variable_name
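For illustration, the looked-up keys are built from the ``pc`` prefix (hypothetical entry):

current = {'paleoData_tableName': 'measurement1',
           'paleoData_variableName': 'd18O'}
_get_current_names(current, 'Example.Dataset', 'paleoData')
# -> ('measurement1', 'd18O')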
def remove_content(self, *keys):
    """
    Removes given content from the cache.

    Usage::

        >>> cache = Cache()
        >>> cache.add_content(John="Doe", Luke="Skywalker")
        True
        >>> cache.remove_content("Luke", "John")
        True
        >>> cache
        {}

    :param \\*keys: Content to remove.
    :type \\*keys: \\*
    :return: Method success.
    :rtype: bool
    """
    LOGGER.debug("> Removing '{0}' content from the cache.".format(keys))

    for key in keys:
        if key not in self:
            raise KeyError("{0} | '{1}' key doesn't exist in cache content!".format(self.__class__.__name__, key))

        del self[key]
    return True
def cg(output,
       show,
       verbose,
       classname,
       methodname,
       descriptor,
       accessflag,
       no_isolated,
       apk):
    """
    Create a call graph and export it into a graph format.
    Classnames are found in the type "Lfoo/bar/bla;".

    Example:

    \b
    $ androguard cg APK
    """
    androcg_main(verbose=verbose,
                 APK=apk,
                 classname=classname,
                 methodname=methodname,
                 descriptor=descriptor,
                 accessflag=accessflag,
                 no_isolated=no_isolated,
                 show=show,
                 output=output)
def delete_topic(self, project, topic, fail_if_not_exists=False):
    """Deletes a Pub/Sub topic if it exists.

    :param project: the GCP project ID in which to delete the topic
    :type project: str
    :param topic: the Pub/Sub topic name to delete; do not
        include the ``projects/{project}/topics/`` prefix.
    :type topic: str
    :param fail_if_not_exists: if set, raise an exception if the topic
        does not exist
    :type fail_if_not_exists: bool
    """
    service = self.get_conn()
    full_topic = _format_topic(project, topic)
    try:
        service.projects().topics().delete(topic=full_topic).execute(num_retries=self.num_retries)
    except HttpError as e:
        # Status code 404 indicates that the topic was not found
        if str(e.resp['status']) == '404':
            message = 'Topic does not exist: {}'.format(full_topic)
            self.log.warning(message)
            if fail_if_not_exists:
                raise PubSubException(message)
        else:
            raise PubSubException(
                'Error deleting topic {}'.format(full_topic), e)
def astra_cuda_bp_scaling_factor(proj_space, reco_space, geometry):
    """Volume scaling accounting for differing adjoint definitions.

    ASTRA defines the adjoint operator in terms of a fully discrete
    setting (transposed "projection matrix") without any relation to
    physical dimensions, which makes a re-scaling necessary to
    translate it to spaces with physical dimensions.

    Behavior of ASTRA changes slightly between versions, so we keep
    track of it and adapt the scaling accordingly.
    """
    # Angular integration weighting factor
    # angle interval weight by approximate cell volume
    angle_extent = geometry.motion_partition.extent
    num_angles = geometry.motion_partition.shape
    # TODO: this gives the wrong factor for Parallel3dEulerGeometry with
    # 2 angles
    scaling_factor = (angle_extent / num_angles).prod()

    # Correct in case of non-weighted spaces
    proj_extent = float(proj_space.partition.extent.prod())
    proj_size = float(proj_space.partition.size)
    proj_weighting = proj_extent / proj_size

    scaling_factor *= (proj_space.weighting.const /
                       proj_weighting)
    scaling_factor /= (reco_space.weighting.const /
                       reco_space.cell_volume)

    if parse_version(ASTRA_VERSION) < parse_version('1.8rc1'):
        if isinstance(geometry, Parallel2dGeometry):
            # Scales with 1 / cell_volume
            scaling_factor *= float(reco_space.cell_volume)
        elif (isinstance(geometry, FanBeamGeometry)
              and geometry.det_curvature_radius is None):
            # Scales with 1 / cell_volume
            scaling_factor *= float(reco_space.cell_volume)
            # Additional magnification correction
            src_radius = geometry.src_radius
            det_radius = geometry.det_radius
            scaling_factor *= ((src_radius + det_radius) / src_radius)
        elif isinstance(geometry, Parallel3dAxisGeometry):
            # Scales with voxel stride
            # In 1.7, only cubic voxels are supported
            voxel_stride = reco_space.cell_sides[0]
            scaling_factor /= float(voxel_stride)
        elif isinstance(geometry, ConeFlatGeometry):
            # Scales with 1 / cell_volume
            # In 1.7, only cubic voxels are supported
            voxel_stride = reco_space.cell_sides[0]
            scaling_factor /= float(voxel_stride)
            # Magnification correction
            src_radius = geometry.src_radius
            det_radius = geometry.det_radius
            scaling_factor *= ((src_radius + det_radius) / src_radius) ** 2
    # Check if the development version of astra is used
    elif parse_version(ASTRA_VERSION) == parse_version('1.9.0dev'):
        if isinstance(geometry, Parallel2dGeometry):
            # Scales with 1 / cell_volume
            scaling_factor *= float(reco_space.cell_volume)
        elif (isinstance(geometry, FanBeamGeometry)
              and geometry.det_curvature_radius is None):
            # Scales with 1 / cell_volume
            scaling_factor *= float(reco_space.cell_volume)
            # Magnification correction
            src_radius = geometry.src_radius
            det_radius = geometry.det_radius
            scaling_factor *= ((src_radius + det_radius) / src_radius)
        elif isinstance(geometry, Parallel3dAxisGeometry):
            # Scales with cell volume
            # currently only square voxels are supported
            scaling_factor /= reco_space.cell_volume
        elif isinstance(geometry, ConeFlatGeometry):
            # Scales with cell volume
            scaling_factor /= reco_space.cell_volume
            # Magnification correction (scaling = 1 / magnification ** 2)
            src_radius = geometry.src_radius
            det_radius = geometry.det_radius
            scaling_factor *= ((src_radius + det_radius) / src_radius) ** 2

            # Correction for scaled 1/r^2 factor in ASTRA's density weighting.
            # This compensates for scaled voxels and pixels, as well as a
            # missing factor src_radius ** 2 in the ASTRA BP with
            # density weighting.
            det_px_area = geometry.det_partition.cell_volume
            scaling_factor *= (src_radius ** 2 * det_px_area ** 2)
    else:
        if isinstance(geometry, Parallel2dGeometry):
            # Scales with 1 / cell_volume
            scaling_factor *= float(reco_space.cell_volume)
        elif (isinstance(geometry, FanBeamGeometry)
              and geometry.det_curvature_radius is None):
            # Scales with 1 / cell_volume
            scaling_factor *= float(reco_space.cell_volume)
            # Magnification correction
            src_radius = geometry.src_radius
            det_radius = geometry.det_radius
            scaling_factor *= ((src_radius + det_radius) / src_radius)
        elif isinstance(geometry, Parallel3dAxisGeometry):
            # Scales with cell volume
            # currently only square voxels are supported
            scaling_factor /= reco_space.cell_volume
        elif isinstance(geometry, ConeFlatGeometry):
            # Scales with cell volume
            scaling_factor /= reco_space.cell_volume
            # Magnification correction (scaling = 1 / magnification ** 2)
            src_radius = geometry.src_radius
            det_radius = geometry.det_radius
            scaling_factor *= ((src_radius + det_radius) / src_radius) ** 2

            # Correction for scaled 1/r^2 factor in ASTRA's density weighting.
            # This compensates for scaled voxels and pixels, as well as a
            # missing factor src_radius ** 2 in the ASTRA BP with
            # density weighting.
            det_px_area = geometry.det_partition.cell_volume
            scaling_factor *= (src_radius ** 2 * det_px_area ** 2 /
                               reco_space.cell_volume ** 2)

    # TODO: add case with new ASTRA release

    return scaling_factor
def device_action(device_id, action_id):
    """ Initiate device action via HTTP GET. """
    success = False
    if device_id in devices:
        input_cmd = getattr(devices[device_id], action_id, None)
        if callable(input_cmd):
            input_cmd()
            success = True
    return jsonify(success=success)
def close(self, response):
    """Close connection to database."""
    LOGGER.info('Closing [%s]', os.getpid())
    if not self.database.is_closed():
        self.database.close()
    return response
def non_transactional(func, args, kwds, allow_existing=True):
    """A decorator that ensures a function is run outside a transaction.

    If there is an existing transaction (and allow_existing=True), the
    existing transaction is paused while the function is executed.

    Args:
        allow_existing: If false, throw an exception if called from within
            a transaction. If true, temporarily re-establish the
            previous non-transactional context. Defaults to True.

    This supports two forms, similar to transactional().

    Returns:
        A wrapper for the decorated function that ensures it runs outside a
        transaction.
    """
    from . import tasklets
    ctx = tasklets.get_context()
    if not ctx.in_transaction():
        return func(*args, **kwds)
    if not allow_existing:
        raise datastore_errors.BadRequestError(
            '%s cannot be called within a transaction.' % func.__name__)
    save_ctx = ctx
    while ctx.in_transaction():
        ctx = ctx._parent_context
        if ctx is None:
            raise datastore_errors.BadRequestError(
                'Context without non-transactional ancestor')
    save_ds_conn = datastore._GetConnection()
    try:
        if hasattr(save_ctx, '_old_ds_conn'):
            datastore._SetConnection(save_ctx._old_ds_conn)
        tasklets.set_context(ctx)
        return func(*args, **kwds)
    finally:
        tasklets.set_context(save_ctx)
        datastore._SetConnection(save_ds_conn)
def set_interval(self, interval):
    """Set timer interval (ms)."""
    self._interval = interval
    if self.timer is not None:
        self.timer.setInterval(interval)
def cmd_status(self, run, finished=False):
    """Given a :class:`~clusterjob.AsyncResult` instance, return a command
    that queries the scheduler for the job status, as a list of command
    arguments. If ``finished=True``, the scheduler is queried via
    ``sacct``. Otherwise, ``squeue`` is used.
    """
    if finished:
        return ['sacct', '--format=state', '-n', '-j', str(run.job_id)]
    else:
        return ['squeue', '-h', '-o', '%T', '-j', str(run.job_id)]
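For a hypothetical run whose ``job_id`` is 12345, the two returned argument lists are:

# cmd_status(run, finished=True)
#   -> ['sacct', '--format=state', '-n', '-j', '12345']
# cmd_status(run, finished=False)
#   -> ['squeue', '-h', '-o', '%T', '-j', '12345']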
def get_help(func):
    """Return usage information about a context or function.

    For contexts, just return the context name and its docstring
    For functions, return the function signature as well as its
    argument types.

    Args:
        func (callable): An annotated callable function

    Returns:
        str: The formatted help text
    """
    help_text = ""
    if isinstance(func, dict):
        name = context_name(func)
        help_text = "\n" + name + "\n\n"
        doc = inspect.getdoc(func)
        if doc is not None:
            doc = inspect.cleandoc(doc)
            help_text += doc + '\n'
        return help_text

    sig = func.metadata.signature()
    doc = inspect.getdoc(func)
    if doc is not None:
        doc = inspect.cleandoc(doc)

    help_text += "\n" + sig + "\n\n"
    if doc is not None:
        help_text += doc + '\n'

    if inspect.isclass(func):
        func = func.__init__

    # If we derived the parameter annotations from a docstring,
    # don't insert a custom arguments section since it already
    # exists.
    if func.metadata.load_from_doc:
        return help_text

    help_text += "\nArguments:\n"
    for key, info in func.metadata.annotated_params.items():
        type_name = info.type_name
        desc = ""
        if info.desc is not None:
            desc = info.desc
        help_text += "  - %s (%s): %s\n" % (key, type_name, desc)
    return help_text
def alter_partitions_with_environment_context(self, db_name, tbl_name, new_parts, environment_context):
    """
    Parameters:
     - db_name
     - tbl_name
     - new_parts
     - environment_context
    """
    self.send_alter_partitions_with_environment_context(db_name, tbl_name, new_parts, environment_context)
    self.recv_alter_partitions_with_environment_context()
def files(self):
    """ Returns a list of all the files that match the given
    input token.
    """
    res = glob.glob(self.path)
    if not res and self.is_glob:
        res = glob.glob(self.magic_path)
    if not res:
        res = glob.glob(self.alias)
    if not res:
        raise ValueError('No files match. %s' % self)
    return res
def get_cluster_custom_object(self, group, version, plural, name, **kwargs):  # noqa: E501
    """get_cluster_custom_object  # noqa: E501

    Returns a cluster scoped custom object  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.get_cluster_custom_object(group, version, plural, name, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str group: the custom resource's group (required)
    :param str version: the custom resource's version (required)
    :param str plural: the custom object's plural name. For TPRs this would be lowercase plural kind. (required)
    :param str name: the custom object's name (required)
    :return: object
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.get_cluster_custom_object_with_http_info(group, version, plural, name, **kwargs)  # noqa: E501
    else:
        (data) = self.get_cluster_custom_object_with_http_info(group, version, plural, name, **kwargs)  # noqa: E501
        return data
def generate_keywords(additional_keywords=None):
    """Generates gettext keywords list

    :arg additional_keywords: dict of keyword -> value

    :returns: dict of keyword -> values for Babel extraction

    Here's what Babel has for DEFAULT_KEYWORDS::

        DEFAULT_KEYWORDS = {
            '_': None,
            'gettext': None,
            'ngettext': (1, 2),
            'ugettext': None,
            'ungettext': (1, 2),
            'dgettext': (2,),
            'dngettext': (2, 3),
            'N_': None,
            'pgettext': ((1, 'c'), 2)
        }

    If you wanted to add a new one ``_frank`` that was like
    gettext, then you'd do this::

        generate_keywords({'_frank': None})

    If you wanted to add a new one ``upgettext`` that was like
    pgettext, then you'd do this::

        generate_keywords({'upgettext': ((1, 'c'), 2)})
    """
    # Shallow copy
    keywords = dict(BABEL_KEYWORDS)

    keywords.update({
        '_lazy': None,
        'gettext_lazy': None,
        'ugettext_lazy': None,
        'gettext_noop': None,
        'ugettext_noop': None,
        'ngettext_lazy': (1, 2),
        'ungettext_lazy': (1, 2),
        'npgettext': ((1, 'c'), 2, 3),
        'pgettext_lazy': ((1, 'c'), 2),
        'npgettext_lazy': ((1, 'c'), 2, 3),
    })

    # Add specified keywords
    if additional_keywords:
        for key, val in additional_keywords.items():
            keywords[key] = val

    return keywords
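A quick usage sketch, reusing the hypothetical ``_frank`` keyword from the docstring:

keywords = generate_keywords({'_frank': None})
assert keywords['_frank'] is None       # the added keyword
assert keywords['ngettext'] == (1, 2)   # Babel default preserved
assert keywords['_lazy'] is None        # lazy variant added above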
def engine(self):
    """Return the SqlAlchemy engine for this database."""
    if not self._engine:
        if 'postgres' in self.driver:
            if 'connect_args' not in self.engine_kwargs:
                self.engine_kwargs['connect_args'] = {
                    'application_name': '{}:{}'.format(self._application_prefix, os.getpid())
                }
            # For most use, a small pool is good to prevent connection exhaustion, but these settings may
            # be too low for the main public web application.
            self._engine = create_engine(self.dsn, echo=self._echo,
                                         pool_size=5, max_overflow=5, **self.engine_kwargs)
        else:
            self._engine = create_engine(
                self.dsn, echo=self._echo, **self.engine_kwargs)

        #
        # Disconnect connections that have a different PID from the one they were created in.
        # This protects against re-use in multi-processing.
        #
        @event.listens_for(self._engine, 'connect')
        def connect(dbapi_connection, connection_record):
            connection_record.info['pid'] = os.getpid()

        @event.listens_for(self._engine, 'checkout')
        def checkout(dbapi_connection, connection_record, connection_proxy):
            from sqlalchemy.exc import DisconnectionError
            pid = os.getpid()
            if connection_record.info['pid'] != pid:
                connection_record.connection = connection_proxy.connection = None
                raise DisconnectionError(
                    "Connection record belongs to pid %s, attempting to check out in pid %s" %
                    (connection_record.info['pid'], pid))

        if self.driver == 'sqlite':
            @event.listens_for(self._engine, 'connect')
            def pragma_on_connect(dbapi_con, con_record):
                """Issue some Sqlite pragmas when the connection is created."""
                # dbapi_con.execute('PRAGMA foreign_keys = ON;')
                # Not clear that there is a performance improvement.
                # dbapi_con.execute('PRAGMA journal_mode = WAL')
                dbapi_con.execute('PRAGMA synchronous = OFF')
                dbapi_con.execute('PRAGMA temp_store = MEMORY')
                dbapi_con.execute('PRAGMA cache_size = 500000')
                if self._foreign_keys:
                    dbapi_con.execute('PRAGMA foreign_keys=ON')

        with self._engine.connect() as conn:
            _validate_version(conn, self.dsn)

    return self._engine
def _do_delete(self):
    """
    HTTP Delete Request
    """
    return requests.delete(self._url, data=self._data, headers=self._headers, auth=(self._email, self._api_token))
def add_rules(self, rules: _RuleList) -> None:
    """Appends new rules to the router.

    :arg rules: a list of Rule instances (or tuples of arguments, which are
        passed to Rule constructor).
    """
    for rule in rules:
        if isinstance(rule, (tuple, list)):
            assert len(rule) in (2, 3, 4)
            if isinstance(rule[0], basestring_type):
                rule = Rule(PathMatches(rule[0]), *rule[1:])
            else:
                rule = Rule(*rule)

        self.rules.append(self.process_rule(rule))
def get_single_node(self) -> yaml.Node:
    """Hook used when loading a single document.

    This is the hook we use to hook yatiml into ruamel.yaml. It is \
    called by the yaml library when the user uses load() to load a \
    YAML document.

    Returns:
        A processed node representing the document.
    """
    node = super().get_single_node()
    if node is not None:
        node = self.__process_node(node, type(self).document_type)
    return node
def hmset(self, name, mapping):
    """
    Sets or updates the fields with their corresponding values.

    :param name: str, the name of the redis key
    :param mapping: a dict with keys and values
    :return: Future()
    """
    with self.pipe as pipe:
        m_encode = self.memberparse.encode
        mapping = {m_encode(k): self._value_encode(k, v)
                   for k, v in mapping.items()}
        return pipe.hmset(self.redis_key(name), mapping)
def service_command(name, command):
    """Run an init.d/upstart command."""
    service_command_template = getattr(env, 'ARGYLE_SERVICE_COMMAND_TEMPLATE',
                                       u'/etc/init.d/%(name)s %(command)s')
    sudo(service_command_template % {'name': name,
                                     'command': command}, pty=False)
def freader(filename, gz=False, bz=False):
    """ Returns a file reader object that can handle gzipped or bzip2 input """
    filecheck(filename)
    if filename.endswith('.gz'):
        gz = True
    elif filename.endswith('.bz2'):
        bz = True

    if gz:
        return gzip.open(filename, 'rb')
    elif bz:
        return bz2.BZ2File(filename, 'rb')
    else:
        return io.open(filename, 'rb')
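A short usage sketch (hypothetical file names; the compression flags are normally inferred from the extension):

for name in ('seqs.fasta', 'seqs.fasta.gz', 'seqs.fasta.bz2'):
    fh = freader(name)          # hypothetical paths
    first_line = fh.readline()  # bytes in all three cases ('rb' mode)
    fh.close()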
def WriteTo(self, values):
    """Writes values to a byte stream.

    Args:
        values (tuple[object, ...]): values to copy to the byte stream.

    Returns:
        bytes: byte stream.

    Raises:
        IOError: if byte stream cannot be written.
        OSError: if byte stream cannot be written.
    """
    try:
        return self._struct.pack(*values)
    except (TypeError, struct.error) as exception:
        raise IOError('Unable to write stream with error: {0!s}'.format(
            exception))
def exit_with_error(message):
    """ Display formatted error message and exit """
    click.secho(message, err=True, bg='red', fg='white')
    sys.exit(1)  # non-zero exit status to signal failure
def is_attr_private(attrname: str) -> Optional[Match[str]]:
    """Check that attribute name is private (at least two leading underscores,
    at most one trailing underscore)
    """
    regex = re.compile("^_{2,}.*[^_]+_?$")
    return regex.match(attrname)
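A few illustrative calls; callers typically test the truthiness of the returned match object:

assert is_attr_private('__secret')        # two leading underscores: match
assert is_attr_private('__almost_safe_')  # one trailing underscore is allowed
assert not is_attr_private('_protected')  # single leading underscore: None
assert not is_attr_private('__dunder__')  # two trailing underscores: None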
def extract_tmaster(self, topology):
    """
    Returns the representation of tmaster that will
    be returned from Tracker.
    """
    tmasterLocation = {
        "name": None,
        "id": None,
        "host": None,
        "controller_port": None,
        "master_port": None,
        "stats_port": None,
    }
    if topology.tmaster:
        tmasterLocation["name"] = topology.tmaster.topology_name
        tmasterLocation["id"] = topology.tmaster.topology_id
        tmasterLocation["host"] = topology.tmaster.host
        tmasterLocation["controller_port"] = topology.tmaster.controller_port
        tmasterLocation["master_port"] = topology.tmaster.master_port
        tmasterLocation["stats_port"] = topology.tmaster.stats_port
    return tmasterLocation
def img(self, **kwargs):
    """
    Returns an XHTML <img/> tag of the chart

    kwargs can be other img tag attributes, which are strictly enforced
    uses strict escaping on the url, necessary for proper XHTML
    """
    safe = 'src="%s" ' % self.url.replace('&', '&amp;').replace('<', '&lt;')\
        .replace('>', '&gt;').replace('"', '&quot;').replace("'", '&apos;')
    for item in kwargs.items():
        if not item[0] in IMGATTRS:
            raise AttributeError('Invalid img tag attribute: %s' % item[0])
        safe += '%s="%s" ' % item
    return '<img %s/>' % safe
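For example (hypothetical chart URL), a query string containing ``&`` is emitted with the ampersand escaped:

# chart.url == 'http://chart.apis.google.com/chart?chs=200x100&cht=p'
# chart.img() -> '<img src="http://chart.apis.google.com/chart?chs=200x100&amp;cht=p" />'
# Any keyword argument not listed in IMGATTRS raises AttributeError.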
def _read_channel(self):
    """Generic handler that will read all the data from an SSH, telnet or serial channel."""
    if self.protocol == "ssh":
        output = ""
        while True:
            if self.remote_conn.recv_ready():
                outbuf = self.remote_conn.recv(MAX_BUFFER)
                if len(outbuf) == 0:
                    raise EOFError("Channel stream closed by remote device.")
                output += outbuf.decode("utf-8", "ignore")
            else:
                break
    elif self.protocol == "telnet":
        output = self.remote_conn.read_very_eager().decode("utf-8", "ignore")
    elif self.protocol == "serial":
        output = ""
        while self.remote_conn.in_waiting > 0:
            output += self.remote_conn.read(self.remote_conn.in_waiting).decode(
                "utf-8", "ignore"
            )
    log.debug("read_channel: {}".format(output))
    self._write_session_log(output)
    return output
def write_np_dat(self, store_format='csv', delimiter=',', fmt='%.12g'):
    """
    Write TDS data stored in `self.np_vars` to the output file

    Parameters
    ----------
    store_format : str
        dump format in ('csv', 'txt', 'hdf5')
    delimiter : str
        delimiter for the `csv` and `txt` format
    fmt : str
        output formatting template

    Returns
    -------
    bool : success flag
    """
    ret = False
    system = self.system

    # compute the total number of columns, excluding time
    if not system.Recorder.n:
        n_vars = system.dae.m + system.dae.n
        # post-computed power flows include:
        #   bus  - (Pi, Qi)
        #   line - (Pij, Pji, Qij, Qji, Iij_Real, Iij_Imag, Iji_real, Iji_Imag)
        if system.tds.config.compute_flows:
            n_vars += 2 * system.Bus.n + 8 * system.Line.n + 2 * system.Area.n_combination
        idx = list(range(n_vars))
    else:
        n_vars = len(system.Recorder.varout_idx)
        idx = system.Recorder.varout_idx

    # prepare data
    t_vars_concatenated = self.concat_t_vars_np(vars_idx=idx)

    try:
        os.makedirs(os.path.abspath(os.path.dirname(system.files.dat)), exist_ok=True)
        with open(system.files.dat, self._mode) as f:
            if store_format in ('csv', 'txt'):
                np.savetxt(f, t_vars_concatenated, fmt=fmt, delimiter=delimiter)
            elif store_format == 'hdf5':
                pass
        ret = True
        logger.info('TDS data dumped to <{}>'.format(system.files.dat))
    except IOError:
        logger.error('I/O Error while writing the dat file.')

    return ret
def binary_to_float(binary_list, lower_bound, upper_bound):
    """Return a floating point number between lower and upper bounds, from binary.

    Args:
        binary_list: list<int>; List of 0s and 1s.
            The number of bits in this list determines the number of possible
            values between lower and upper bound.
            Increase the size of binary_list for more precise floating points.
        lower_bound: Minimum value for output, inclusive.
            A binary list of 0s will have this value.
        upper_bound: Maximum value for output, inclusive.
            A binary list of 1s will have this value.

    Returns:
        float; A floating point number.
    """
    # Edge case for empty binary_list
    if binary_list == []:
        # With 0 bits, only one value can be represented,
        # and we default to lower_bound
        return lower_bound

    # A little bit of math gets us a floating point
    # number between upper and lower bound.
    # We look at the relative position of
    # the integer corresponding to our binary list
    # between the upper and lower bound,
    # and offset that by lower bound.
    return ((
        # Range between lower and upper bound
        float(upper_bound - lower_bound)
        # Divided by the maximum possible integer
        / (2**len(binary_list) - 1)
        # Times the integer represented by the given binary
        * binary_to_int(binary_list))
        # Plus the lower bound
        + lower_bound)
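A worked example: with 2 bits there are 2**2 - 1 = 3 steps between the bounds, so [0, 1] lands one third of the way up (assuming binary_to_int reads the list most-significant bit first):

binary_to_float([0, 1], 0.0, 3.0)  # (3.0 - 0.0) / 3 * 1 + 0.0 -> 1.0
binary_to_float([1, 1], 0.0, 3.0)  # all ones hits the upper bound -> 3.0
binary_to_float([], 0.0, 3.0)      # empty list defaults to 0.0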
def value_at_coord(dset, coords):
    '''returns value at specified coordinate in ``dset``'''
    return nl.numberize(nl.run(['3dmaskave', '-q', '-dbox'] + list(coords) + [dset], stderr=None).output)
def get_power_all(self):
    """Returns the power in mW for all devices"""
    power_dict = {}
    for device in self.get_device_names().keys():
        power_dict[device] = self.get_power_single(device)
    return power_dict
def axes(self, axes):
    '''Set the axes for this object's degrees of freedom.

    Parameters
    ----------
    axes : list of axes specifications
        A list of axis values to set. This list must have the same number of
        elements as the degrees of freedom of the underlying ODE object.
        Each element can be

        (a) None, which has no effect on the corresponding axis, or
        (b) three floats specifying the axis to set.
    '''
    assert self.ADOF == len(axes) or self.LDOF == len(axes)
    for i, axis in enumerate(axes):
        if axis is not None:
            self.ode_obj.setAxis(i, 0, axis)
def _slugify_foreign_key(schema):
    """Slugify foreign key
    """
    for foreign_key in schema.get('foreignKeys', []):
        foreign_key['reference']['resource'] = _slugify_resource_name(
            foreign_key['reference'].get('resource', ''))
    return schema
def remove(self, id):
    """ Remove pool.
    """
    p = Pool.get(int(id))
    p.remove()
    redirect(url(controller='pool', action='list'))
def page(self, from_=values.unset, to=values.unset,
         date_created_on_or_before=values.unset,
         date_created_after=values.unset, page_token=values.unset,
         page_number=values.unset, page_size=values.unset):
    """
    Retrieve a single page of FaxInstance records from the API.
    Request is executed immediately

    :param unicode from_: Retrieve only those faxes sent from this phone number
    :param unicode to: Retrieve only those faxes sent to this phone number
    :param datetime date_created_on_or_before: Retrieve only faxes created on or before this date
    :param datetime date_created_after: Retrieve only faxes created after this date
    :param str page_token: PageToken provided by the API
    :param int page_number: Page Number, this value is simply for client state
    :param int page_size: Number of records to return, defaults to 50

    :returns: Page of FaxInstance
    :rtype: twilio.rest.fax.v1.fax.FaxPage
    """
    params = values.of({
        'From': from_,
        'To': to,
        'DateCreatedOnOrBefore': serialize.iso8601_datetime(date_created_on_or_before),
        'DateCreatedAfter': serialize.iso8601_datetime(date_created_after),
        'PageToken': page_token,
        'Page': page_number,
        'PageSize': page_size,
    })

    response = self._version.page(
        'GET',
        self._uri,
        params=params,
    )

    return FaxPage(self._version, response, self._solution)
def create_dataset(self,
                   name,
                   x_img_size,
                   y_img_size,
                   z_img_size,
                   x_vox_res,
                   y_vox_res,
                   z_vox_res,
                   x_offset=0,
                   y_offset=0,
                   z_offset=0,
                   scaling_levels=0,
                   scaling_option=0,
                   dataset_description="",
                   is_public=0):
    """
    Creates a dataset.

    Arguments:
        name (str): Name of dataset
        x_img_size (int): max x coordinate of image size
        y_img_size (int): max y coordinate of image size
        z_img_size (int): max z coordinate of image size
        x_vox_res (float): x voxel resolution
        y_vox_res (float): y voxel resolution
        z_vox_res (float): z voxel resolution
        x_offset (int): x offset amount
        y_offset (int): y offset amount
        z_offset (int): z offset amount
        scaling_levels (int): Level of resolution scaling
        scaling_option (int): Z slices is 0 or Isotropic is 1
        dataset_description (str): Your description of the dataset
        is_public (int): 1 'true' or 0 'false' for viewability of data set
            in public

    Returns:
        bool: True if dataset created, False if not
    """
    url = self.url() + "/resource/dataset/{}".format(name)
    json = {
        "dataset_name": name,
        "ximagesize": x_img_size,
        "yimagesize": y_img_size,
        "zimagesize": z_img_size,
        "xvoxelres": x_vox_res,
        "yvoxelres": y_vox_res,
        "zvoxelres": z_vox_res,
        "xoffset": x_offset,
        "yoffset": y_offset,
        "zoffset": z_offset,
        "scalinglevels": scaling_levels,
        "scalingoption": scaling_option,
        "dataset_description": dataset_description,
        "public": is_public
    }
    req = self.remote_utils.post_url(url, json=json)
    if req.status_code != 201:
        raise RemoteDataUploadError('Could not upload {}'.format(req.text))
    if req.content == "" or req.content == b'':
        return True
    else:
        return False
def applet_validate_batch(object_id, input_params={}, always_retry=True, **kwargs):
    """
    Invokes the /applet-xxxx/validateBatch API method.

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Applets-and-Entry-Points#API-method%3A-%2Fapplet-xxxx%2FvalidateBatch
    """
    return DXHTTPRequest('/%s/validateBatch' % object_id, input_params, always_retry=always_retry, **kwargs)
def close_shell(self, shell_id):
    """
    Close the shell
    @param string shell_id: The shell id on the remote machine.
        See #open_shell
    @returns This should have more error checking but it just returns true
        for now.
    @rtype bool
    """
    message_id = uuid.uuid4()
    req = {'env:Envelope': self._get_soap_header(
        resource_uri='http://schemas.microsoft.com/wbem/wsman/1/windows/shell/cmd',  # NOQA
        action='http://schemas.xmlsoap.org/ws/2004/09/transfer/Delete',
        shell_id=shell_id,
        message_id=message_id)}

    # SOAP message requires empty env:Body
    req['env:Envelope'].setdefault('env:Body', {})

    res = self.send_message(xmltodict.unparse(req))
    root = ET.fromstring(res)
    relates_to = next(
        node for node in root.findall('.//*')
        if node.tag.endswith('RelatesTo')).text
    # TODO change assert into user-friendly exception
    assert uuid.UUID(relates_to.replace('uuid:', '')) == message_id
def request(self, method, url, params=None, data=None, headers=None, auth=None, timeout=None,
            allow_redirects=False):
    """
    Make an HTTP Request with parameters provided.

    :param str method: The HTTP method to use
    :param str url: The URL to request
    :param dict params: Query parameters to append to the URL
    :param dict data: Parameters to go in the body of the HTTP request
    :param dict headers: HTTP Headers to send with the request
    :param tuple auth: Basic Auth arguments
    :param float timeout: Socket/Read timeout for the request
    :param boolean allow_redirects: Whether or not to allow redirects
    See the requests documentation for explanation of all these parameters

    :return: An http response
    :rtype: A :class:`Response <twilio.rest.http.response.Response>` object
    """
    kwargs = {
        'method': method.upper(),
        'url': url,
        'params': params,
        'data': data,
        'headers': headers,
        'auth': auth,
        'hooks': self.request_hooks
    }

    if params:
        _logger.info('{method} Request: {url}?{query}'.format(query=urlencode(params), **kwargs))
        _logger.info('PARAMS: {params}'.format(**kwargs))
    else:
        _logger.info('{method} Request: {url}'.format(**kwargs))
    if data:
        _logger.info('PAYLOAD: {data}'.format(**kwargs))

    self.last_response = None
    session = self.session or Session()
    request = Request(**kwargs)
    self.last_request = TwilioRequest(**kwargs)

    prepped_request = session.prepare_request(request)
    response = session.send(
        prepped_request,
        allow_redirects=allow_redirects,
        timeout=timeout,
    )

    _logger.info('{method} Response: {status} {text}'.format(method=method, status=response.status_code, text=response.text))

    self.last_response = Response(int(response.status_code), response.text)

    return self.last_response
def export_analytics_data_to_excel(data, output_file_name, result_info_key, identifier_keys):
    """Creates an Excel file containing data returned by the Analytics API

    Args:
        data: Analytics API data as a list of dicts
        output_file_name: File name for output Excel file (use .xlsx extension).
        result_info_key: Key identifying the result info in each item
            (passed through to create_excel_workbook).
        identifier_keys: Keys identifying each result row
            (passed through to create_excel_workbook).
    """
    workbook = create_excel_workbook(data, result_info_key, identifier_keys)
    workbook.save(output_file_name)
    print('Saved Excel file to {}'.format(output_file_name))
def get_bandstructure_by_material_id(self, material_id, line_mode=True):
    """
    Get a BandStructure corresponding to a material_id.

    REST Endpoint: https://www.materialsproject.org/rest/v2/materials/<mp-id>/vasp/bandstructure or
    https://www.materialsproject.org/rest/v2/materials/<mp-id>/vasp/bandstructure_uniform

    Args:
        material_id (str): Materials Project material_id.
        line_mode (bool): If True, fetch a BandStructureSymmLine object
            (default). If False, return the uniform band structure.

    Returns:
        A BandStructure object.
    """
    prop = "bandstructure" if line_mode else "bandstructure_uniform"
    data = self.get_data(material_id, prop=prop)
    return data[0][prop]
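A typical call through pymatgen's MPRester (hypothetical API key; mp-149 is silicon):

from pymatgen.ext.matproj import MPRester  # import path may vary by version
with MPRester('YOUR_API_KEY') as mpr:
    bs = mpr.get_bandstructure_by_material_id('mp-149')
    uniform = mpr.get_bandstructure_by_material_id('mp-149', line_mode=False)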
async def message(self, recipient: str, text: str, notice: bool = False) -> None:
    """
    Lower level messaging function used by User and Channel
    """
    await self._send(cc.PRIVMSG if not notice else cc.NOTICE, recipient, rest=text)
def eps(self):
    """Print the canvas to a postscript file"""
    import tkFileDialog, tkMessageBox
    filename = tkFileDialog.asksaveasfilename(message="save postscript to file",
                                              filetypes=[('Encapsulated PostScript', '*.eps'),
                                                         ('PostScript', '*.ps')])
    if not filename:  # dialog cancelled (returns '' rather than None)
        return
    self.postscript(file=filename)
def from_fill_parent(cls, eg_fillProperties_parent):
    """
    Return a |FillFormat| instance initialized to the settings contained
    in *eg_fillProperties_parent*, which must be an element having
    EG_FillProperties in its child element sequence in the XML schema.
    """
    fill_elm = eg_fillProperties_parent.eg_fillProperties
    fill = _Fill(fill_elm)
    fill_format = cls(eg_fillProperties_parent, fill)
    return fill_format
def _do_classifyplot(df, out_file, title=None, size=None, samples=None, callers=None):
    """Plot classification-based metrics using seaborn.
    """
    metric_labels = {"fdr": "False discovery rate",
                     "fnr": "False negative rate"}
    metrics = [("fnr", "tpr"), ("fdr", "spc")]
    is_mpl2 = LooseVersion(mpl.__version__) >= LooseVersion('2.0')
    colors = ["light grey", "greyish"] * 10
    data_dict = df.set_index(["sample", "caller", "vtype"]).T.to_dict()
    plt.ioff()
    plt.style.use('seaborn-white')
    vtypes = sorted(df["vtype"].unique(), reverse=True)
    if not callers:
        callers = sorted(df["caller"].unique())
    if not samples:
        samples = sorted(df["sample"].unique())
    if len(samples) >= len(callers):
        cats, groups = (samples, callers)
        data_dict = df.set_index(["sample", "caller", "vtype"]).T.to_dict()
    else:
        cats, groups = (callers, samples)
        data_dict = df.set_index(["caller", "sample", "vtype"]).T.to_dict()
    fig, axs = plt.subplots(len(vtypes) * len(groups), len(metrics))
    fig.text(.5, .95, title if title else "", horizontalalignment='center', size=14)
    for vi, vtype in enumerate(vtypes):
        for gi, group in enumerate(groups):
            for mi, (metric, label) in enumerate(metrics):
                row_plots = axs if len(vtypes) * len(groups) == 1 else axs[vi * len(groups) + gi]
                cur_plot = row_plots if len(metrics) == 1 else row_plots[mi]
                vals, labels = [], []
                for cat in cats:
                    cur_data = data_dict.get((cat, group, vtype))
                    if cur_data:
                        vals.append(cur_data[metric])
                        labels.append(cur_data[label])
                cur_plot.barh(np.arange(len(vals)), vals, color=sns.xkcd_palette([colors[vi]]))
                all_vals = []
                for k, d in data_dict.items():
                    if k[-1] == vtype:
                        for m in metrics:
                            all_vals.append(d[m[0]])
                metric_max = max(all_vals)
                cur_plot.set_xlim(0, metric_max)
                pad = 0.1 * metric_max
                ai_adjust = 0.0 if is_mpl2 else 0.35
                for ai, (val, label) in enumerate(zip(vals, labels)):
                    cur_plot.annotate(label, (pad + (0 if max(vals) > metric_max / 2.0 else max(vals)),
                                              ai + ai_adjust),
                                      va='center', size=7)
                cur_plot.locator_params(nbins=len(cats) + (2 if len(cats) > 2 else 1), axis="y", tight=True)
                if mi == 0:
                    cur_plot.tick_params(axis='y', which='major', labelsize=8)
                    plot_cats = ([""] + cats) if is_mpl2 else cats
                    plot_va = "center" if is_mpl2 else "bottom"
                    cur_plot.set_yticklabels(plot_cats, size=8, va=plot_va)
                    cur_plot.set_title("%s: %s" % (vtype, group), fontsize=12, loc="left")
                else:
                    cur_plot.get_yaxis().set_ticks([])
                if gi == len(groups) - 1:
                    cur_plot.tick_params(axis='x', which='major', labelsize=8)
                    cur_plot.get_xaxis().set_major_formatter(
                        mpl_ticker.FuncFormatter(lambda v, p: "%s%%" % (int(v) if round(v) == v else v)))
                    if vi == len(vtypes) - 1:
                        cur_plot.get_xaxis().set_label_text(metric_labels[metric], size=12)
                else:
                    cur_plot.get_xaxis().set_ticks([])
                cur_plot.spines['bottom'].set_visible(False)
                cur_plot.spines['left'].set_visible(False)
                cur_plot.spines['top'].set_visible(False)
                cur_plot.spines['right'].set_visible(False)
    x, y = (6, len(vtypes) * len(groups) + 1 * 0.5 * len(cats)) if size is None else size
    fig.set_size_inches(x, y)
    fig.tight_layout(rect=(0, 0, 1, 0.95))
    plt.subplots_adjust(hspace=0.6)
    fig.savefig(out_file)
def _release_command_buffer(self, command_buffer):
    """This is called by the command buffer when it closes."""
    if command_buffer.closed:
        return

    self._cb_poll.unregister(command_buffer.host_id)
    self.connection_pool.release(command_buffer.connection)
    command_buffer.connection = None
def target_power(self):
    """Setting this to `True` will activate the power pins (4 and 6). If
    set to `False` the power will be deactivated.

    Raises an :exc:`IOError` if the hardware adapter does not support
    the switchable power pins.
    """
    ret = api.py_aa_target_power(self.handle, TARGET_POWER_QUERY)
    _raise_error_if_negative(ret)
    return ret
def execute_code_block(compiler, block, example_globals,
                       script_vars, gallery_conf):
    """Executes the code block of the example file"""
    blabel, bcontent, lineno = block
    # If example is not suitable to run, skip executing its blocks
    if not script_vars['execute_script'] or blabel == 'text':
        script_vars['memory_delta'].append(0)
        return ''

    cwd = os.getcwd()
    # Redirect output to stdout and
    orig_stdout = sys.stdout
    src_file = script_vars['src_file']

    # First cd in the original example dir, so that any file
    # created by the example get created in this directory
    my_stdout = MixedEncodingStringIO()
    os.chdir(os.path.dirname(src_file))

    sys_path = copy.deepcopy(sys.path)
    sys.path.append(os.getcwd())
    sys.stdout = LoggingTee(my_stdout, logger, src_file)

    try:
        dont_inherit = 1
        code_ast = compile(bcontent, src_file, 'exec',
                           ast.PyCF_ONLY_AST | compiler.flags, dont_inherit)
        ast.increment_lineno(code_ast, lineno - 1)
        # don't use unicode_literals at the top of this file or you get
        # nasty errors here on Py2.7
        _, mem = _memory_usage(_exec_once(
            compiler(code_ast, src_file, 'exec'), example_globals),
            gallery_conf)
    except Exception:
        sys.stdout.flush()
        sys.stdout = orig_stdout
        except_rst = handle_exception(sys.exc_info(), src_file, script_vars,
                                      gallery_conf)
        # python2.7: Code was read in bytes needs decoding to utf-8
        # unless future unicode_literals is imported in source which
        # make ast output unicode strings
        if hasattr(except_rst, 'decode') and not \
                isinstance(except_rst, unicode):
            except_rst = except_rst.decode('utf-8')
        code_output = u"\n{0}\n\n\n\n".format(except_rst)
        # still call this even though we won't use the images so that
        # figures are closed
        save_figures(block, script_vars, gallery_conf)
        mem = 0
    else:
        sys.stdout.flush()
        sys.stdout = orig_stdout
        sys.path = sys_path
        os.chdir(cwd)

        my_stdout = my_stdout.getvalue().strip().expandtabs()
        if my_stdout:
            stdout = CODE_OUTPUT.format(indent(my_stdout, u' ' * 4))
        else:
            stdout = ''
        images_rst = save_figures(block, script_vars, gallery_conf)
        code_output = u"\n{0}\n\n{1}\n\n".format(images_rst, stdout)
    finally:
        os.chdir(cwd)
        sys.path = sys_path
        sys.stdout = orig_stdout

    script_vars['memory_delta'].append(mem)
    return code_output
def save_blocks(self, id_env, blocks):
    """
    Save blocks from environment

    :param id_env: Environment id
    :param blocks: Lists of blocks in order. Ex: ['content one', 'content two', ...]

    :return: None

    :raise AmbienteNaoExisteError: Environment not registered.
    :raise InvalidValueError: Invalid parameter.
    :raise UserNotAuthorizedError: Permission denied.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to read the request XML or generate the response XML.
    """
    url = 'environment/save_blocks/'

    map_dict = dict()
    map_dict['id_env'] = id_env
    map_dict['blocks'] = blocks

    code, xml = self.submit({'map': map_dict}, 'POST', url)

    return self.response(code, xml)
|
Save blocks from environment
:param id_env: Environment id
:param blocks: List of blocks in order. Ex: ['content one', 'content two', ...]
:return: None
:raise AmbienteNaoExisteError: Environment not registered.
:raise InvalidValueError: Invalid parameter.
:raise UserNotAuthorizedError: Permission denied.
:raise DataBaseError: networkapi failed to access the database.
:raise XMLError: networkapi failed to read the request XML or generate the response XML.
|
def _work_chain_mod_time(self, worker_name):
""" Internal: We compute a modification time of a work chain.
Returns:
The newest modification time of any worker in the work chain.
"""
# Bottom out on sample, info or tags
    if worker_name in ('sample', 'info', 'tags'):
return datetime.datetime(1970, 1, 1)
my_mod_time = self._get_work_results('info', worker_name)['info']['mod_time']
dependencies = self.plugin_meta[worker_name]['dependencies']
if not dependencies:
return my_mod_time
else:
depend_mod_times = [my_mod_time]
for depend in dependencies:
depend_mod_times.append(self._work_chain_mod_time(depend))
return max(depend_mod_times)
|
Internal: We compute a modification time of a work chain.
Returns:
The newest modification time of any worker in the work chain.
|
def qteBindKeyGlobal(self, keysequence, macroName: str):
"""
Associate ``macroName`` with ``keysequence`` in all current
applets.
This method will bind ``macroName`` to ``keysequence`` in the
global key map and **all** local key maps. This also applies
for all applets (and their constituent widgets) yet to be
instantiated because they will inherit a copy of the global
keymap.
.. note:: This binding is signature independent.
If the ``macroName`` was not registered the method returns
**False**.
The ``keysequence`` can be specified either as a string (eg
'<ctrl>+x <ctrl>+f'), or a list of tuples containing the
constants from the ``QtCore.Qt`` name space
(eg. [(ControlModifier, Key_X), (ControlModifier, Key_F)]), or
as a ``QtmacsKeysequence`` object.
|Args|
* ``keysequence`` (**str**, **list** of **tuples**,
**QtmacsKeysequence**): key sequence to activate ``macroName``
for specified ``widgetSignature``.
* ``macroName`` (**str**): name of macro to associate with
``keysequence``.
|Returns|
**bool**: **True** if the binding was successful.
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
* **QtmacsKeysequenceError** if the provided ``keysequence``
could not be parsed.
"""
# Convert the key sequence into a QtmacsKeysequence object, or
# raise an QtmacsOtherError if the conversion is impossible.
keysequence = QtmacsKeysequence(keysequence)
# Sanity check: the macro must have been registered
# beforehand.
if not self.qteIsMacroRegistered(macroName):
msg = 'Cannot globally bind key to unknown macro <b>{}</b>.'
msg = msg.format(macroName)
self.qteLogger.error(msg, stack_info=True)
return False
# Insert/overwrite the key sequence and associate it with the
# new macro.
self._qteGlobalKeyMap.qteInsertKey(keysequence, macroName)
# Now update the local key map of every applet. Note that
# globally bound macros apply to every applet (hence the loop
# below) and every widget therein (hence the "*" parameter for
# the widget signature).
for app in self._qteAppletList:
self.qteBindKeyApplet(keysequence, macroName, app)
return True
|
Associate ``macroName`` with ``keysequence`` in all current
applets.
This method will bind ``macroName`` to ``keysequence`` in the
global key map and **all** local key maps. This also applies
for all applets (and their constituent widgets) yet to be
instantiated because they will inherit a copy of the global
keymap.
.. note:: This binding is signature independent.
If the ``macroName`` was not registered the method returns
**False**.
The ``keysequence`` can be specified either as a string (eg
'<ctrl>+x <ctrl>+f'), or a list of tuples containing the
constants from the ``QtCore.Qt`` name space
(eg. [(ControlModifier, Key_X), (ControlModifier, Key_F)]), or
as a ``QtmacsKeysequence`` object.
|Args|
* ``keysequence`` (**str**, **list** of **tuples**,
**QtmacsKeysequence**): key sequence to activate ``macroName``
for specified ``widgetSignature``.
* ``macroName`` (**str**): name of macro to associate with
``keysequence``.
|Returns|
**bool**: **True** if the binding was successful.
|Raises|
* **QtmacsArgumentError** if at least one argument has an invalid type.
* **QtmacsKeysequenceError** if the provided ``keysequence``
could not be parsed.
|
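A short usage sketch (the macro name and the `qtmacs` instance below are illustrative, not from the source):
# Bind a macro globally using the string form of the key sequence.
ok = qtmacs.qteBindKeyGlobal('<ctrl>+x <ctrl>+f', 'find-file')
if not ok:
    # The macro was not registered, or the key sequence could not be parsed.
    print('global key binding failed')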
def DbAddDevice(self, argin):
""" Add a Tango class device to a specific device server
:param argin: Str[0] = Full device server process name
Str[1] = Device name
Str[2] = Tango class name
:type: tango.DevVarStringArray
:return:
:rtype: tango.DevVoid """
self._log.debug("In DbAddDevice()")
if len(argin) < 3:
self.warn_stream("DataBase::AddDevice(): incorrect number of input arguments ")
th_exc(DB_IncorrectArguments,
"incorrect no. of input arguments, needs at least 3 (server,device,class)",
"DataBase::AddDevice()")
self.info_stream("DataBase::AddDevice(): insert %s server with device %s",argin[0],argin[1])
server_name, d_name, klass_name = argin[:3]
if len(argin) > 3:
alias = argin[3]
else:
alias = None
ret, dev_name, dfm = check_device_name(d_name)
if not ret:
th_exc(DB_IncorrectDeviceName,
"device name (" + d_name + ") syntax error (should be [tango:][//instance/]domain/family/member)",
"DataBase::AddDevice()")
# Lock table
    self.db.add_device(server_name, (dev_name, dfm), klass_name, alias=alias)
|
Add a Tango class device to a specific device server
:param argin: Str[0] = Full device server process name
Str[1] = Device name
Str[2] = Tango class name
:type: tango.DevVarStringArray
:return:
:rtype: tango.DevVoid
|
def ce(actual, predicted):
"""
Computes the classification error.
This function computes the classification error between two lists
Parameters
----------
actual : list
A list of the true classes
predicted : list
A list of the predicted classes
Returns
-------
score : double
The classification error between actual and predicted
"""
return (sum([1.0 for x,y in zip(actual,predicted) if x != y]) /
len(actual))
|
Computes the classification error.
This function computes the classification error between two lists
Parameters
----------
actual : list
A list of the true classes
predicted : list
A list of the predicted classes
Returns
-------
score : double
The classification error between actual and predicted
|
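For illustration, a quick check of `ce` on made-up labels:
# 2 of the 4 predictions differ from the true classes -> error 0.5
assert ce([1, 0, 1, 1], [1, 1, 1, 0]) == 0.5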
def _set_matplotlib_default_backend():
"""
matplotlib will try to print to a display if it is available, but don't want
to run it in interactive mode. we tried setting the backend to 'Agg'' before
importing, but it was still resulting in issues. we replace the existing
backend with 'agg' in the default matplotlibrc. This is a hack until we can
find a better solution
"""
if _matplotlib_installed():
import matplotlib
matplotlib.use('Agg', force=True)
config = matplotlib.matplotlib_fname()
if os.access(config, os.W_OK):
with file_transaction(config) as tx_out_file:
with open(config) as in_file, open(tx_out_file, "w") as out_file:
for line in in_file:
if line.split(":")[0].strip() == "backend":
out_file.write("backend: agg\n")
else:
out_file.write(line)
|
matplotlib will try to print to a display if it is available, but don't want
to run it in interactive mode. we tried setting the backend to 'Agg'' before
importing, but it was still resulting in issues. we replace the existing
backend with 'agg' in the default matplotlibrc. This is a hack until we can
find a better solution
|
def normalize_bbox(coords, ymax, scaler=2):
"""
scales all coordinates and flip y axis due to different
origin coordinates (top left vs. bottom left)
"""
return [
coords[0] * scaler,
ymax - (coords[3] * scaler),
coords[2] * scaler,
ymax - (coords[1] * scaler),
]
|
scales all coordinates and flip y axis due to different
origin coordinates (top left vs. bottom left)
|
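A worked example with invented numbers, assuming `coords` is (x0, y0, x1, y1) with a bottom-left origin:
# x values are doubled by the default scaler; y values are doubled and
# flipped against ymax, which also swaps the two y entries.
print(normalize_bbox([10, 20, 30, 40], ymax=200))  # -> [20, 120, 60, 160]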
def __parse_enrollments(self, user):
"""Parse user enrollments"""
enrollments = []
for company in user['companies']:
name = company['company_name']
org = self._organizations.get(name, None)
if not org:
org = Organization(name=name)
self._organizations[name] = org
start_date = MIN_PERIOD_DATE
end_date = MAX_PERIOD_DATE
if company['end_date']:
end_date = str_to_datetime(company['end_date'])
rol = Enrollment(start=start_date, end=end_date,
organization=org)
enrollments.append(rol)
return enrollments
|
Parse user enrollments
|
def read(self):
"""Read stdout and stdout pipes if process is no longer running."""
if self._process and self._process.poll() is not None:
ip = get_ipython()
err = ip.user_ns['error'].read().decode()
out = ip.user_ns['output'].read().decode()
else:
out = ''
err = ''
return out, err
|
Read the stdout and stderr pipes if the process is no longer running.
|
def unwrap(lines, max_wrap_lines, min_header_lines, min_quoted_lines):
"""
Returns a tuple of:
- Type ('forward', 'reply', 'headers', 'quoted')
- Range of the text at the top of the wrapped message (or None)
- Headers dict (or None)
- Range of the text of the wrapped message (or None)
- Range of the text below the wrapped message (or None)
- Whether the wrapped text needs to be unindented
"""
headers = {}
# Get line number and wrapping type.
start, end, typ = find_unwrap_start(lines, max_wrap_lines, min_header_lines, min_quoted_lines)
# We found a line indicating that it's a forward/reply.
if typ in ('forward', 'reply'):
main_type = typ
if typ == 'reply':
reply_headers = parse_reply(join_wrapped_lines(lines[start:end+1]))
if reply_headers:
headers.update(reply_headers)
# Find where the headers or the quoted section starts.
# We can set min_quoted_lines to 1 because we expect a quoted section.
start2, end2, typ = find_unwrap_start(lines[end+1:], max_wrap_lines, min_header_lines, 1)
if typ == 'quoted':
# Quoted section starts. Unindent and check if there are headers.
quoted_start = end+1+start2
unquoted = unindent_lines(lines[quoted_start:])
rest_start = quoted_start + len(unquoted)
start3, end3, typ = find_unwrap_start(unquoted, max_wrap_lines, min_header_lines, min_quoted_lines)
if typ == 'headers':
hdrs, hdrs_length = extract_headers(unquoted[start3:], max_wrap_lines)
if hdrs:
headers.update(hdrs)
rest2_start = quoted_start+start3+hdrs_length
return main_type, (0, start), headers, (rest2_start, rest_start), (rest_start, None), True
else:
return main_type, (0, start), headers, (quoted_start, rest_start), (rest_start, None), True
elif typ == 'headers':
hdrs, hdrs_length = extract_headers(lines[start+1:], max_wrap_lines)
if hdrs:
headers.update(hdrs)
rest_start = start + 1 + hdrs_length
return main_type, (0, start), headers, (rest_start, None), None, False
else:
        # Didn't find a quoted section or headers; assume that everything
        # below is the quoted text.
return main_type, (0, start), headers, (start+(start2 or 0)+1, None), None, False
# We just found headers, which usually indicates a forwarding.
elif typ == 'headers':
main_type = 'forward'
hdrs, hdrs_length = extract_headers(lines[start:], max_wrap_lines)
rest_start = start + hdrs_length
return main_type, (0, start), hdrs, (rest_start, None), None, False
# We found quoted text. Headers may be within the quoted text.
elif typ == 'quoted':
unquoted = unindent_lines(lines[start:])
rest_start = start + len(unquoted)
start2, end2, typ = find_unwrap_start(unquoted, max_wrap_lines, min_header_lines, min_quoted_lines)
if typ == 'headers':
main_type = 'forward'
hdrs, hdrs_length = extract_headers(unquoted[start2:], max_wrap_lines)
rest2_start = start + hdrs_length
return main_type, (0, start), hdrs, (rest2_start, rest_start), (rest_start, None), True
else:
main_type = 'quote'
return main_type, (None, start), None, (start, rest_start), (rest_start, None), True
|
Returns a tuple of:
- Type ('forward', 'reply', 'headers', 'quoted')
- Range of the text at the top of the wrapped message (or None)
- Headers dict (or None)
- Range of the text of the wrapped message (or None)
- Range of the text below the wrapped message (or None)
- Whether the wrapped text needs to be unindented
|
def ensemble_simulations(
t, y0=None, volume=1.0, model=None, solver='ode',
is_netfree=False, species_list=None, without_reset=False,
return_type='matplotlib', opt_args=(), opt_kwargs=None,
structures=None, rndseed=None,
n=1, nproc=None, method=None, errorbar=True,
**kwargs):
"""
Run simulations multiple times and return its ensemble.
    Arguments are almost the same as ``ecell4.util.simulation.run_simulation``.
    `observers` and `progressbar` are not available here.
Parameters
----------
n : int, optional
A number of runs. Default is 1.
nproc : int, optional
A number of processors. Ignored when method='serial'.
Default is None.
method : str, optional
The way for running multiple jobs.
Choose one from 'serial', 'multiprocessing', 'sge', 'slurm', 'azure'.
Default is None, which works as 'serial'.
**kwargs : dict, optional
        Optional keyword arguments are passed through to `run_serial`,
`run_sge`, or `run_multiprocessing`.
See each function for more details.
Returns
-------
value : list, DummyObserver, or None
Return a value suggested by ``return_type``.
When ``return_type`` is 'array', return a time course data.
When ``return_type`` is 'observer', return a DummyObserver.
        DummyObserver is a wrapper, which has almost the same interface
        as NumberObservers.
Return nothing if else.
See Also
--------
ecell4.util.simulation.run_simulation
ecell4.extra.ensemble.run_serial
ecell4.extra.ensemble.run_sge
ecell4.extra.ensemble.run_slurm
ecell4.extra.ensemble.run_multiprocessing
ecell4.extra.ensemble.run_azure
"""
y0 = y0 or {}
opt_kwargs = opt_kwargs or {}
structures = structures or {}
for key, value in kwargs.items():
if key == 'r':
return_type = value
elif key == 'v':
volume = value
elif key == 's':
solver = value
elif key == 'm':
model = value
if model is None:
model = ecell4.util.decorator.get_model(is_netfree, without_reset)
if species_list is None:
species_list = list_species(model, y0.keys())
    if rndseed is None:
        myseed = genseeds(n)
    elif (not isinstance(rndseed, bytes) or len(rndseed) != n * 4 * 2):
        raise ValueError(
            "A wrong seed for the random number generation was given. Use 'genseeds'.")
    else:
        myseed = rndseed
jobs = [{'t': t, 'y0': y0, 'volume': volume, 'model': model, 'solver': solver, 'species_list': species_list, 'structures': structures, 'myseed': myseed}]
if method is None or method.lower() == "serial":
retval = run_serial(singlerun, jobs, n=n, **kwargs)
elif method.lower() == "sge":
retval = run_sge(singlerun, jobs, n=n, nproc=nproc, **kwargs)
elif method.lower() == "slurm":
retval = run_slurm(singlerun, jobs, n=n, nproc=nproc, **kwargs)
elif method.lower() == "multiprocessing":
retval = run_multiprocessing(singlerun, jobs, n=n, nproc=nproc, **kwargs)
elif method.lower() == "azure":
retval = run_azure(singlerun, jobs, n=n, nproc=nproc, **kwargs)
else:
raise ValueError(
'Argument "method" must be one of "serial", "multiprocessing", "slurm" and "sge".')
if return_type is None or return_type in ("none", ):
return
assert len(retval) == len(jobs) == 1
if return_type in ("array", 'a'):
return retval[0]
import numpy
class DummyObserver:
def __init__(self, inputs, species_list, errorbar=True):
if len(inputs) == 0:
raise ValueError("No input was given.")
t = numpy.array(inputs[0], numpy.float64).T[0]
mean = sum([numpy.array(data, numpy.float64).T[1: ] for data in inputs])
mean /= len(inputs)
self.__data = numpy.vstack([t, mean]).T
if errorbar:
var = sum([(numpy.array(data, numpy.float64).T[1: ] - mean) ** 2
for data in inputs]) / len(inputs)
stdev = numpy.sqrt(var)
stder = stdev / numpy.sqrt(len(inputs))
# self.__error = numpy.vstack([t, stdev]).T
self.__error = numpy.vstack([t, stder]).T
else:
self.__error = None
self.__species_list = [ecell4_base.core.Species(serial) for serial in species_list]
def targets(self):
return self.__species_list
def data(self):
return self.__data
def t(self):
return self.__data.T[0]
def error(self):
return self.__error
def save(self, filename):
with open(filename, 'w') as fout:
writer = csv.writer(fout, delimiter=',', lineterminator='\n')
writer.writerow(['"{}"'.format(sp.serial()) for sp in self.__species_list])
writer.writerows(self.data())
if return_type in ("matplotlib", 'm'):
if isinstance(opt_args, (list, tuple)):
ecell4.util.viz.plot_number_observer_with_matplotlib(
DummyObserver(retval[0], species_list, errorbar), *opt_args, **opt_kwargs)
elif isinstance(opt_args, dict):
# opt_kwargs is ignored
ecell4.util.viz.plot_number_observer_with_matplotlib(
DummyObserver(retval[0], species_list, errorbar), **opt_args)
else:
raise ValueError('opt_args [{}] must be list or dict.'.format(
repr(opt_args)))
elif return_type in ("nyaplot", 'n'):
if isinstance(opt_args, (list, tuple)):
ecell4.util.viz.plot_number_observer_with_nya(
DummyObserver(retval[0], species_list, errorbar), *opt_args, **opt_kwargs)
elif isinstance(opt_args, dict):
# opt_kwargs is ignored
ecell4.util.viz.plot_number_observer_with_nya(
DummyObserver(retval[0], species_list, errorbar), **opt_args)
else:
raise ValueError('opt_args [{}] must be list or dict.'.format(
repr(opt_args)))
elif return_type in ("observer", 'o'):
return DummyObserver(retval[0], species_list, errorbar)
elif return_type in ("dataframe", 'd'):
import pandas
return [
pandas.concat([
pandas.DataFrame(dict(Time=numpy.array(data).T[0],
Value=numpy.array(data).T[i + 1],
Species=serial))
for i, serial in enumerate(species_list)])
for data in retval[0]]
else:
        raise ValueError(
            'An invalid value for "return_type" was given [{}]. '.format(str(return_type))
            + 'Use "none" if you need nothing to be returned.')
|
Run simulations multiple times and return its ensemble.
Arguments are almost the same as ``ecell4.util.simulation.run_simulation``.
`observers` and `progressbar` are not available here.
Parameters
----------
n : int, optional
A number of runs. Default is 1.
nproc : int, optional
A number of processors. Ignored when method='serial'.
Default is None.
method : str, optional
The way for running multiple jobs.
Choose one from 'serial', 'multiprocessing', 'sge', 'slurm', 'azure'.
Default is None, which works as 'serial'.
**kwargs : dict, optional
    Optional keyword arguments are passed through to `run_serial`,
`run_sge`, or `run_multiprocessing`.
See each function for more details.
Returns
-------
value : list, DummyObserver, or None
Return a value suggested by ``return_type``.
When ``return_type`` is 'array', return a time course data.
When ``return_type`` is 'observer', return a DummyObserver.
DummyObserver is a wrapper, which has almost the same interface
as NumberObservers.
Return nothing if else.
See Also
--------
ecell4.util.simulation.run_simulation
ecell4.extra.ensemble.run_serial
ecell4.extra.ensemble.run_sge
ecell4.extra.ensemble.run_slurm
ecell4.extra.ensemble.run_multiprocessing
ecell4.extra.ensemble.run_azure
|
def wait(self, pattern, seconds=None):
""" Searches for an image pattern in the given region, given a specified timeout period
Functionally identical to find(). If a number is passed instead of a pattern,
just waits the specified number of seconds.
Sikuli supports OCR search with a text parameter. This does not (yet).
"""
if isinstance(pattern, (int, float)):
if pattern == FOREVER:
while True:
time.sleep(1) # Infinite loop
time.sleep(pattern)
return None
if seconds is None:
seconds = self.autoWaitTimeout
findFailedRetry = True
timeout = time.time() + seconds
while findFailedRetry:
while True:
match = self.exists(pattern)
if match:
return match
if time.time() >= timeout:
break
path = pattern.path if isinstance(pattern, Pattern) else pattern
findFailedRetry = self._raiseFindFailed("Could not find pattern '{}'".format(path))
if findFailedRetry:
time.sleep(self._repeatWaitTime)
return None
|
Searches for an image pattern in the given region, given a specified timeout period
Functionally identical to find(). If a number is passed instead of a pattern,
just waits the specified number of seconds.
Sikuli supports OCR search with a text parameter. This does not (yet).
|
def get_train_eval_files(input_dir):
"""Get preprocessed training and eval files."""
data_dir = _get_latest_data_dir(input_dir)
train_pattern = os.path.join(data_dir, 'train*.tfrecord.gz')
eval_pattern = os.path.join(data_dir, 'eval*.tfrecord.gz')
train_files = file_io.get_matching_files(train_pattern)
eval_files = file_io.get_matching_files(eval_pattern)
return train_files, eval_files
|
Get preprocessed training and eval files.
|
def try_instance_init(self, instance, late_start=False):
"""Try to "initialize" the given module instance.
:param instance: instance to init
:type instance: object
:param late_start: If late_start, don't look for last_init_try
:type late_start: bool
:return: True on successful init. False if instance init method raised any Exception.
:rtype: bool
"""
try:
instance.init_try += 1
# Maybe it's a retry
if not late_start and instance.init_try > 1:
            # Do not retry too frequently (avoid tight init loops)
if instance.last_init_try > time.time() - MODULE_INIT_PERIOD:
logger.info("Too early to retry initialization, retry period is %d seconds",
MODULE_INIT_PERIOD)
# logger.info("%s / %s", instance.last_init_try, time.time())
return False
instance.last_init_try = time.time()
logger.info("Trying to initialize module: %s", instance.name)
# If it's an external module, create/update Queues()
if instance.is_external:
instance.create_queues(self.daemon.sync_manager)
# The module instance init function says if initialization is ok
if not instance.init():
logger.warning("Module %s initialisation failed.", instance.name)
return False
logger.info("Module %s is initialized.", instance.name)
except Exception as exp: # pylint: disable=broad-except
# pragma: no cover, simple protection
msg = "The module instance %s raised an exception " \
"on initialization: %s, I remove it!" % (instance.name, str(exp))
self.configuration_errors.append(msg)
logger.error(msg)
logger.exception(exp)
return False
return True
|
Try to "initialize" the given module instance.
:param instance: instance to init
:type instance: object
:param late_start: If late_start, don't look for last_init_try
:type late_start: bool
:return: True on successful init. False if instance init method raised any Exception.
:rtype: bool
|
def register_post_execute(self, func):
"""Register a function for calling after code execution.
"""
if not callable(func):
raise ValueError('argument %s must be callable' % func)
self._post_execute[func] = True
|
Register a function for calling after code execution.
|
def score(self, context, models, revids):
"""
        Generate scores for the given models applied to a sequence of revisions.
:Parameters:
context : str
The name of the context -- usually the database name of a wiki
models : `iterable`
            The names of the models to apply
revids : `iterable`
A sequence of revision IDs to score.
"""
if isinstance(revids, int):
rev_ids = [revids]
else:
rev_ids = [int(rid) for rid in revids]
return self._score(context, models, rev_ids)
|
Generate scores for the given models applied to a sequence of revisions.
:Parameters:
context : str
The name of the context -- usually the database name of a wiki
models : `iterable`
    The names of the models to apply
revids : `iterable`
A sequence of revision IDs to score.
|
def mysql(
self,
tableNamePrefix="TNS",
dirPath=None):
"""*Render the results as MySQL Insert statements*
**Key Arguments:**
- ``tableNamePrefix`` -- the prefix for the database table names to assign the insert statements to. Default *TNS*.
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `mysqlSources` -- the top-level transient data
- `mysqlPhot` -- all photometry associated with the transients
- `mysqlSpec` -- all spectral data associated with the transients
- `mysqlFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in mysql insert format:
.. code-block:: python
mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles = tns.mysql("TNS")
print mysqlSources
.. code-block:: text
INSERT INTO `TNS_sources` (TNSId,TNSName,dateCreated,decDeg,decSex,discDate,discMag,discMagFilter,discSurvey,discoveryName,hostName,hostRedshift,objectUrl,raDeg,raSex,separationArcsec,separationEastArcsec,separationNorthArcsec,specType,transRedshift) VALUES ("2016asf" ,"SN2016asf" ,"2016-09-20T11:22:13" ,"31.1126" ,"+31:06:45.36" ,"2016-03-06 08:09:36" ,"17.1" ,"V-Johnson" ,"ASAS-SN" ,"ASASSN-16cs" ,"KUG 0647+311" ,null ,"http://wis-tns.weizmann.ac.il/object/2016asf" ,"102.653041667" ,"06:50:36.73" ,"0.66" ,"-0.13" ,"0.65" ,"SN Ia" ,"0.021") ON DUPLICATE KEY UPDATE TNSId="2016asf", TNSName="SN2016asf", dateCreated="2016-09-20T11:22:13", decDeg="31.1126", decSex="+31:06:45.36", discDate="2016-03-06 08:09:36", discMag="17.1", discMagFilter="V-Johnson", discSurvey="ASAS-SN", discoveryName="ASASSN-16cs", hostName="KUG 0647+311", hostRedshift=null, objectUrl="http://wis-tns.weizmann.ac.il/object/2016asf", raDeg="102.653041667", raSex="06:50:36.73", separationArcsec="0.66", separationEastArcsec="-0.13", separationNorthArcsec="0.65", specType="SN Ia", transRedshift="0.021", updated=1, dateLastModified=NOW() ;
        You can save the results to file by passing in a directory path within which to save the files. The four flavours of data (sources, photometry, spectra and files) are saved to separate files, but all data can be associated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.mysql("TNS", "~/tns")
.. image:: https://i.imgur.com/CozySPW.png
:width: 800px
:alt: mysql output
"""
if dirPath:
p = self._file_prefix()
createStatement = """
CREATE TABLE `%(tableNamePrefix)s_sources` (
`primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
`TNSId` varchar(20) NOT NULL,
`TNSName` varchar(20) DEFAULT NULL,
`dateCreated` datetime DEFAULT NULL,
`decDeg` double DEFAULT NULL,
`decSex` varchar(45) DEFAULT NULL,
`discDate` datetime DEFAULT NULL,
`discMag` double DEFAULT NULL,
`discMagFilter` varchar(45) DEFAULT NULL,
`discSurvey` varchar(100) DEFAULT NULL,
`discoveryName` varchar(100) DEFAULT NULL,
`objectUrl` varchar(200) DEFAULT NULL,
`raDeg` double DEFAULT NULL,
`raSex` varchar(45) DEFAULT NULL,
`specType` varchar(100) DEFAULT NULL,
`transRedshift` double DEFAULT NULL,
`updated` tinyint(4) DEFAULT '0',
`dateLastModified` datetime DEFAULT NULL,
`hostName` VARCHAR(100) NULL DEFAULT NULL,
`hostRedshift` DOUBLE NULL DEFAULT NULL,
`survey` VARCHAR(100) NULL DEFAULT NULL,
PRIMARY KEY (`primaryId`),
UNIQUE KEY `tnsid` (`TNSId`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
mysqlSources = self.sourceResults.mysql(
tableNamePrefix + "_sources", filepath=dirPath + "/" + p + "sources.sql", createStatement=createStatement)
createStatement = """
CREATE TABLE `%(tableNamePrefix)s_photometry` (
`primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
`TNSId` varchar(20) NOT NULL,
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`exptime` double DEFAULT NULL,
`filter` varchar(100) DEFAULT NULL,
`limitingMag` tinyint(4) DEFAULT NULL,
`mag` double DEFAULT NULL,
`magErr` double DEFAULT NULL,
`magUnit` varchar(100) DEFAULT NULL,
`objectName` varchar(100) DEFAULT NULL,
`obsdate` datetime DEFAULT NULL,
`reportAddedDate` datetime DEFAULT NULL,
`suggestedType` varchar(100) DEFAULT NULL,
`survey` varchar(100) DEFAULT NULL,
`telescope` varchar(100) DEFAULT NULL,
`updated` tinyint(4) DEFAULT '0',
`dateLastModified` datetime DEFAULT NULL,
`remarks` VARCHAR(800) NULL DEFAULT NULL,
`sourceComment` VARCHAR(800) NULL DEFAULT NULL,
PRIMARY KEY (`primaryId`),
UNIQUE KEY `tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
UNIQUE INDEX `u_tnsid_survey_obsdate` (`TNSId` ASC, `survey` ASC, `obsdate` ASC),
UNIQUE INDEX `u_tnsid_obsdate_objname` (`TNSId` ASC, `obsdate` ASC, `objectName` ASC)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
mysqlPhot = self.photResults.mysql(
tableNamePrefix + "_photometry", filepath=dirPath + "/" + p + "phot.sql", createStatement=createStatement)
createStatement = """
CREATE TABLE `%(tableNamePrefix)s_spectra` (
`primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
`TNSId` varchar(45) NOT NULL,
`TNSuser` varchar(45) DEFAULT NULL,
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`exptime` double DEFAULT NULL,
`obsdate` datetime DEFAULT NULL,
`reportAddedDate` datetime DEFAULT NULL,
`specType` varchar(100) DEFAULT NULL,
`survey` varchar(100) DEFAULT NULL,
`telescope` varchar(100) DEFAULT NULL,
`transRedshift` double DEFAULT NULL,
`updated` tinyint(4) DEFAULT '0',
`dateLastModified` datetime DEFAULT NULL,
`remarks` VARCHAR(800) NULL DEFAULT NULL,
`sourceComment` VARCHAR(800) NULL DEFAULT NULL,
PRIMARY KEY (`primaryId`),
UNIQUE KEY `u_tnsid_survey_obsdate` (`TNSId`,`survey`,`obsdate`),
UNIQUE KEY `u_id_user_obsdate` (`TNSId`,`TNSuser`,`obsdate`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
mysqlSpec = self.specResults.mysql(
tableNamePrefix + "_spectra", filepath=dirPath + "/" + p + "spec.sql", createStatement=createStatement)
createStatement = """
CREATE TABLE `%(tableNamePrefix)s_files` (
`primaryId` bigint(20) NOT NULL AUTO_INCREMENT COMMENT 'An internal counter',
`TNSId` varchar(100) NOT NULL,
`dateCreated` datetime DEFAULT CURRENT_TIMESTAMP,
`dateObs` datetime DEFAULT NULL,
`filename` varchar(200) DEFAULT NULL,
`spec1phot2` tinyint(4) DEFAULT NULL,
`url` varchar(800) DEFAULT NULL,
`updated` tinyint(4) DEFAULT '0',
`dateLastModified` datetime DEFAULT NULL,
`comment` VARCHAR(800) NULL DEFAULT NULL,
PRIMARY KEY (`primaryId`),
UNIQUE KEY `tnsid_url` (`TNSId`,`url`)
) ENGINE=MyISAM AUTO_INCREMENT=0 DEFAULT CHARSET=latin1;
""" % locals()
mysqlFiles = self.relatedFilesResults.mysql(
tableNamePrefix + "_files", filepath=dirPath + "/" + p + "relatedFiles.sql", createStatement=createStatement)
else:
mysqlSources = self.sourceResults.mysql(
tableNamePrefix + "_sources")
mysqlPhot = self.photResults.mysql(tableNamePrefix + "_photometry")
mysqlSpec = self.specResults.mysql(tableNamePrefix + "_spectra")
mysqlFiles = self.relatedFilesResults.mysql(
tableNamePrefix + "_files")
return mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles
|
*Render the results as MySQL Insert statements*
**Key Arguments:**
- ``tableNamePrefix`` -- the prefix for the database table names to assign the insert statements to. Default *TNS*.
- ``dirPath`` -- the path to the directory to save the rendered results to. Default *None*
**Return:**
- `mysqlSources` -- the top-level transient data
- `mysqlPhot` -- all photometry associated with the transients
- `mysqlSpec` -- all spectral data associated with the transients
- `mysqlFiles` -- all files associated with the matched transients found on the tns
**Usage:**
To render the results in mysql insert format:
.. code-block:: python
mysqlSources, mysqlPhot, mysqlSpec, mysqlFiles = tns.mysql("TNS")
print mysqlSources
.. code-block:: text
INSERT INTO `TNS_sources` (TNSId,TNSName,dateCreated,decDeg,decSex,discDate,discMag,discMagFilter,discSurvey,discoveryName,hostName,hostRedshift,objectUrl,raDeg,raSex,separationArcsec,separationEastArcsec,separationNorthArcsec,specType,transRedshift) VALUES ("2016asf" ,"SN2016asf" ,"2016-09-20T11:22:13" ,"31.1126" ,"+31:06:45.36" ,"2016-03-06 08:09:36" ,"17.1" ,"V-Johnson" ,"ASAS-SN" ,"ASASSN-16cs" ,"KUG 0647+311" ,null ,"http://wis-tns.weizmann.ac.il/object/2016asf" ,"102.653041667" ,"06:50:36.73" ,"0.66" ,"-0.13" ,"0.65" ,"SN Ia" ,"0.021") ON DUPLICATE KEY UPDATE TNSId="2016asf", TNSName="SN2016asf", dateCreated="2016-09-20T11:22:13", decDeg="31.1126", decSex="+31:06:45.36", discDate="2016-03-06 08:09:36", discMag="17.1", discMagFilter="V-Johnson", discSurvey="ASAS-SN", discoveryName="ASASSN-16cs", hostName="KUG 0647+311", hostRedshift=null, objectUrl="http://wis-tns.weizmann.ac.il/object/2016asf", raDeg="102.653041667", raSex="06:50:36.73", separationArcsec="0.66", separationEastArcsec="-0.13", separationNorthArcsec="0.65", specType="SN Ia", transRedshift="0.021", updated=1, dateLastModified=NOW() ;
You can save the results to file by passing in a directory path within which to save the files. The four flavours of data (sources, photometry, spectra and files) are saved to separate files, but all data can be associated with its transient source using the transient's unique `TNSId`.
.. code-block:: python
tns.mysql("TNS", "~/tns")
.. image:: https://i.imgur.com/CozySPW.png
:width: 800px
:alt: mysql output
|
def _set_parameters(self, parameters):
"""Sort out the various possible parameter inputs and return a config
object (dict)
We have multiple input formats:
1) a list, tuple, or numpy.ndarray, containing the linear parameters
in the following order:
* for single term: rho0, m1, tau1, c1
        * for multiple terms: rho0, m1, m2, ..., tau1, tau2, ..., c1, c2, ...
2) a dictionary with the entries "rho0", "m", "tau", "c"
2b) if the dictionary entries for "m", "tau", and "c" are lists, the
        entries correspond to multiple polarisation terms
"""
nr_f = self.f.size
# sort out parameters
rho0, m, tau, c = self._sort_parameters(parameters)
newsize = (nr_f, len(m))
# rho0_resized = np.resize(rho0, newsize)
m_resized = np.resize(m, newsize)
tau_resized = np.resize(tau, newsize)
c_resized = np.resize(c, newsize)
omega = np.atleast_2d(2 * np.pi * self.f).T
self.w = np.resize(omega, (len(m), nr_f)).T
self.rho0 = rho0
self.m = m_resized
self.tau = tau_resized
self.c = c_resized
# compute some common terms
self.otc = (self.w * self.tau) ** self.c
self.otc2 = (self.w * self.tau) ** (2 * self.c)
self.ang = self.c * np.pi / 2.0 # rad
self.denom = 1 + 2 * self.otc * np.cos(self.ang) + self.otc2
|
Sort out the various possible parameter inputs and return a config
object (dict)
We have multiple input formats:
1) a list, tuple, or numpy.ndarray, containing the linear parameters
in the following order:
* for single term: rho0, m1, tau1, c1
    * for multiple terms: rho0, m1, m2, ..., tau1, tau2, ..., c1, c2, ...
2) a dictionary with the entries "rho0", "m", "tau", "c"
2b) if the dictionary entries for "m", "tau", and "c" are lists, the
entries correspond to multiple polarisation terms
|
def viewbox_key_event(self, event):
"""ViewBox key event handler
Parameters
----------
event : instance of Event
The event.
"""
PerspectiveCamera.viewbox_key_event(self, event)
if event.handled or not self.interactive:
return
# Ensure the timer runs
if not self._timer.running:
self._timer.start()
if event.key in self._keymap:
val_dims = self._keymap[event.key]
val = val_dims[0]
        # Brake or accelerate?
if val == 0:
vec = self._brake
val = 1
else:
vec = self._acc
# Set
if event.type == 'key_release':
val = 0
for dim in val_dims[1:]:
factor = 1.0
vec[dim-1] = val * factor
|
ViewBox key event handler
Parameters
----------
event : instance of Event
The event.
|
def send_photo(self, *args, **kwargs):
"""See :func:`send_photo`"""
return send_photo(*args, **self._merge_overrides(**kwargs)).run()
|
See :func:`send_photo`
|
def execute_update(args):
"""Execute the update based on command line args and returns a dictionary
with 'execution result, ''response code', 'response info' and
'process friendly message'.
"""
provider_class = getattr(dnsupdater,
dnsupdater.AVAILABLE_PLUGINS.get(args.provider))
updater_options = {}
process_message = None
auth = None
if args.store: # --store argument
if provider_class.auth_type == 'T':
user_arg = args.usertoken or utils.read_input(
"Paste your auth token: ")
auth = authinfo.ApiAuth(usertoken=user_arg)
else:
user_arg = args.usertoken or utils.read_input(
"Type your username: ")
pass_arg = args.password or getpass.getpass("Type your password: ")
auth = authinfo.ApiAuth(user_arg, pass_arg)
authinfo.store(auth, args.provider, args.config)
exec_result = EXECUTION_RESULT_OK
if not args.hostname:
update_ddns = False
process_message = "Auth info stored."
else:
update_ddns = True
    # auth information passed via arguments
elif args.usertoken and args.hostname:
if provider_class.auth_type == 'T':
auth = authinfo.ApiAuth(args.usertoken)
else:
auth = authinfo.ApiAuth(args.usertoken, args.password)
update_ddns = True
exec_result = EXECUTION_RESULT_OK
elif args.hostname:
if authinfo.exists(args.provider, args.config):
auth = authinfo.load(args.provider, args.config)
update_ddns = True
exec_result = EXECUTION_RESULT_OK
else:
update_ddns = False
exec_result = EXECUTION_RESULT_NOK
process_message = "No stored auth information found for " \
"provider: '%s'" % args.provider
else: # no arguments
update_ddns = False
exec_result = EXECUTION_RESULT_NOK
process_message = "Warning: The hostname to be updated must be " \
"provided.\nUsertoken and password can be either " \
"provided via command line or stored with --store " \
"option.\nExecute noipy --help for more details."
if update_ddns and args.provider == 'generic':
if args.url:
if not URL_RE.match(args.url):
process_message = "Malformed URL."
exec_result = EXECUTION_RESULT_NOK
update_ddns = False
else:
updater_options['url'] = args.url
else:
process_message = "Must use --url if --provider is 'generic' " \
"(default)"
exec_result = EXECUTION_RESULT_NOK
update_ddns = False
response_code = None
response_text = None
if update_ddns:
ip_address = args.ip if args.ip else utils.get_ip()
if not ip_address:
process_message = "Unable to get IP address. Check connection."
exec_result = EXECUTION_RESULT_NOK
elif ip_address == utils.get_dns_ip(args.hostname):
process_message = "No update required."
else:
updater = provider_class(auth, args.hostname, updater_options)
print("Updating hostname '%s' with IP address %s "
"[provider: '%s']..."
% (args.hostname, ip_address, args.provider))
response_code, response_text = updater.update_dns(ip_address)
process_message = updater.status_message
proc_result = {
'exec_result': exec_result,
'response_code': response_code,
'response_text': response_text,
'process_message': process_message,
}
return proc_result
|
Execute the update based on command line args and return a dictionary
with 'execution result', 'response code', 'response text' and
'process friendly message'.
|
def to_sql(cls, qc, **kwargs):
"""Write records stored in a DataFrame to a SQL database.
Args:
qc: the query compiler of the DF that we want to run to_sql on
kwargs: parameters for pandas.to_sql(**kwargs)
"""
# we first insert an empty DF in order to create the full table in the database
# This also helps to validate the input against pandas
# we would like to_sql() to complete only when all rows have been inserted into the database
# since the mapping operation is non-blocking, each partition will return an empty DF
# so at the end, the blocking operation will be this empty DF to_pandas
empty_df = qc.head(1).to_pandas().head(0)
empty_df.to_sql(**kwargs)
# so each partition will append its respective DF
kwargs["if_exists"] = "append"
columns = qc.columns
def func(df, **kwargs):
df.columns = columns
df.to_sql(**kwargs)
return pandas.DataFrame()
map_func = qc._prepare_method(func, **kwargs)
result = qc._map_across_full_axis(1, map_func)
# blocking operation
result.to_pandas()
|
Write records stored in a DataFrame to a SQL database.
Args:
qc: the query compiler of the DF that we want to run to_sql on
kwargs: parameters for pandas.to_sql(**kwargs)
|
def summarise_file_as_html(fname):
"""
    takes a large data file and produces an HTML summary
"""
txt = '<H1>' + fname + '</H1>'
num_lines = 0
print('Reading OpenCyc file - ', fname)
with open(ip_folder + os.sep + fname, 'r') as f:
txt += '<PRE>'
for line in f:
if line.strip() != '':
num_lines += 1
if num_lines < 80:
txt += str(num_lines) + ': ' + escape_html(line) + ''
txt += '</PRE>'
txt += 'Total lines = ' + str(num_lines) + '<BR><BR>'
return txt
|
takes a large data file and produces an HTML summary
|
def icasa(taskname, mult=None, clearstart=False, loadthese=None, **kw0):
"""
runs a CASA task given a list of options.
    A given task can be run multiple times with different options;
    in this case the options must be passed as a list/tuple of dictionaries via mult, e.g.
    icasa('exportfits',mult=[{'imagename':'img1.image','fitsimage':'image1.fits'},{'imagename':'img2.image','fitsimage':'image2.fits'}]).
    Options you want to be common between the multiple commands should be specified as keyword args.
"""
# create temp directory from which to run casapy
td = tempfile.mkdtemp(dir='.')
# we want get back to the working directory once casapy is launched
cdir = os.path.realpath('.')
# load modules in loadthese
_load = ""
if "os" not in loadthese or "import os" not in loadthese:
loadthese.append("os")
if loadthese:
exclude = filter(lambda line: line.startswith("import") or line.startswith("from"), loadthese)
for line in loadthese:
if line not in exclude:
line = "import %s"%line
_load += "%s\n"%line
if mult:
if isinstance(mult,(tuple,list)):
for opts in mult:
opts.update(kw0)
else:
            mult.update(kw0)
mult = [mult]
else:
mult = [kw0]
run_cmd = """ """
for kw in mult:
task_cmds = []
for key,val in kw.items():
if isinstance(val,(str, unicode)):
val = '"%s"'%val
            task_cmds.append('%s=%s'%(key,val))
task_cmds = ", ".join(task_cmds)
run_cmd += """
%s
os.chdir('%s')
%s
%s(%s)
"""%(_load, cdir,"clearstart()" if clearstart else "", taskname, task_cmds)
tf = tempfile.NamedTemporaryFile(suffix='.py')
tf.write(run_cmd)
tf.flush()
t0 = time.time()
# all logging information will be in the pyxis log files
print("Running {}".format(run_cmd))
xrun("cd", [td, "&& casa --nologger --log2term --nologfile -c", tf.name])
# log taskname.last
task_last = '%s.last'%taskname
if os.path.exists(task_last):
with open(task_last,'r') as last:
print('%s.last is: \n %s'%(taskname, last.read()))
# remove temp directory. This also gets rid of the casa log files; so long suckers!
xrun("rm", ["-fr ", td, task_last])
tf.close()
|
runs a CASA task given a list of options.
A given task can be run multiple times with different options;
in this case the options must be passed as a list/tuple of dictionaries via mult, e.g.
icasa('exportfits',mult=[{'imagename':'img1.image','fitsimage':'image1.fits'},{'imagename':'img2.image','fitsimage':'image2.fits'}]).
Options you want to be common between the multiple commands should be specified as keyword args.
|
def validate_one_format(jupytext_format):
"""Validate extension and options for the given format"""
if not isinstance(jupytext_format, dict):
raise JupytextFormatError('Jupytext format should be a dictionary')
for key in jupytext_format:
if key not in _VALID_FORMAT_INFO + _VALID_FORMAT_OPTIONS:
raise JupytextFormatError("Unknown format option '{}' - should be one of '{}'".format(
key, "', '".join(_VALID_FORMAT_OPTIONS)))
value = jupytext_format[key]
if key in _BINARY_FORMAT_OPTIONS:
if not isinstance(value, bool):
raise JupytextFormatError("Format option '{}' should be a bool, not '{}'".format(key, str(value)))
if 'extension' not in jupytext_format:
raise JupytextFormatError('Missing format extension')
ext = jupytext_format['extension']
if ext not in NOTEBOOK_EXTENSIONS + ['.auto']:
raise JupytextFormatError("Extension '{}' is not a notebook extension. Please use one of '{}'.".format(
ext, "', '".join(NOTEBOOK_EXTENSIONS + ['.auto'])))
return jupytext_format
|
Validate extension and options for the given format
|
def init(self, game_info, static_data):
"""Take the game info and the static data needed to set up the game.
This must be called before render or get_actions for each game or restart.
Args:
game_info: A `sc_pb.ResponseGameInfo` object for this game.
static_data: A `StaticData` object for this game.
Raises:
ValueError: if there is nothing to render.
"""
self._game_info = game_info
self._static_data = static_data
if not game_info.HasField("start_raw"):
raise ValueError("Raw observations are required for the renderer.")
self._map_size = point.Point.build(game_info.start_raw.map_size)
if game_info.options.HasField("feature_layer"):
fl_opts = game_info.options.feature_layer
self._feature_screen_px = point.Point.build(fl_opts.resolution)
self._feature_minimap_px = point.Point.build(fl_opts.minimap_resolution)
self._feature_camera_width_world_units = fl_opts.width
self._render_rgb = False
else:
self._feature_screen_px = self._feature_minimap_px = None
if game_info.options.HasField("render"):
render_opts = game_info.options.render
self._rgb_screen_px = point.Point.build(render_opts.resolution)
self._rgb_minimap_px = point.Point.build(render_opts.minimap_resolution)
self._render_rgb = True
else:
self._rgb_screen_px = self._rgb_minimap_px = None
if not self._feature_screen_px and not self._rgb_screen_px:
raise ValueError("Nothing to render.")
try:
self.init_window()
self._initialized = True
except pygame.error as e:
self._initialized = False
logging.error("-" * 60)
logging.error("Failed to initialize pygame: %s", e)
logging.error("Continuing without pygame.")
logging.error("If you're using ssh and have an X server, try ssh -X.")
logging.error("-" * 60)
self._obs = sc_pb.ResponseObservation()
self._queued_action = None
self._queued_hotkey = ""
self._select_start = None
self._alerts = {}
self._past_actions = []
self._help = False
|
Take the game info and the static data needed to set up the game.
This must be called before render or get_actions for each game or restart.
Args:
game_info: A `sc_pb.ResponseGameInfo` object for this game.
static_data: A `StaticData` object for this game.
Raises:
ValueError: if there is nothing to render.
|
def auto(cls, func):
"""
The ``auto`` decorator wraps ``func`` in a context manager so that a handle is obtained.
.. note::
Please note, that most functions require the handle to continue being alive for future calls to data
retrieved from the function. In such cases, it's advisable to use the `requires_refcount` decorator, and
force the program using the library with obtaining a handle (and keeping it active.)
"""
@functools.wraps(func)
def auto_claim_handle(*args, **kwargs):
with cls():
return func(*args, **kwargs)
return auto_claim_handle
|
The ``auto`` decorator wraps ``func`` in a context manager so that a handle is obtained.
.. note::
Please note, that most functions require the handle to continue being alive for future calls to data
retrieved from the function. In such cases, it's advisable to use the `requires_refcount` decorator, and
force the program using the library with obtaining a handle (and keeping it active.)
|
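A minimal, self-contained sketch of the pattern (the `Session` context manager below is invented for illustration):
import functools

class Session:
    def __enter__(self):
        print('handle acquired')
        return self

    def __exit__(self, *exc):
        print('handle released')

    @classmethod
    def auto(cls, func):
        @functools.wraps(func)
        def auto_claim_handle(*args, **kwargs):
            with cls():
                return func(*args, **kwargs)
        return auto_claim_handle

@Session.auto
def do_work():
    print('working inside the handle')

do_work()  # -> handle acquired / working inside the handle / handle released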
def get_functions_writing_to_variable(self, variable):
'''
    Return the functions writing to the variable
'''
return [f for f in self.functions if f.is_writing(variable)]
|
Return the functions writing to the variable
|
def reload(self):
"""
    Automatically reloads the configuration from the external file.
"""
    # read the configuration file as UTF-8
if self.config_filename != "":
try:
# content = open(self.config_filename).read()
self.config_content = codecs.open(self.config_filename, "r", "utf8").read()
except Exception as e:
raise Exception(
"{}: json/yaml settings error reading: {}".format(self.config_filename, e))
# init
self.is_json = False
self.is_yaml = False
    # parse the contents
error_str = ""
error_reading_json, error_reading_yaml = False, False
try:
self.settings = json.loads(self.config_content)
self.is_json = True
except Exception as e:
error_str = e
error_reading_json = True
if error_reading_json:
try:
            # preserve duplicated keys
# https://stackoverflow.com/questions/44904290/getting-duplicate-keys-in-yaml-using-python
import yaml
content = self.config_content.replace("\t", " ")
self.settings = yaml.load(content)
self.is_yaml = True
except Exception as e:
error_str = e
error_reading_yaml = True
if error_reading_json and error_reading_yaml:
raise Exception(
"{}: json/yaml settings incorrect: {}".format(self.config_filename, error_str))
|
Automatically reloads the configuration from the external file.
|
def _dumpArrayToFile(filelike, array):
"""Serializes a 1-dimensional ``numpy.array`` to bytes, writes the bytes to
the filelike object and returns a dictionary with metadata, necessary to
restore the ``numpy.array`` from the file.
:param filelike: can be a file or a file-like object that provides the
methods ``.write()`` and ``.tell()``.
:param array: a 1-dimensional ``numpy.array``
:returns: a metadata dictionary ::
{'start': start position in the file, 'end': end position in the file,
'size': size of the array, 'dtype': numpy data type of the array
}
"""
bytedata = array.tobytes('C')
start = filelike.tell()
end = start + len(bytedata)
metadata = {'start': start, 'end': end, 'size': array.size,
'dtype': array.dtype.name
}
filelike.write(bytedata)
return metadata
|
Serializes a 1-dimensional ``numpy.array`` to bytes, writes the bytes to
the filelike object and returns a dictionary with metadata, necessary to
restore the ``numpy.array`` from the file.
:param filelike: can be a file or a file-like object that provides the
methods ``.write()`` and ``.tell()``.
:param array: a 1-dimensional ``numpy.array``
:returns: a metadata dictionary ::
{'start': start position in the file, 'end': end position in the file,
'size': size of the array, 'dtype': numpy data type of the array
}
|
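An illustrative round trip through an in-memory buffer, using the returned metadata to rebuild the array:
import io
import numpy as np

buf = io.BytesIO()
meta = _dumpArrayToFile(buf, np.arange(5, dtype=np.int32))
# Seek back to the recorded start and read exactly end - start bytes.
buf.seek(meta['start'])
raw = buf.read(meta['end'] - meta['start'])
restored = np.frombuffer(raw, dtype=meta['dtype'])
assert restored.size == meta['size'] and (restored == np.arange(5)).all()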
def delete_record(self, domain, recordid, params=None):
''' /v1/dns/delete_record
POST - account
Deletes an individual DNS record
Link: https://www.vultr.com/api/#dns_delete_record
'''
params = update_params(params, {
'domain': domain,
'RECORDID': recordid
})
return self.request('/v1/dns/delete_record', params, 'POST')
|
/v1/dns/delete_record
POST - account
Deletes an individual DNS record
Link: https://www.vultr.com/api/#dns_delete_record
|
def _format(self, object, stream, indent, allowance, context, level):
"""
Recursive part of the formatting
"""
try:
PrettyPrinter._format(self, object, stream, indent, allowance, context, level)
except Exception as e:
stream.write(_format_exception(e))
|
Recursive part of the formatting
|
def has(self):
"""Whether the cache file exists in the file system."""
self._done = os.path.exists(self._cache_file)
return self._done or self._out is not None
|
Whether the cache file exists in the file system.
|
def levenshtein_distance(word1, word2):
"""
Computes the Levenshtein distance.
[Reference]: https://en.wikipedia.org/wiki/Levenshtein_distance
[Article]: Levenshtein, Vladimir I. (February 1966). "Binary codes capable of correcting deletions,
    insertions, and reversals". Soviet Physics Doklady 10 (8): 707–710.
[Implementation]: https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python
"""
if len(word1) < len(word2):
return levenshtein_distance(word2, word1)
if len(word2) == 0:
return len(word1)
previous_row = list(range(len(word2) + 1))
for i, char1 in enumerate(word1):
current_row = [i + 1]
for j, char2 in enumerate(word2):
insertions = previous_row[j + 1] + 1
deletions = current_row[j] + 1
substitutions = previous_row[j] + (char1 != char2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
|
Computes the Levenshtein distance.
[Reference]: https://en.wikipedia.org/wiki/Levenshtein_distance
[Article]: Levenshtein, Vladimir I. (February 1966). "Binary codes capable of correcting deletions,
insertions,and reversals". Soviet Physics Doklady 10 (8): 707–710.
[Implementation]: https://en.wikibooks.org/wiki/Algorithm_Implementation/Strings/Levenshtein_distance#Python
|
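The classic textbook pair makes a handy sanity check:
# kitten -> sitting: substitute k->s, substitute e->i, append g
assert levenshtein_distance('kitten', 'sitting') == 3
assert levenshtein_distance('', 'abc') == 3  # all insertions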
def inject2(module_name=None, module_prefix=None, DEBUG=False, module=None, N=1):
""" wrapper that depricates print_ and printDBG """
if module_prefix is None:
module_prefix = '[%s]' % (module_name,)
noinject(module_name, module_prefix, DEBUG, module, N=N)
module = _get_module(module_name, module)
rrr = make_module_reload_func(None, module_prefix, module)
profile_ = make_module_profile_func(None, module_prefix, module)
print = make_module_print_func(module)
return print, rrr, profile_
|
wrapper that deprecates print_ and printDBG
|
def to_dict(self):
"""Return the schema as a dict ready to be serialized.
"""
schema = super(Schema, self).to_dict()
schema['$schema'] = "http://json-schema.org/draft-04/schema#"
if self._id:
schema['id'] = self._id
if self._desc:
schema['description'] = self._desc
return schema
|
Return the schema as a dict ready to be serialized.
|
def downside_risk(returns,
required_return=0,
period=DAILY,
annualization=None,
out=None):
"""
Determines the downside deviation below a threshold
Parameters
----------
returns : pd.Series or np.ndarray or pd.DataFrame
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~empyrical.stats.cum_returns`.
required_return: float / series
minimum acceptable return
period : str, optional
Defines the periodicity of the 'returns' data for purposes of
annualizing. Value ignored if `annualization` parameter is specified.
Defaults are::
'monthly':12
'weekly': 52
'daily': 252
annualization : int, optional
Used to suppress default values available in `period` to convert
returns into annual returns. Value should be the annual frequency of
`returns`.
out : array-like, optional
Array to use as output buffer.
If not passed, a new array will be created.
Returns
-------
downside_deviation : float or pd.Series
depends on input type
series ==> float
DataFrame ==> pd.Series
Note
-----
See `<https://www.sunrisecapital.com/wp-content/uploads/2014/06/Futures_
Mag_Sortino_0213.pdf>`__ for more details, specifically why using the
standard deviation of the negative returns is not correct.
"""
allocated_output = out is None
if allocated_output:
out = np.empty(returns.shape[1:])
returns_1d = returns.ndim == 1
if len(returns) < 1:
out[()] = np.nan
if returns_1d:
out = out.item()
return out
ann_factor = annualization_factor(period, annualization)
downside_diff = np.clip(
_adjust_returns(
np.asanyarray(returns),
np.asanyarray(required_return),
),
np.NINF,
0,
)
np.square(downside_diff, out=downside_diff)
nanmean(downside_diff, axis=0, out=out)
np.sqrt(out, out=out)
np.multiply(out, np.sqrt(ann_factor), out=out)
if returns_1d:
out = out.item()
elif isinstance(returns, pd.DataFrame):
out = pd.Series(out, index=returns.columns)
return out
|
Determines the downside deviation below a threshold
Parameters
----------
returns : pd.Series or np.ndarray or pd.DataFrame
Daily returns of the strategy, noncumulative.
- See full explanation in :func:`~empyrical.stats.cum_returns`.
required_return: float / series
minimum acceptable return
period : str, optional
Defines the periodicity of the 'returns' data for purposes of
annualizing. Value ignored if `annualization` parameter is specified.
Defaults are::
'monthly':12
'weekly': 52
'daily': 252
annualization : int, optional
Used to suppress default values available in `period` to convert
returns into annual returns. Value should be the annual frequency of
`returns`.
out : array-like, optional
Array to use as output buffer.
If not passed, a new array will be created.
Returns
-------
downside_deviation : float or pd.Series
depends on input type
series ==> float
DataFrame ==> pd.Series
Note
-----
See `<https://www.sunrisecapital.com/wp-content/uploads/2014/06/Futures_
Mag_Sortino_0213.pdf>`__ for more details, specifically why using the
standard deviation of the negative returns is not correct.
|
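A small illustrative call on invented daily returns (assumes the empyrical helpers `annualization_factor`, `_adjust_returns` and `nanmean` are in scope, as in `empyrical.stats`):
import pandas as pd

returns = pd.Series([0.01, -0.02, 0.03, -0.01, 0.0])
# Annualized downside deviation below a 0% threshold at daily frequency.
print(downside_risk(returns, required_return=0.0, period='daily'))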
def add_at(self, moment: float, fn_process: Callable, *args: Any, **kwargs: Any) -> 'Process':
"""
Adds a process to the simulation, which is made to start at the given exact time on the simulated clock. Note
that times in the past when compared to the current moment on the simulated clock are forbidden.
See method add() for more details.
"""
delay = moment - self.now()
if delay < 0.0:
raise ValueError(
f"The given moment to start the process ({moment:f}) is in the past (now is {self.now():f})."
)
return self.add_in(delay, fn_process, *args, **kwargs)
|
Adds a process to the simulation, which is made to start at the given exact time on the simulated clock. Note
that times in the past when compared to the current moment on the simulated clock are forbidden.
See method add() for more details.
|
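A hedged usage sketch, assuming `sim` is a simulator instance exposing the `now()`/`add_in()` methods used above (names illustrative):
def my_process():
    print('started at t=10')

# Schedule my_process to start at simulated time 10.0; a moment earlier
# than sim.now() would raise ValueError.
sim.add_at(10.0, my_process)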
def get(self, key):
""" Executes the callable registered at the specified key and returns
its value. Subsequent queries are cached internally.
`key`
String key for a previously stored callable.
"""
    if key not in self._actions:
        return None
    if key not in self._cache:
self._cache[key] = self._actions[key]()
return self._cache[key]
|
Executes the callable registered at the specified key and returns
its value. Subsequent queries are cached internally.
`key`
String key for a previously stored callable.
|
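A self-contained sketch of the lazy-caching pattern this implements (the `Registry` class is invented for illustration):
class Registry:
    def __init__(self):
        self._actions = {}
        self._cache = {}

    def register(self, key, fn):
        self._actions[key] = fn

    def get(self, key):
        if key not in self._actions:
            return None
        if key not in self._cache:
            self._cache[key] = self._actions[key]()
        return self._cache[key]

reg = Registry()
reg.register('answer', lambda: print('computing...') or 42)
reg.get('answer')  # prints 'computing...' and returns 42
reg.get('answer')  # served from the cache, no recomputation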
def __set_date(self, value):
'''
Sets the invoice date.
@param value:datetime
'''
value = date_to_datetime(value)
if value > datetime.now() + timedelta(hours=14, minutes=1): #More or less 14 hours from now in case the submitted date was local
raise ValueError("Date cannot be in the future.")
if self.__due_date and value.date() > self.__due_date:
raise ValueError("Date cannot be posterior to the due date.")
self.__date = value
|
Sets the invoice date.
@param value:datetime
|
def unique_list(input_, key=lambda x:x):
"""Return the unique elements from the input, in order."""
seen = set()
output = []
for x in input_:
keyx = key(x)
if keyx not in seen:
seen.add(keyx)
output.append(x)
return output
|
Return the unique elements from the input, in order.
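Usage of the function above; order of first occurrence is preserved, and the optional key decides what counts as a duplicate:
assert unique_list([3, 1, 3, 2, 1]) == [3, 1, 2]
assert unique_list([-1, 1, 2, -2], key=abs) == [-1, 2]  # first of each |x| wins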
|
def create_single_button_clone(self, submit_text='Submit', submit_css_class='btn-primary',
read_form_data=True, form_type=None):
"""
        This will create a copy of this form, with all of its inputs replaced with hidden inputs,
        and with a single submit button. This allows you to easily create a "button" that
        will submit a post request which is identical to the current state of the form.
        You could then, if required, change some of the values in the hidden inputs.
        Note: submit buttons are not included, and the submit button's value will change.
"""
from .basicfields import BooleanCheckbox, HiddenField, SubmitButton
fields = []
for field in self.all_fields:
# If it's valid for the field to be missing, and the value of the field is empty,
# then don't add it, otherwise create a hidden input
if field.allow_missing:
if field.value is None or field.value == '':
continue
elif isinstance(field, BooleanCheckbox) and not field.value:
continue
# TODO: is this right?
elif isinstance(field, SubmitButton):
continue
# If we get here, we need to add this field to the list
fields.append(HiddenField(field.name, field.value))
form = Form(fields, action=self.action, method=self.method, submit_css_class=submit_css_class,
submit_text=submit_text, read_form_data=read_form_data,
disable_csrf=self.disable_csrf, readonly=False,
form_type=form_type if form_type else self.form_type)
return form
|
This will create a copy of this form, with all of its inputs replaced with hidden inputs,
and with a single submit button. This allows you to easily create a "button" that
will submit a post request which is identical to the current state of the form.
You could then, if required, change some of the values in the hidden inputs.
Note: submit buttons are not included, and the submit button's value will change.
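A self-contained sketch of the underlying idea, reduced to bare HTML generation (the real method above also handles CSRF, checkboxes, and the allow_missing rules):
from html import escape

def single_button_form(action, values, submit_text='Submit'):
    hidden = ''.join(
        f'<input type="hidden" name="{escape(k)}" value="{escape(str(v))}"/>'
        for k, v in values.items()
        if v not in (None, '')  # skip empty values, as allow_missing does above
    )
    return (f'<form action="{escape(action)}" method="post">{hidden}'
            f'<button type="submit">{escape(submit_text)}</button></form>')

print(single_button_form('/invoice/42/pay', {'amount': '19.99'}, 'Pay now'))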
|
def polygon(self, points, stroke=None, fill=None, stroke_width=1, disable_anti_aliasing=False):
"""
        :param points: List of (x, y) point tuples
"""
self.put(' <polygon points="')
self.put(' '.join(['%s,%s' % p for p in points]))
self.put('" stroke-width="')
self.put(str(stroke_width))
self.put('"')
if fill:
self.put(' fill="')
self.put(fill)
self.put('"')
if stroke:
self.put(' stroke="')
self.put(stroke)
self.put('"')
if disable_anti_aliasing:
self.put(' shape-rendering="crispEdges"')
self.put('/>\n')
|
:param points: List of (x, y) point tuples
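For reference, a call such as polygon([(0, 0), (10, 0), (5, 8)], stroke='black', fill='red') writes the following markup through put() (traced from the method above; leading space and trailing newline included):
 <polygon points="0,0 10,0 5,8" stroke-width="1" fill="red" stroke="black"/>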
|
def quote(code):
"""Returns quoted code if not already quoted and if possible
Parameters
----------
code: String
    \tCode that is quoted
"""
try:
code = code.rstrip()
except AttributeError:
# code is not a string, may be None --> There is no code to quote
return code
    if code and code[0] + code[-1] not in ('""', "''", "u'", '"') \
       and '"' not in code:
        # Not already quoted and free of double quotes: safe to wrap in u"..."
        return 'u"' + code + '"'
else:
return code
|
Returns quoted code if not already quoted and if possible
Parameters
----------
code: String
\tCode that is quoted
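Behavior of the function above on a few representative inputs:
assert quote('abc') == 'u"abc"'         # unquoted, no double quotes inside
assert quote('"abc"') == '"abc"'        # already quoted: returned unchanged
assert quote("'abc'") == "'abc'"        # already single-quoted: unchanged
assert quote('say "hi"') == 'say "hi"'  # contains '"': left as-is
assert quote(None) is None              # non-strings pass through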
|
def _extract_game_info(self, games):
"""
Parse game information from all boxscores.
Find the major game information for all boxscores listed on a
particular boxscores webpage and return the results in a list.
Parameters
----------
games : generator
A generator where each element points to a boxscore on the parsed
boxscores webpage.
Returns
-------
list
Returns a ``list`` of dictionaries where each dictionary contains
the name and abbreviations for both the home and away teams, and a
link to the game's boxscore.
"""
all_boxscores = []
for game in games:
details = self._get_team_details(game)
away_name, away_abbr, away_score, home_name, home_abbr, \
home_score = details
boxscore_url = game('td[class="right gamelink"] a')
boxscore_uri = self._get_boxscore_uri(boxscore_url)
        losers = list(game('tr[class="loser"]').items())
winner = self._get_team_results(game('tr[class="winner"]'))
loser = self._get_team_results(game('tr[class="loser"]'))
# Occurs when the boxscore format is invalid and the game should be
# skipped to avoid conflicts populating the game information.
if (len(losers) != 2 and loser and not winner) or \
(len(losers) != 2 and winner and not loser):
continue
# Occurs when information couldn't be parsed from the boxscore or
# the game hasn't occurred yet. In this case, the winner should be
# None to avoid conflicts.
if not winner or len(losers) == 2:
winning_name = None
winning_abbreviation = None
else:
winning_name, winning_abbreviation = winner
        # Occurs when information couldn't be parsed from the boxscore or
        # the game hasn't occurred yet. In this case, the loser should be
        # None to avoid conflicts.
if not loser or len(losers) == 2:
losing_name = None
losing_abbreviation = None
else:
losing_name, losing_abbreviation = loser
game_info = {
'boxscore': boxscore_uri,
'away_name': away_name,
'away_abbr': away_abbr,
'away_score': away_score,
'home_name': home_name,
'home_abbr': home_abbr,
'home_score': home_score,
'winning_name': winning_name,
'winning_abbr': winning_abbreviation,
'losing_name': losing_name,
'losing_abbr': losing_abbreviation
}
all_boxscores.append(game_info)
return all_boxscores
|
Parse game information from all boxscores.
Find the major game information for all boxscores listed on a
particular boxscores webpage and return the results in a list.
Parameters
----------
games : generator
A generator where each element points to a boxscore on the parsed
boxscores webpage.
Returns
-------
list
Returns a ``list`` of dictionaries where each dictionary contains
the name and abbreviations for both the home and away teams, and a
link to the game's boxscore.
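Shape of one element of the returned list (all values are illustrative placeholders, not real data; the keys match the dictionary built above):
example_entry = {
    'boxscore': '2023/BOS202310180',    # boxscore URI
    'away_name': 'Philadelphia 76ers',
    'away_abbr': 'PHI',
    'away_score': 102,
    'home_name': 'Boston Celtics',
    'home_abbr': 'BOS',
    'home_score': 108,
    'winning_name': 'Boston Celtics',   # None if the game hasn't occurred yet
    'winning_abbr': 'BOS',
    'losing_name': 'Philadelphia 76ers',
    'losing_abbr': 'PHI',
}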
|