| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 3-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses 1 value | stringclasses 15 values | int64 3-1.05M |
"""
raven.events
~~~~~~~~~~~~
:copyright: (c) 2010-2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
import sys
from raven.utils.encoding import to_unicode
from raven.utils.stacks import get_stack_info, iter_traceback_frames
__all__ = ('BaseEvent', 'Exception', 'Message', 'Query')
class BaseEvent(object):
def __init__(self, client):
self.client = client
self.logger = logging.getLogger(__name__)
def to_string(self, data):
raise NotImplementedError
def capture(self, **kwargs):
return {
}
def transform(self, value):
return self.client.transform(value)
class Exception(BaseEvent):
"""
Exceptions store the following metadata:
- value: 'My exception value'
- type: 'ClassName'
    - module: '__builtin__' (i.e. __builtin__.TypeError)
- frames: a list of serialized frames (see _get_traceback_frames)
"""
def to_string(self, data):
exc = data['sentry.interfaces.Exception']
if exc['value']:
return '%s: %s' % (exc['type'], exc['value'])
return exc['type']
def capture(self, exc_info=None, **kwargs):
if not exc_info or exc_info is True:
exc_info = sys.exc_info()
        if not exc_info or exc_info[0] is None:
            raise ValueError('No exception found')
exc_type, exc_value, exc_traceback = exc_info
try:
frames = get_stack_info(
iter_traceback_frames(exc_traceback),
transformer=self.transform)
exc_module = getattr(exc_type, '__module__', None)
if exc_module:
exc_module = str(exc_module)
exc_type = getattr(exc_type, '__name__', '<unknown>')
return {
'level': kwargs.get('level', logging.ERROR),
'sentry.interfaces.Exception': {
'value': to_unicode(exc_value),
'type': str(exc_type),
'module': to_unicode(exc_module),
'stacktrace': {
'frames': frames
}
},
}
finally:
try:
del exc_type, exc_value, exc_traceback
            except BaseException as e:
                # note: the module-level name Exception is shadowed by this
                # class, so catch BaseException here
                self.logger.exception(e)
class Message(BaseEvent):
"""
Messages store the following metadata:
- message: 'My message from %s about %s'
- params: ('foo', 'bar')
"""
def capture(self, message, params=(), formatted=None, **kwargs):
message = to_unicode(message)
data = {
'sentry.interfaces.Message': {
'message': message,
'params': self.transform(params),
},
}
        data['message'] = formatted or message
return data
class Query(BaseEvent):
"""
    Queries store the following metadata:
    - query: 'SELECT * FROM table'
    - engine: 'postgresql_psycopg2'
"""
def to_string(self, data):
sql = data['sentry.interfaces.Query']
return sql['query']
def capture(self, query, engine, **kwargs):
return {
'sentry.interfaces.Query': {
'query': to_unicode(query),
'engine': str(engine),
}
}
| Goldmund-Wyldebeast-Wunderliebe/raven-python | raven/events.py | Python | bsd-3-clause | 3,417 |
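A minimal, self-contained sketch of the sys.exc_info() pattern that Exception.capture above relies on; the helper name is illustrative and not part of raven.

import sys

def capture_current_exception():
    # Mirrors Exception.capture: fall back to the active exception and
    # refuse to build an event when there is none.
    exc_type, exc_value, exc_traceback = sys.exc_info()
    if exc_type is None:
        raise ValueError('No exception found')
    return {
        'type': exc_type.__name__,
        'module': getattr(exc_type, '__module__', None),
        'value': str(exc_value),
    }

try:
    int('not a number')
except ValueError:
    print(capture_current_exception())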
import os
import ast
from collectors.lib.collectorbase import CollectorBase
class ManualScript(CollectorBase):
def __init__(self, config, logger, readq):
super(ManualScript, self).__init__(config, logger, readq)
self.command = ast.literal_eval(self.get_config("command"))
def __call__(self):
        if self.command:
for command in self.command:
stdout = os.popen(command).read().splitlines()
for metric in stdout:
self._readq.nput(metric)
| wangy1931/tcollector | collectors/builtin/manual_script.py | Python | lgpl-3.0 | 534 |
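The "command" config value above is parsed with ast.literal_eval, which safely evaluates a Python literal from the config string without executing arbitrary code; a quick illustration (the metric strings are made up):

import ast

raw = "['echo proc.loadavg 1 0.5', 'echo proc.uptime 1 12345']"
commands = ast.literal_eval(raw)  # a real Python list, not arbitrary code
print(commands[0])                # -> echo proc.loadavg 1 0.5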
#! /usr/bin/env python
#
import logging
from autopyfactory.interfaces import SchedInterface
class MaxToSubmit(SchedInterface):
"""
Keep the number of jobs submitted during the whole history
of the APFQueue below some limit
"""
id = 'maxtosubmit'
# TO BE IMPLEMENTED
| PanDAWMS/autopyfactory | autopyfactory/plugins/queue/sched/MaxToSubmit.py | Python | apache-2.0 | 295 |
"""This module contains constants used by the Lifemapper web services
"""
import os
from LmServer.base.utilities import get_mjd_time_from_iso_8601
from LmServer.common.lmconstants import SESSION_DIR
from LmServer.common.localconstants import SCRATCH_PATH, APP_PATH
from LmWebServer.common.localconstants import PACKAGING_DIR
# CherryPy constants
SESSION_PATH = os.path.join(SCRATCH_PATH, SESSION_DIR)
SESSION_KEY = '_cp_username'
REFERER_KEY = 'lm_referer'
# Results package constants
GRIDSET_DIR = 'gridset'
MATRIX_DIR = os.path.join(GRIDSET_DIR, 'matrix')
SDM_PRJ_DIR = os.path.join(GRIDSET_DIR, 'sdm')
DYN_PACKAGE_DIR = 'package'
STATIC_PACKAGE_PATH = os.path.join(APP_PATH, PACKAGING_DIR)
MAX_PROJECTIONS = 1000
# .............................................................................
class HTTPMethod:
"""Constant class for HTTP methods
"""
DELETE = 'DELETE'
GET = 'GET'
POST = 'POST'
PUT = 'PUT'
# .............................................................................
def sci_name_prep(name):
"""Prepare scientific name
"""
strip_chars = [' ', '+', '%20', ',', '%2C']
for strip_chr in strip_chars:
name = name.replace(strip_chr, '')
return name[:20]
# .............................................................................
def boolify_parameter(param, default=True):
"""Convert an input query parameter to boolean."""
try:
# If zero or one
return bool(int(param))
except ValueError:
try:
# Try processing a string
str_val = param.lower().strip()
if str_val == 'false' or str_val == 'no':
return False
if str_val == 'true' or str_val == 'yes':
return True
except Exception:
pass
# Return default if we can't figure it out
return default
# This constant is used for processing query parameters. If there is no
# 'process_in' key, just take the parameter as it comes in.
# Note: The dictionary keys are the .lower() version of the parameter names.
# The 'name' value of each key is what it gets translated to
# The point of this structure is to allow query parameters to be
# case-insensitive
QP_NAME_KEY = 'name'
QP_PROCESS_KEY = 'process_in'
QUERY_PARAMETERS = {
'afterstatus': {
QP_NAME_KEY: 'after_status',
QP_PROCESS_KEY: int
},
'aftertime': {
QP_NAME_KEY: 'after_time',
QP_PROCESS_KEY: get_mjd_time_from_iso_8601
},
'agent': {
QP_NAME_KEY: 'agent'
},
'algorithmcode': {
QP_NAME_KEY: 'algorithm_code',
},
'altpredcode': {
QP_NAME_KEY: 'alt_pred_code'
},
'archivename': {
QP_NAME_KEY: 'archive_name'
},
'atom': {
QP_NAME_KEY: 'atom',
QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=True) # Boolify, default is true
},
'beforestatus': {
QP_NAME_KEY: 'before_status',
QP_PROCESS_KEY: int
},
'beforetime': {
QP_NAME_KEY: 'before_time',
QP_PROCESS_KEY: get_mjd_time_from_iso_8601
},
'bbox': {
# Comes in as a comma separated list, turn it into a tuple of floats
QP_NAME_KEY: 'bbox',
# QP_PROCESS_KEY: lambda x: [float(i) for i in x.split(',')]
},
'bgcolor': {
QP_NAME_KEY: 'bgcolor',
},
'canonicalname': {
QP_NAME_KEY: 'canonical_name'
},
'catalognumber': {
QP_NAME_KEY: 'catalog_number'
},
'cellsides': {
QP_NAME_KEY: 'cell_sides',
QP_PROCESS_KEY: int
},
'cellsize': {
QP_NAME_KEY: 'cell_size',
QP_PROCESS_KEY: float
},
'collection': {
QP_NAME_KEY: 'collection'
},
'color': {
QP_NAME_KEY: 'color',
},
'coverage': {
QP_NAME_KEY: 'coverage'
},
'crs': {
# TODO: Consider processing the EPSG here
QP_NAME_KEY: 'crs'
},
'datecode': {
QP_NAME_KEY: 'date_code'
},
'detail': {
QP_NAME_KEY: 'detail',
QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=False) # Boolify, default is false
},
'displayname': {
QP_NAME_KEY: 'display_name'
},
'docalc': {
QP_NAME_KEY: 'do_calc',
QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=False) # Boolify, default is false
},
'domcpa': {
QP_NAME_KEY: 'do_mcpa',
QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=False) # Boolify, default is false
},
'envcode': {
QP_NAME_KEY: 'env_code'
},
'envtypeid': {
QP_NAME_KEY: 'env_type_id',
QP_PROCESS_KEY: int
},
'epsgcode': {
QP_NAME_KEY: 'epsg_code',
QP_PROCESS_KEY: int
},
'exceptions': {
QP_NAME_KEY: 'exceptions'
},
'filename': {
QP_NAME_KEY: 'file_name'
},
'fillpoints': {
QP_NAME_KEY: 'fill_points',
QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=False) # Boolify, default is false
},
'format': {
# TODO: Forward to respFormat since format is reserved
QP_NAME_KEY: 'format_',
},
'gcmcode': {
QP_NAME_KEY: 'gcm_code',
},
'gridsetid': {
QP_NAME_KEY: 'gridset_id',
QP_PROCESS_KEY: int
},
'hasbranchlengths': {
QP_NAME_KEY: 'has_branch_lengths',
QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=True) # Boolify, default is true
},
'height': {
QP_NAME_KEY: 'height',
QP_PROCESS_KEY: int
},
'ident1': {
QP_NAME_KEY: 'ident1'
},
'ident2': {
QP_NAME_KEY: 'ident2'
},
'includecsvs': {
QP_NAME_KEY: 'include_csvs',
QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=False) # Boolify, default is false
},
'includesdms': {
QP_NAME_KEY: 'include_sdms',
QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=False) # Boolify, default is false
},
'isbinary': {
QP_NAME_KEY: 'is_binary',
QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=True) # Boolify, default is true
},
'isultrametric': {
QP_NAME_KEY: 'is_ultrametric',
QP_PROCESS_KEY: lambda x: boolify_parameter(x, default=True) # Boolify, default is true
},
'keyword': {
QP_NAME_KEY: 'keyword',
QP_PROCESS_KEY: lambda x: [float(x)]
},
'layer': {
QP_NAME_KEY: 'layer'
},
'layers': {
QP_NAME_KEY: 'layers',
# QP_PROCESS_KEY: lambda x: [i for i in x.split(',')]
},
'layertype': {
QP_NAME_KEY: 'layer_type',
QP_PROCESS_KEY: int
},
'limit': {
QP_NAME_KEY: 'limit',
QP_PROCESS_KEY: lambda x: max(1, int(x)) # Integer, minimum is one
},
'map': {
QP_NAME_KEY: 'map_name'
},
'mapname': {
QP_NAME_KEY: 'map_name'
},
'matrixtype': {
QP_NAME_KEY: 'matrix_type',
QP_PROCESS_KEY: int
},
'metadata': {
QP_NAME_KEY: 'metadata'
},
'metastring': {
QP_NAME_KEY: 'meta_string'
},
'modelscenariocode': {
QP_NAME_KEY: 'model_scenario_code'
},
'minimumnumberofpoints': {
QP_NAME_KEY: 'minimum_number_of_points',
QP_PROCESS_KEY: lambda x: max(1, int(x)) # Integer, minimum is one
},
'numpermutations': {
QP_NAME_KEY: 'num_permutations',
QP_PROCESS_KEY: int
},
'occurrencesetid': {
QP_NAME_KEY: 'occurrence_set_id',
QP_PROCESS_KEY: int
},
'operation': {
QP_NAME_KEY: 'operation'
},
'offset': {
QP_NAME_KEY: 'offset',
QP_PROCESS_KEY: lambda x: max(0, int(x)) # Integer, minimum is zero
},
'pathbiogeoid': {
QP_NAME_KEY: 'path_biogeo_id'
},
'pathgridsetid': {
QP_NAME_KEY: 'path_gridset_id'
},
'pathlayerid': {
QP_NAME_KEY: 'path_layer_id'
},
'pathmatrixid': {
QP_NAME_KEY: 'path_matrix_id'
},
'pathoccsetid': {
QP_NAME_KEY: 'path_occset_id'
},
'pathprojectionid': {
QP_NAME_KEY: 'path_projection_id'
},
'pathscenarioid': {
QP_NAME_KEY: 'path_scenario_id'
},
'pathscenariopackageid': {
QP_NAME_KEY: 'path_scenario_package_id'
},
'pathshapegridid': {
QP_NAME_KEY: 'path_shapegrid_id'
},
'pathtreeid': {
QP_NAME_KEY: 'path_tree_id'
},
'pointmax': {
QP_NAME_KEY: 'point_max',
QP_PROCESS_KEY: int
},
'pointmin': {
QP_NAME_KEY: 'point_min',
QP_PROCESS_KEY: int
},
'projectionscenariocode': {
QP_NAME_KEY: 'projection_scenario_code'
},
'provider': {
QP_NAME_KEY: 'provider'
},
'request': {
QP_NAME_KEY: 'request'
},
'resolution': {
QP_NAME_KEY: 'resolution'
},
'scenariocode': {
QP_NAME_KEY: 'scenario_code'
},
'scenarioid': {
QP_NAME_KEY: 'scenario_id',
QP_PROCESS_KEY: int
},
'scientificname': {
QP_NAME_KEY: 'scientific_name',
QP_PROCESS_KEY: sci_name_prep
},
'searchstring': {
QP_NAME_KEY: 'search_string'
},
'service': {
QP_NAME_KEY: 'service'
},
'shapegridid': {
QP_NAME_KEY: 'shapegrid_id'
},
'sld': {
QP_NAME_KEY: 'sld'
},
'sldbody': {
QP_NAME_KEY: 'sld_body'
},
'squid': {
QP_NAME_KEY: 'squid',
# TODO: Evaluate what needs to be done to process into list
QP_PROCESS_KEY: lambda x: x
},
'srs': {
# TODO: Forward to crs for WMS 1.3.0?
QP_NAME_KEY: 'srs'
},
'status': {
QP_NAME_KEY: 'status',
QP_PROCESS_KEY: int
},
'styles': {
QP_NAME_KEY: 'styles',
# QP_PROCESS_KEY: lambda x: [i for i in x.split(',')]
},
'taxonclass': {
QP_NAME_KEY: 'class_'
},
'taxonfamily': {
QP_NAME_KEY: 'family'
},
'taxongenus': {
QP_NAME_KEY: 'genus'
},
'taxonkingdom': {
QP_NAME_KEY: 'kingdom'
},
'taxonorder': {
QP_NAME_KEY: 'order_'
},
'taxonphylum': {
QP_NAME_KEY: 'phylum'
},
'taxonspecies': {
QP_NAME_KEY: 'species'
},
'time': {
QP_NAME_KEY: 'time'
},
'transparent': {
QP_NAME_KEY: 'transparent',
# QP_PROCESS_KEY: lambda x: bool(x.lower() == 'true')
},
'treename': {
QP_NAME_KEY: 'name' # Map to 'name' for processing
},
'treeschema': {
QP_NAME_KEY: 'tree_schema'
},
'file': {
QP_NAME_KEY: 'file'
},
'uploadtype': {
QP_NAME_KEY: 'upload_type'
},
'url': {
QP_NAME_KEY: 'url'
},
'user': {
QP_NAME_KEY: 'url_user',
QP_PROCESS_KEY: lambda x: x
},
'version': {
QP_NAME_KEY: 'version'
},
'who': {
QP_NAME_KEY: 'who'
},
'why': {
QP_NAME_KEY: 'why'
},
'width': {
QP_NAME_KEY: 'width',
QP_PROCESS_KEY: int
},
# Authentication parameters
'address1': {
QP_NAME_KEY: 'address1'
},
'address2': {
QP_NAME_KEY: 'address2'
},
'address3': {
QP_NAME_KEY: 'address3'
},
'phone': {
QP_NAME_KEY: 'phone'
},
'email': {
QP_NAME_KEY: 'email'
},
'firstname': {
QP_NAME_KEY: 'first_name'
},
'institution': {
QP_NAME_KEY: 'institution'
},
'lastname': {
QP_NAME_KEY: 'last_name'
},
'pword': {
QP_NAME_KEY: 'pword'
},
'pword1': {
QP_NAME_KEY: 'pword1'
},
'userid': {
QP_NAME_KEY: 'user_id'
},
}
# Kml
KML_NAMESPACE = "http://earth.google.com/kml/2.2"
KML_NS_PREFIX = None
# .............................................................................
class APIPostKeys:
"""This class contains constants for API JSON POST keys
"""
ALGORITHM = 'algorithm'
ALGORITHM_CODE = 'code'
ALGORITHM_PARAMETERS = 'parameters'
ARCHIVE_NAME = 'archive_name'
BUFFER = 'buffer'
CELL_SIDES = 'cell_sides'
DELIMITER = 'delimiter'
DO_PAM_STATS = 'compute_pam_stats'
DO_MCPA = 'compute_mcpa'
GLOBAL_PAM = 'global_pam'
HULL_REGION = 'hull_region_intersect_mask'
INTERSECT_PARAMETERS = 'intersect_parameters'
MAX_PRESENCE = 'max_presence'
MAX_X = 'maxx'
MAX_Y = 'maxy'
MCPA = 'mcpa'
MIN_PERCENT = 'min_percent'
MIN_POINTS = 'point_count_min'
MIN_PRESENCE = 'min_presence'
MIN_X = 'minx'
MIN_Y = 'miny'
MODEL_SCENARIO = 'model_scenario'
NAME = 'name'
OCCURRENCE = 'occurrence'
OCCURRENCE_IDS = 'occurrence_ids'
PACKAGE_FILENAME = 'scenario_package_filename'
PACKAGE_NAME = 'scenario_package_name'
PAM_STATS = 'pam_stats'
POINTS_FILENAME = 'points_filename'
PROJECTION_SCENARIO = 'projection_scenario'
REGION = 'region'
RESOLUTION = 'resolution'
SCENARIO_CODE = 'scenario_code'
SCENARIO_PACKAGE = 'scenario_package'
SDM = 'sdm'
SHAPEGRID = 'shapegrid'
TAXON_IDS = 'taxon_ids'
TAXON_NAMES = 'taxon_names'
TREE = 'tree'
TREE_FILENAME = 'tree_file_name'
VALUE_NAME = 'value_name'
| lifemapper/core | LmWebServer/common/lmconstants.py | Python | gpl-3.0 | 13,281 |
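A sketch of how the QUERY_PARAMETERS table above is meant to be consumed (the helper is hypothetical, not part of lmconstants.py): parameter names are lowercased for the case-insensitive lookup, then renamed and optionally coerced.

def process_parameter(query_parameters, raw_name, raw_value):
    # Case-insensitive lookup, then rename and (optionally) coerce.
    config = query_parameters[raw_name.lower()]
    process = config.get('process_in', lambda x: x)
    return config['name'], process(raw_value)

# e.g. with the table above:
#   process_parameter(QUERY_PARAMETERS, 'afterStatus', '300')
#   -> ('after_status', 300)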
# initializing pardon as a module
| Komish/pardon | pardon/__init__.py | Python | mit | 34 |
# Copyright (c) 2013 Intel, Inc.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import exception
from nova.i18n import _
from nova import objects
from nova.openstack.common import log as logging
from nova.pci import device
from nova.pci import stats
LOG = logging.getLogger(__name__)
class PciDevTracker(object):
"""Manage pci devices in a compute node.
This class fetches pci passthrough information from hypervisor
    and tracks the usage of these devices.
It's called by compute node resource tracker to allocate and free
devices to/from instances, and to update the available pci passthrough
devices information from hypervisor periodically. The devices
information is updated to DB when devices information is changed.
"""
def __init__(self, node_id=None):
"""Create a pci device tracker.
If a node_id is passed in, it will fetch pci devices information
from database, otherwise, it will create an empty devices list
and the resource tracker will update the node_id information later.
"""
super(PciDevTracker, self).__init__()
self.stale = {}
self.node_id = node_id
self.stats = stats.PciDeviceStats()
if node_id:
            self.pci_devs = list(
                objects.PciDeviceList.get_by_compute_node(
                    context.get_admin_context(), node_id))
else:
self.pci_devs = []
self._initial_instance_usage()
def _initial_instance_usage(self):
self.allocations = collections.defaultdict(list)
self.claims = collections.defaultdict(list)
for dev in self.pci_devs:
uuid = dev['instance_uuid']
if dev['status'] == 'claimed':
self.claims[uuid].append(dev)
elif dev['status'] == 'allocated':
self.allocations[uuid].append(dev)
elif dev['status'] == 'available':
self.stats.add_device(dev)
@property
def all_devs(self):
return self.pci_devs
def save(self, context):
for dev in self.pci_devs:
if dev.obj_what_changed():
dev.save(context)
self.pci_devs = [dev for dev in self.pci_devs
if dev['status'] != 'deleted']
@property
def pci_stats(self):
return self.stats
def set_hvdevs(self, devices):
"""Sync the pci device tracker with hypervisor information.
To support pci device hot plug, we sync with the hypervisor
periodically, fetching all devices information from hypervisor,
update the tracker and sync the DB information.
Devices should not be hot-plugged when assigned to a guest,
but possibly the hypervisor has no such guarantee. The best
we can do is to give a warning if a device is changed
or removed while assigned.
"""
exist_addrs = set([dev['address'] for dev in self.pci_devs])
new_addrs = set([dev['address'] for dev in devices])
for existed in self.pci_devs:
if existed['address'] in exist_addrs - new_addrs:
try:
device.remove(existed)
except exception.PciDeviceInvalidStatus as e:
LOG.warn(_("Trying to remove device with %(status)s "
"ownership %(instance_uuid)s because of "
"%(pci_exception)s"), {'status': existed.status,
'instance_uuid': existed.instance_uuid,
'pci_exception': e.format_message()})
# Note(yjiang5): remove the device by force so that
# db entry is cleaned in next sync.
existed.status = 'removed'
else:
# Note(yjiang5): no need to update stats if an assigned
# device is hot removed.
self.stats.remove_device(existed)
else:
new_value = next((dev for dev in devices if
dev['address'] == existed['address']))
new_value['compute_node_id'] = self.node_id
if existed['status'] in ('claimed', 'allocated'):
# Pci properties may change while assigned because of
                    # hotplug or config changes, although normally this should
                    # not happen.
                    # As the devices have been assigned to an instance, we defer
# the change till the instance is destroyed. We will
# not sync the new properties with database before that.
# TODO(yjiang5): Not sure if this is a right policy, but
# at least it avoids some confusion and, if needed,
# we can add more action like killing the instance
# by force in future.
self.stale[new_value['address']] = new_value
else:
device.update_device(existed, new_value)
for dev in [dev for dev in devices if
dev['address'] in new_addrs - exist_addrs]:
dev['compute_node_id'] = self.node_id
dev_obj = objects.PciDevice.create(dev)
self.pci_devs.append(dev_obj)
self.stats.add_device(dev_obj)
def _claim_instance(self, context, instance, prefix=''):
pci_requests = objects.InstancePCIRequests.get_by_instance(
context, instance)
if not pci_requests.requests:
return None
devs = self.stats.consume_requests(pci_requests.requests)
if not devs:
raise exception.PciDeviceRequestFailed(pci_requests)
for dev in devs:
device.claim(dev, instance)
return devs
def _allocate_instance(self, instance, devs):
for dev in devs:
device.allocate(dev, instance)
def _free_device(self, dev, instance=None):
device.free(dev, instance)
stale = self.stale.pop(dev['address'], None)
if stale:
device.update_device(dev, stale)
self.stats.add_device(dev)
def _free_instance(self, instance):
        # Note(yjiang5): When an instance is resized, the devices in the
# destination node are claimed to the instance in prep_resize stage.
# However, the instance contains only allocated devices
# information, not the claimed one. So we can't use
# instance['pci_devices'] to check the devices to be freed.
for dev in self.pci_devs:
if (dev['status'] in ('claimed', 'allocated') and
dev['instance_uuid'] == instance['uuid']):
self._free_device(dev)
def update_pci_for_instance(self, context, instance):
"""Update instance's pci usage information.
The caller should hold the COMPUTE_RESOURCE_SEMAPHORE lock
"""
uuid = instance['uuid']
vm_state = instance['vm_state']
task_state = instance['task_state']
if vm_state == vm_states.DELETED:
if self.allocations.pop(uuid, None):
self._free_instance(instance)
elif self.claims.pop(uuid, None):
self._free_instance(instance)
elif task_state == task_states.RESIZE_MIGRATED:
devs = self.allocations.pop(uuid, None)
if devs:
self._free_instance(instance)
elif task_state == task_states.RESIZE_FINISH:
devs = self.claims.pop(uuid, None)
if devs:
self._allocate_instance(instance, devs)
self.allocations[uuid] = devs
elif (uuid not in self.allocations and
uuid not in self.claims):
devs = self._claim_instance(context, instance)
if devs:
self._allocate_instance(instance, devs)
self.allocations[uuid] = devs
def update_pci_for_migration(self, context, instance, sign=1):
"""Update instance's pci usage information when it is migrated.
The caller should hold the COMPUTE_RESOURCE_SEMAPHORE lock.
:param sign: claim devices for instance when sign is 1, remove
the claims when sign is -1
"""
uuid = instance['uuid']
if sign == 1 and uuid not in self.claims:
devs = self._claim_instance(context, instance, 'new_')
if devs:
self.claims[uuid] = devs
if sign == -1 and uuid in self.claims:
self._free_instance(instance)
def clean_usage(self, instances, migrations, orphans):
"""Remove all usages for instances not passed in the parameter.
The caller should hold the COMPUTE_RESOURCE_SEMAPHORE lock
"""
existed = [inst['uuid'] for inst in instances]
existed += [mig['instance_uuid'] for mig in migrations]
existed += [inst['uuid'] for inst in orphans]
for uuid in self.claims.keys():
if uuid not in existed:
devs = self.claims.pop(uuid, [])
for dev in devs:
self._free_device(dev)
for uuid in self.allocations.keys():
if uuid not in existed:
devs = self.allocations.pop(uuid, [])
for dev in devs:
self._free_device(dev)
def set_compute_node_id(self, node_id):
"""Set the compute node id that this object is tracking for.
In current resource tracker implementation, the
compute_node entry is created in the last step of
        update_available_resources, thus we have to lazily set the
compute_node_id at that time.
"""
if self.node_id and self.node_id != node_id:
raise exception.PciTrackerInvalidNodeId(node_id=self.node_id,
new_node_id=node_id)
self.node_id = node_id
for dev in self.pci_devs:
dev.compute_node_id = node_id
def get_instance_pci_devs(inst, request_id=None):
"""Get the devices allocated to one or all requests for an instance.
- For generic PCI request, the request id is None.
- For sr-iov networking, the request id is a valid uuid
- There are a couple of cases where all the PCI devices allocated to an
instance need to be returned. Refer to libvirt driver that handles
soft_reboot and hard_boot of 'xen' instances.
"""
pci_devices = inst.pci_devices
return [device for device in pci_devices if
device.request_id == request_id or request_id == 'all']
| badock/nova | nova/pci/manager.py | Python | apache-2.0 | 11,380 |
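A standalone sketch of the address-set arithmetic at the heart of set_hvdevs above; plain dicts stand in for PciDevice objects, and the addresses are invented.

tracked = [{'address': '0000:00:1f.0'}, {'address': '0000:00:1f.1'}]
reported = [{'address': '0000:00:1f.1'}, {'address': '0000:00:1f.2'}]

exist_addrs = set(dev['address'] for dev in tracked)
new_addrs = set(dev['address'] for dev in reported)

print(exist_addrs - new_addrs)  # hot-removed devices: {'0000:00:1f.0'}
print(new_addrs - exist_addrs)  # hot-added devices:   {'0000:00:1f.2'}
print(exist_addrs & new_addrs)  # still present, updated in place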
"""Low-level MediaFire API Client"""
from __future__ import unicode_literals
import hashlib
import requests
import logging
import six
from six.moves.urllib.parse import urlencode
from requests_toolbelt import MultipartEncoder
from requests.adapters import HTTPAdapter
from requests.exceptions import RequestException
API_BASE = 'https://www.mediafire.com'
API_VER = '1.3'
UPLOAD_MIMETYPE = 'application/octet-stream'
FORM_MIMETYPE = 'application/x-www-form-urlencoded'
# Retries on connection errors/timeouts
API_ERROR_MAX_RETRIES = 5
logger = logging.getLogger(__name__)
# Each API call may have lots of parameters, so disable warning
# pylint: disable=too-many-arguments
class QueryParams(dict):
"""dict tailored for MediaFire requests.
* won't store None values
* boolean values are converted to 'yes'/'no'
"""
def __init__(self, defaults=None):
super(QueryParams, self).__init__()
if defaults is not None:
for key, value in defaults.items():
self.__setitem__(key, value)
def __setitem__(self, key, value):
"""Set dict item, handling booleans"""
if value is not None:
if value is True:
value = 'yes'
elif value is False:
value = 'no'
dict.__setitem__(self, key, value)
class MediaFireError(Exception):
"""Base class for MediaFire-related errors"""
pass
class MediaFireApiError(MediaFireError):
"""Base class for API errors"""
def __init__(self, message, code=None):
"""Initialize exception"""
self.code = code
self.message = message
super(MediaFireApiError, self).__init__(message, code)
def __str__(self):
"""Stringify exception"""
return "{}: {}".format(self.code, self.message)
class MediaFireConnectionError(MediaFireError):
"""Low level connection errors"""
pass
class MediaFireApi(object): # pylint: disable=too-many-public-methods
"""Low-level HTTP API Client"""
def __init__(self):
"""Initialize MediaFire Client"""
self.http = requests.Session()
self.http.mount('https://',
HTTPAdapter(max_retries=API_ERROR_MAX_RETRIES))
self._session = None
self._action_tokens = {}
@staticmethod
def _build_uri(action):
"""Build endpoint URI from action"""
return '/api/' + API_VER + '/' + action + '.php'
def _build_query(self, uri, params=None, action_token_type=None):
"""Prepare query string"""
if params is None:
params = QueryParams()
params['response_format'] = 'json'
session_token = None
if action_token_type in self._action_tokens:
# Favor action token
using_action_token = True
session_token = self._action_tokens[action_token_type]
else:
using_action_token = False
if self._session:
session_token = self._session['session_token']
if session_token:
params['session_token'] = session_token
# make order of parameters predictable for testing
keys = list(params.keys())
keys.sort()
query = urlencode([tuple([key, params[key]]) for key in keys])
if not using_action_token and self._session:
secret_key_mod = int(self._session['secret_key']) % 256
signature_base = (str(secret_key_mod) +
self._session['time'] +
uri + '?' + query).encode('ascii')
query += '&signature=' + hashlib.md5(signature_base).hexdigest()
return query
def request(self, action, params=None, action_token_type=None,
upload_info=None, headers=None):
"""Perform request to MediaFire API
action -- "category/name" of method to call
params -- dict of parameters or query string
action_token_type -- action token to use: None, "upload", "image"
upload_info -- in case of upload, dict of "fd" and "filename"
headers -- additional headers to send (used for upload)
session_token and signature generation/update is handled automatically
"""
uri = self._build_uri(action)
if isinstance(params, six.text_type):
query = params
else:
query = self._build_query(uri, params, action_token_type)
if headers is None:
headers = {}
if upload_info is None:
# Use request body for query
data = query
headers['Content-Type'] = FORM_MIMETYPE
else:
# Use query string for query since payload is file
uri += '?' + query
if "filename" in upload_info:
data = MultipartEncoder(
fields={'file': (
upload_info["filename"],
upload_info["fd"],
UPLOAD_MIMETYPE
)}
)
headers["Content-Type"] = data.content_type
else:
data = upload_info["fd"]
headers["Content-Type"] = UPLOAD_MIMETYPE
logger.debug("uri=%s query=%s",
uri, query if not upload_info else None)
try:
# bytes from now on
url = (API_BASE + uri).encode('utf-8')
if isinstance(data, six.text_type):
# request's data is bytes, dict, or filehandle
data = data.encode('utf-8')
response = self.http.post(url, data=data,
headers=headers, stream=True)
except RequestException as ex:
logger.exception("HTTP request failed")
raise MediaFireConnectionError(
"RequestException: {}".format(ex))
return self._process_response(response)
def _process_response(self, response):
"""Parse response"""
forward_raw = False
content_type = response.headers['Content-Type']
if content_type != 'application/json':
logger.debug("headers: %s", response.headers)
# API BUG: text/xml content-type with json payload
# http://forum.mediafiredev.com/showthread.php?136
if content_type == 'text/xml':
# we never request xml, so check it quacks like JSON
if not response.text.lstrip().startswith('{'):
forward_raw = True
else:
# _process_response can't deal with non-json,
# return response as is
forward_raw = True
if forward_raw:
response.raise_for_status()
return response
logger.debug("response: %s", response.text)
# if we are here, then most likely have json
try:
response_node = response.json()['response']
except ValueError:
# promised JSON but failed
raise MediaFireApiError("JSON decode failure")
if response_node.get('new_key', 'no') == 'yes':
self._regenerate_secret_key()
# check for errors
if response_node['result'] != 'Success':
raise MediaFireApiError(response_node['message'],
response_node['error'])
return response_node
def _regenerate_secret_key(self):
"""Regenerate secret key
http://www.mediafire.com/developers/core_api/1.3/getting_started/#call_signature
"""
# Don't regenerate the key if we have none
if self._session and 'secret_key' in self._session:
self._session['secret_key'] = (
int(self._session['secret_key']) * 16807) % 2147483647
@property
def session(self):
"""Returns current session information"""
return self._session
@session.setter
def session(self, value):
"""Set session token
value -- dict returned by user/get_session_token"""
# unset session token
if value is None:
self._session = None
return
if not isinstance(value, dict):
raise ValueError("session info is required")
session_parsed = {}
for key in ["session_token", "time", "secret_key"]:
if key not in value:
raise ValueError("Missing parameter: {}".format(key))
session_parsed[key] = value[key]
for key in ["ekey", "pkey"]:
# nice to have, but not mandatory
if key in value:
session_parsed[key] = value[key]
self._session = session_parsed
@session.deleter
def session(self):
"""Unset session"""
self._session = None
def set_action_token(self, type_=None, action_token=None):
"""Set action tokens
type_ -- either "upload" or "image"
action_token -- string obtained from user/get_action_token,
set None to remove the token
"""
if action_token is None:
del self._action_tokens[type_]
else:
self._action_tokens[type_] = action_token
def user_fetch_tos(self):
"""user/fetch_tos
http://www.mediafire.com/developers/core_api/1.3/user/#fetch_tos
"""
return self.request("user/fetch_tos")
def user_accept_tos(self, acceptance_token):
"""user/accept_tos
http://www.mediafire.com/developers/core_api/1.3/user/#user_top
"""
return self.request("user/accept_tos", QueryParams({
"acceptance_token": acceptance_token
}))
def user_get_session_token(self, app_id=None, email=None, password=None,
ekey=None, fb_access_token=None,
tw_oauth_token=None,
tw_oauth_token_secret=None, api_key=None):
"""user/get_session_token
http://www.mediafire.com/developers/core_api/1.3/user/#get_session_token
"""
if app_id is None:
raise ValueError("app_id must be defined")
params = QueryParams({
'application_id': str(app_id),
'token_version': 2,
'response_format': 'json'
})
if fb_access_token:
params['fb_access_token'] = fb_access_token
signature_keys = ['fb_access_token']
elif tw_oauth_token and tw_oauth_token_secret:
params['tw_oauth_token'] = tw_oauth_token
params['tw_oauth_token_secret'] = tw_oauth_token_secret
signature_keys = ['tw_oauth_token',
'tw_oauth_token_secret']
elif (email or ekey) and password:
signature_keys = []
if email:
signature_keys.append('email')
params['email'] = email
if ekey:
signature_keys.append('ekey')
params['ekey'] = ekey
params['password'] = password
signature_keys.append('password')
else:
raise ValueError("Credentials not provided")
signature_keys.append('application_id')
signature = hashlib.sha1()
for key in signature_keys:
signature.update(str(params[key]).encode('ascii'))
# Note: If the app uses a callback URL to provide its API key,
# or if it does not have the "Require Secret Key" option checked,
# then the API key may be omitted from the signature
if api_key:
signature.update(api_key.encode('ascii'))
query = urlencode(params)
query += '&signature=' + signature.hexdigest()
return self.request('user/get_session_token', params=query)
def user_renew_session_token(self):
"""user/renew_session_token:
http://www.mediafire.com/developers/core_api/1.3/user/#renew_session_token
"""
return self.request('user/renew_session_token')
def user_get_action_token(self, type_=None, lifespan=None):
"""user/get_action_token
http://www.mediafire.com/developers/core_api/1.3/user/#get_action_token
"""
return self.request('user/get_action_token', QueryParams({
'type': type_,
'lifespan': lifespan
}))
def user_destroy_action_token(self, action_token=None):
"""user/destroy_action_token
http://www.mediafire.com/developers/core_api/1.3/user/#destroy_action_token
"""
return self.request('user/destroy_action_token', QueryParams({
'action_token': action_token
}))
def user_get_avatar(self):
"""user/get_avatar
http://www.mediafire.com/developers/core_api/1.3/user/#get_avatar
"""
return self.request("user/get_avatar")
def user_get_info(self):
"""user/get_info
http://www.mediafire.com/developers/core_api/1.3/user/#get_info
"""
return self.request("user/get_info")
def user_get_limits(self):
"""user/get_limits
http://www.mediafire.com/developers/core_api/1.3/user/#get_limits
"""
return self.request("user/get_limits")
def user_get_settings(self):
"""user/get_settings
http://www.mediafire.com/developers/core_api/1.3/user/#get_settings
"""
return self.request("user/get_settings")
def user_set_avatar(self, action=None, quick_key=None, url=None):
"""user/set_avatar
http://www.mediafire.com/developers/core_api/1.3/user/#set_avatar
"""
return self.request("user/set_avatar", QueryParams({
"action": action,
"quick_key": quick_key,
"url": url
}))
def user_update(self, display_name=None, first_name=None, last_name=None,
email=None, password=None, current_password=None,
birth_date=None, gender=None, website=None, subdomain=None,
location=None, newsletter=None, primary_usage=None,
timezone=None):
"""
user/update
http://www.mediafire.com/developers/core_api/1.3/user/#update
"""
return self.request("user/update", QueryParams({
"display_name": display_name,
"first_name": first_name,
"last_name": last_name,
"email": email,
"password": password,
"current_password": current_password,
"birth_date": birth_date,
"gender": gender,
"website": website,
"subdomain": subdomain,
"location": location,
"newsletter": newsletter,
"primary_usage": primary_usage,
"timezone": timezone
}))
def folder_get_info(self, folder_key=None, device_id=None, details=None):
"""folder/get_info
http://www.mediafire.com/developers/core_api/1.3/folder/#get_info
"""
return self.request('folder/get_info', QueryParams({
'folder_key': folder_key,
'device_id': device_id,
'details': details
}))
def folder_get_content(self, folder_key=None, content_type=None,
filter_=None, device_id=None, order_by=None,
order_direction=None, chunk=None, details=None,
chunk_size=None):
"""folder/get_content
http://www.mediafire.com/developers/core_api/1.3/folder/#get_content
"""
return self.request('folder/get_content', QueryParams({
'folder_key': folder_key,
'content_type': content_type,
'filter': filter_,
'device_id': device_id,
'order_by': order_by,
'order_direction': order_direction,
'chunk': chunk,
'details': details,
'chunk_size': chunk_size
}))
def folder_update(self, folder_key, foldername=None, description=None,
privacy=None, privacy_recursive=None, mtime=None):
"""folder/update
http://www.mediafire.com/developers/core_api/1.3/folder/#update
"""
return self.request('folder/update', QueryParams({
'folder_key': folder_key,
'foldername': foldername,
'description': description,
'privacy': privacy,
'privacy_recursive': privacy_recursive,
'mtime': mtime
}))
def folder_create(self, foldername=None, parent_key=None,
action_on_duplicate=None, mtime=None):
"""folder/create
http://www.mediafire.com/developers/core_api/1.3/folder/#create
"""
return self.request('folder/create', QueryParams({
'foldername': foldername,
'parent_key': parent_key,
'action_on_duplicate': action_on_duplicate,
'mtime': mtime
}))
def folder_delete(self, folder_key):
"""folder/delete
http://www.mediafire.com/developers/core_api/1.3/folder/#delete
"""
return self.request('folder/delete', QueryParams({
'folder_key': folder_key
}))
def folder_purge(self, folder_key):
"""folder/purge
http://www.mediafire.com/developers/core_api/1.3/folder/#purge
"""
return self.request('folder/purge', QueryParams({
'folder_key': folder_key
}))
def folder_move(self, folder_key_src, folder_key_dst=None):
"""folder/move
http://www.mediafire.com/developers/core_api/1.3/folder/#move
"""
return self.request('folder/move', QueryParams({
'folder_key_src': folder_key_src,
'folder_key_dst': folder_key_dst
}))
def upload_check(self, filename=None, folder_key=None, filedrop_key=None,
size=None, hash_=None, path=None, resumable=None):
"""upload/check
http://www.mediafire.com/developers/core_api/1.3/upload/#check
"""
return self.request('upload/check', QueryParams({
'filename': filename,
'folder_key': folder_key,
'filedrop_key': filedrop_key,
'size': size,
'hash': hash_,
'path': path,
'resumable': resumable
}))
def upload_simple(self, fd, filename, folder_key=None, path=None,
filedrop_key=None, action_on_duplicate=None,
mtime=None, file_size=None, file_hash=None):
"""upload/simple
http://www.mediafire.com/developers/core_api/1.3/upload/#simple
"""
action = 'upload/simple'
params = QueryParams({
'folder_key': folder_key,
'path': path,
'filedrop_key': filedrop_key,
'action_on_duplicate': action_on_duplicate,
'mtime': mtime
})
headers = QueryParams({
'X-Filesize': str(file_size),
'X-Filehash': file_hash,
'X-Filename': filename.encode('utf-8')
})
upload_info = {
"fd": fd,
}
return self.request(action, params, action_token_type="upload",
upload_info=upload_info, headers=headers)
# pylint: disable=too-many-locals
# The API requires us to provide all of that
def upload_resumable(self, fd, filesize, filehash, unit_hash, unit_id,
unit_size, quick_key=None, action_on_duplicate=None,
mtime=None, version_control=None, folder_key=None,
filedrop_key=None, path=None, previous_hash=None):
"""upload/resumable
http://www.mediafire.com/developers/core_api/1.3/upload/#resumable
"""
action = 'upload/resumable'
headers = {
'x-filesize': str(filesize),
'x-filehash': filehash,
'x-unit-hash': unit_hash,
'x-unit-id': str(unit_id),
'x-unit-size': str(unit_size)
}
params = QueryParams({
'quick_key': quick_key,
'action_on_duplicate': action_on_duplicate,
'mtime': mtime,
'version_control': version_control,
'folder_key': folder_key,
'filedrop_key': filedrop_key,
'path': path,
'previous_hash': previous_hash
})
upload_info = {
"fd": fd,
"filename": "chunk"
}
return self.request(action, params, action_token_type="upload",
upload_info=upload_info, headers=headers)
# pylint: enable=too-many-locals
def upload_instant(self, filename, size, hash_, quick_key=None,
folder_key=None, filedrop_key=None, path=None,
action_on_duplicate=None, mtime=None,
version_control=None, previous_hash=None):
"""upload/instant
http://www.mediafire.com/developers/core_api/1.3/upload/#instant
"""
return self.request('upload/instant', QueryParams({
'filename': filename,
'size': size,
'hash': hash_,
'quick_key': quick_key,
'folder_key': folder_key,
'filedrop_key': filedrop_key,
'path': path,
'action_on_duplicate': action_on_duplicate,
'mtime': mtime,
'version_control': version_control,
'previous_hash': previous_hash
}))
def upload_poll(self, key):
"""upload/poll
http://www.mediafire.com/developers/core_api/1.3/upload/#poll_upload
"""
return self.request('upload/poll_upload', QueryParams({
'key': key
}))
def file_get_info(self, quick_key=None):
"""file/get_info
http://www.mediafire.com/developers/core_api/1.3/file/#get_info
"""
return self.request('file/get_info', QueryParams({
'quick_key': quick_key
}))
def file_get_links(self, quick_key, link_type=None):
"""file/get_links
http://www.mediafire.com/developers/core_api/1.3/file/#get_links
"""
return self.request('file/get_links', QueryParams({
'quick_key': quick_key,
'link_type': link_type,
}))
def file_update(self, quick_key, filename=None, description=None,
mtime=None, privacy=None):
"""file/update
http://www.mediafire.com/developers/core_api/1.3/file/#update
"""
return self.request('file/update', QueryParams({
'quick_key': quick_key,
'filename': filename,
'description': description,
'mtime': mtime,
'privacy': privacy
}))
def file_update_file(self, quick_key, file_extension=None, filename=None,
description=None, mtime=None, privacy=None,
timezone=None):
"""file/update_file
http://www.mediafire.com/developers/core_api/1.3/file/#update_file
"""
return self.request('file/update', QueryParams({
'quick_key': quick_key,
'file_extension': file_extension,
'filename': filename,
'description': description,
'mtime': mtime,
'privacy': privacy,
'timezone': timezone
}))
def file_delete(self, quick_key):
"""file/delete
http://www.mediafire.com/developers/core_api/1.3/file/#delete
"""
return self.request('file/delete', QueryParams({
'quick_key': quick_key
}))
def file_move(self, quick_key, folder_key=None):
"""file/move
http://www.mediafire.com/developers/core_api/1.3/file/#move
"""
return self.request('file/move', QueryParams({
'quick_key': quick_key,
'folder_key': folder_key
}))
def file_purge(self, quick_key):
"""file/purge
http://www.mediafire.com/developers/core_api/1.3/file/#purge
"""
return self.request('file/purge', QueryParams({
'quick_key': quick_key
}))
def file_zip(self, keys, confirm_download=None, meta_only=None):
"""file/zip
http://www.mediafire.com/developers/core_api/1.3/file/#zip
"""
return self.request('file/zip', QueryParams({
'keys': keys,
'confirm_download': confirm_download,
'meta_only': meta_only
}))
def system_get_info(self):
"""system/get_info
http://www.mediafire.com/developers/core_api/1.3/system/#get_info
"""
return self.request('system/get_info')
def system_get_status(self):
"""system/get_status
http://www.mediafire.com/developers/core_api/1.3/system/#get_status
"""
return self.request('system/get_status')
| MediaFire/mediafire-python-open-sdk | mediafire/api.py | Python | bsd-2-clause | 25,118 |
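A standalone sketch of the session call-signature scheme _build_query implements (per the MediaFire docs linked above); every value below is made up.

import hashlib

secret_key = 123456789          # from user/get_session_token
time_ = '1415335234.5324'       # ditto
uri = '/api/1.3/user/get_info.php'
query = 'response_format=json&session_token=abc123'

signature_base = (str(secret_key % 256) + time_ + uri + '?' + query).encode('ascii')
signed_query = query + '&signature=' + hashlib.md5(signature_base).hexdigest()
print(signed_query)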
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Increase length of password column in connection table
Revision ID: c1840b4bcf1a
Revises: 004c1210f153
Create Date: 2019-10-02 16:56:54.865550
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = 'c1840b4bcf1a'
down_revision = '004c1210f153'
branch_labels = None
depends_on = None
def upgrade():
conn = op.get_bind()
if conn.dialect.name == 'sqlite':
# SQLite does not allow column modifications so we need to skip this migration
return
op.alter_column(table_name='connection',
column_name='password',
type_=sa.String(length=5000))
def downgrade():
# Can't be undone
pass
| Fokko/incubator-airflow | airflow/migrations/versions/c1840b4bcf1a_increase_length_of_password_column_in_.py | Python | apache-2.0 | 1,498 |
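The SQLite guard above works because Alembic's op.get_bind() exposes the live SQLAlchemy connection; the same dialect check is reproducible outside Alembic (assumes SQLAlchemy is installed):

import sqlalchemy as sa

engine = sa.create_engine('sqlite://')
print(engine.dialect.name)  # 'sqlite' -> upgrade() above would return early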
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TroubleshootingDetails(Model):
"""Information gained from troubleshooting of specified resource.
:param id: The id of the get troubleshoot operation.
:type id: str
:param reason_type: Reason type of failure.
:type reason_type: str
:param summary: A summary of troubleshooting.
:type summary: str
:param detail: Details on troubleshooting results.
:type detail: str
:param recommended_actions: List of recommended actions.
:type recommended_actions:
list[~azure.mgmt.network.v2017_03_01.models.TroubleshootingRecommendedActions]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'reason_type': {'key': 'reasonType', 'type': 'str'},
'summary': {'key': 'summary', 'type': 'str'},
'detail': {'key': 'detail', 'type': 'str'},
'recommended_actions': {'key': 'recommendedActions', 'type': '[TroubleshootingRecommendedActions]'},
}
def __init__(self, *, id: str=None, reason_type: str=None, summary: str=None, detail: str=None, recommended_actions=None, **kwargs) -> None:
super(TroubleshootingDetails, self).__init__(**kwargs)
self.id = id
self.reason_type = reason_type
self.summary = summary
self.detail = detail
self.recommended_actions = recommended_actions
| lmazuel/azure-sdk-for-python | azure-mgmt-network/azure/mgmt/network/v2017_03_01/models/troubleshooting_details_py3.py | Python | mit | 1,841 |
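A standalone sketch of what _attribute_map encodes: the Python-name to wire-name mapping that msrest walks when serializing and deserializing. The payload below is invented for illustration.

attribute_map = {'reason_type': {'key': 'reasonType', 'type': 'str'}}
payload = {'reasonType': 'ConfigError'}

kwargs = {py_name: payload.get(spec['key'])
          for py_name, spec in attribute_map.items()}
print(kwargs)  # {'reason_type': 'ConfigError'}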
from fabric.api import *
import subprocess
main_dir = subprocess.check_output("git rev-parse --show-toplevel", shell=True).rstrip()
env.warn_only = True
# Testing
@task
def test_role(puppetrole='docker_test_role', image='centos-7'):
"""[local] Test a role on the specified OS on a Docker image"""
local( 'cd ' + main_dir + '/bin ; ./docker_test_role.sh ' + str(puppetrole) + ' ' + str(image) )
# Building
@task
def rocker_build_role(puppetrole='docker_rocker_build', image='ubuntu1404'):
"""[local] WIP Rockerize a role on all or the specified image OS (data in hieradata/role/$puppetrole.yaml)"""
local( 'cd ' + main_dir + '/bin ; ./docker_rocker_build_role.sh ' + str(puppetrole) + ' ' + str(image) )
@task
def tp_build_role(puppetrole='docker_tp_build', image='centos7'):
"""[local] Dockerize a role based on tp on all or the specified Docker (data in hieradata/role/$puppetrole.yaml)"""
local( 'cd ' + main_dir + '/bin ; ./docker_tp_build_role.sh ' + str(puppetrole) + ' ' + str(image) )
# Maintenance
@task
def setup():
"""[local] Install locally Docker (needs su privileges)"""
local( main_dir + "/bin/docker_setup.sh" )
@task
def status():
"""[local] Show Docker status info"""
local( 'cd ' + main_dir + '/bin ; ./docker_status.sh ' )
@task
def purge(mode=''):
"""[local] Clean up docker images and containers (CAUTION)"""
local( 'cd ' + main_dir + '/bin ; ./docker_purge.sh ' + str(mode))
| snesbittsea/psick | fabfile/docker.py | Python | apache-2.0 | 1,432 |
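For reference, these tasks follow Fabric 1.x conventions, so they would be invoked from the shell with the colon/comma argument syntax; the argument values below are illustrative only.

#   fab test_role:puppetrole=docker_test_role,image=centos-7
#   fab purge:mode=images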
# Copyright: 2009 Nadia Alramli
# License: BSD
"""Terminal controller module
Example of usage:
print BG_BLUE + 'Text on blue background' + NORMAL
print BLUE + UNDERLINE + 'Blue underlined text' + NORMAL
print BLUE + BG_YELLOW + BOLD + 'text' + NORMAL
"""
import sys
# The current module
MODULE = sys.modules[__name__]
COLORS = "BLUE GREEN CYAN RED MAGENTA YELLOW WHITE BLACK".split()
# List of terminal controls, you can add more to the list.
CONTROLS = {
'BOL':'cr', 'UP':'cuu1', 'DOWN':'cud1', 'LEFT':'cub1', 'RIGHT':'cuf1',
'CLEAR_SCREEN':'clear', 'CLEAR_EOL':'el', 'CLEAR_BOL':'el1',
'CLEAR_EOS':'ed', 'BOLD':'bold', 'BLINK':'blink', 'DIM':'dim',
'REVERSE':'rev', 'UNDERLINE':'smul', 'NORMAL':'sgr0',
    'HIDE_CURSOR':'civis', 'SHOW_CURSOR':'cnorm'
}
# List of numeric capabilities
VALUES = {
'COLUMNS':'cols', # Width of the terminal (None for unknown)
'LINES':'lines', # Height of the terminal (None for unknown)
'MAX_COLORS': 'colors',
}
def default():
"""Set the default attribute values"""
for color in COLORS:
setattr(MODULE, color, '')
setattr(MODULE, 'BG_%s' % color, '')
for control in CONTROLS:
setattr(MODULE, control, '')
for value in VALUES:
setattr(MODULE, value, None)
def setup():
"""Set the terminal control strings"""
# Initializing the terminal
curses.setupterm()
# Get the color escape sequence template or '' if not supported
# setab and setaf are for ANSI escape sequences
bgColorSeq = curses.tigetstr('setab') or curses.tigetstr('setb') or ''
fgColorSeq = curses.tigetstr('setaf') or curses.tigetstr('setf') or ''
for color in COLORS:
# Get the color index from curses
colorIndex = getattr(curses, 'COLOR_%s' % color)
# Set the color escape sequence after filling the template with index
setattr(MODULE, color, curses.tparm(fgColorSeq, colorIndex))
# Set background escape sequence
setattr(
MODULE, 'BG_%s' % color, curses.tparm(bgColorSeq, colorIndex)
)
for control in CONTROLS:
# Set the control escape sequence
setattr(MODULE, control, curses.tigetstr(CONTROLS[control]) or '')
for value in VALUES:
# Set terminal related values
setattr(MODULE, value, curses.tigetnum(VALUES[value]))
def render(text):
"""Helper function to apply controls easily
Example:
apply("%(GREEN)s%(BOLD)stext%(NORMAL)s") -> a bold green text
"""
return text % MODULE.__dict__
try:
import curses
setup()
except Exception, e:
# There is a failure; set all attributes to default
print 'Warning: %s' % e
default()
| borzole/borzole | bin/terminal.py | Python | lgpl-3.0 | 2,492 |
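A standalone sketch of the render() trick above: old-style '%' formatting against a mapping of escape-sequence attributes. Hard-coded ANSI codes stand in here for the curses-derived module attributes.

attrs = {'GREEN': '\033[32m', 'BOLD': '\033[1m', 'NORMAL': '\033[0m'}
print('%(GREEN)s%(BOLD)stext%(NORMAL)s' % attrs)  # bold green "text"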
# Copied from https://github.com/ohmu/ohmu_common_py version.py version 0.0.1-0-unknown-fa54b44
"""
pglookout - version detection and version.py __version__ generation
Copyright (c) 2015 Ohmu Ltd
See LICENSE for details
"""
import imp
import os
import subprocess
def save_version(new_ver, old_ver, version_file):
if not new_ver:
return False
version_file = os.path.join(os.path.dirname(__file__), version_file)
if not old_ver or new_ver != old_ver:
with open(version_file, "w") as fp:
fp.write("__version__ = '{}'\n".format(new_ver))
return True
def get_project_version(version_file):
version_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), version_file)
try:
module = imp.load_source("verfile", version_file)
file_ver = module.__version__
except IOError:
file_ver = None
os.chdir(os.path.dirname(__file__) or ".")
try:
git_out = subprocess.check_output(["git", "describe", "--always"],
stderr=getattr(subprocess, "DEVNULL", None))
except (OSError, subprocess.CalledProcessError):
pass
else:
git_ver = git_out.splitlines()[0].strip().decode("utf-8")
if "." not in git_ver:
git_ver = "0.0.1-0-unknown-{}".format(git_ver)
if save_version(git_ver, file_ver, version_file):
return git_ver
makefile = os.path.join(os.path.dirname(__file__), "Makefile")
if os.path.exists(makefile):
with open(makefile, "r") as fp:
lines = fp.readlines()
short_ver = [line.split("=", 1)[1].strip() for line in lines if line.startswith("short_ver")][0]
if save_version(short_ver, file_ver, version_file):
return short_ver
if not file_ver:
raise Exception("version not available from git or from file {!r}".format(version_file))
return file_ver
if __name__ == "__main__":
import sys
get_project_version(sys.argv[1])
| ohmu/pglookout | version.py | Python | apache-2.0 | 2,001 |
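get_project_version above tries sources in order: `git describe`, then a Makefile short_ver line, then the existing version file. A sketch of the first step in isolation:

import subprocess

try:
    out = subprocess.check_output(['git', 'describe', '--always'])
    version = out.splitlines()[0].strip().decode('utf-8')
except (OSError, subprocess.CalledProcessError):
    version = None  # not a git checkout: fall back to the other sources
print(version)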
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-11-05 20:21
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='HomepageBlock',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('object_id', models.PositiveIntegerField()),
('publish_at', models.DateTimeField()),
('position', models.CharField(choices=[('HERO', 'Hero'), ('SEC_1', 'Secondary 1'), ('SEC_2', 'Secondary 2')], max_length=12)),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
],
),
]
| urfonline/api | api/home/migrations/0001_initial.py | Python | mit | 980 |
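The content_type/object_id pair above is the usual scaffolding for a Django GenericForeignKey. A hypothetical model matching this migration (reconstructed from the operations, not taken from the repo, and assuming it lives inside an installed app) might look like:

from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models

class HomepageBlock(models.Model):
    content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')
    publish_at = models.DateTimeField()
    position = models.CharField(max_length=12, choices=[
        ('HERO', 'Hero'), ('SEC_1', 'Secondary 1'), ('SEC_2', 'Secondary 2')])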
# -*- Mode: Python -*-
# GObject-Introspection - a framework for introspecting GObject libraries
# Copyright (C) 2008 Johan Dahlin
# Copyright (C) 2008, 2009 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from __future__ import with_statement
from . import ast
from .xmlwriter import XMLWriter
# Bump this for *incompatible* changes to the .gir.
# Compatible changes we just make inline
COMPATIBLE_GIR_VERSION = '1.2'
class GIRWriter(XMLWriter):
def __init__(self, namespace, shlibs, includes, pkgs, c_includes):
super(GIRWriter, self).__init__()
self.write_comment(
'''This file was automatically generated from C sources - DO NOT EDIT!
To affect the contents of this file, edit the original C definitions,
and/or use gtk-doc annotations. ''')
self._write_repository(namespace, shlibs, includes, pkgs,
c_includes)
def _write_repository(self, namespace, shlibs, includes=None,
packages=None, c_includes=None):
if includes is None:
includes = frozenset()
if packages is None:
packages = frozenset()
if c_includes is None:
c_includes = frozenset()
attrs = [
('version', COMPATIBLE_GIR_VERSION),
('xmlns', 'http://www.gtk.org/introspection/core/1.0'),
('xmlns:c', 'http://www.gtk.org/introspection/c/1.0'),
('xmlns:glib', 'http://www.gtk.org/introspection/glib/1.0'),
]
with self.tagcontext('repository', attrs):
for include in sorted(includes):
self._write_include(include)
for pkg in sorted(set(packages)):
self._write_pkgconfig_pkg(pkg)
for c_include in sorted(set(c_includes)):
self._write_c_include(c_include)
self._namespace = namespace
self._write_namespace(namespace, shlibs)
self._namespace = None
def _write_include(self, include):
attrs = [('name', include.name), ('version', include.version)]
self.write_tag('include', attrs)
def _write_pkgconfig_pkg(self, package):
attrs = [('name', package)]
self.write_tag('package', attrs)
def _write_c_include(self, c_include):
attrs = [('name', c_include)]
self.write_tag('c:include', attrs)
def _write_namespace(self, namespace, shlibs):
attrs = [('name', namespace.name),
('version', namespace.version),
('shared-library', ','.join(shlibs)),
('c:identifier-prefixes', ','.join(namespace.identifier_prefixes)),
('c:symbol-prefixes', ','.join(namespace.symbol_prefixes))]
with self.tagcontext('namespace', attrs):
# We define a custom sorting function here because
# we want aliases to be first. They're a bit
# special because the typelib compiler expands them.
def nscmp(a, b):
if isinstance(a, ast.Alias):
if isinstance(b, ast.Alias):
return cmp(a.name, b.name)
else:
return -1
elif isinstance(b, ast.Alias):
return 1
else:
return cmp(a, b)
for node in sorted(namespace.itervalues(), cmp=nscmp):
self._write_node(node)
def _write_node(self, node):
if isinstance(node, ast.Function):
self._write_function(node)
elif isinstance(node, ast.Enum):
self._write_enum(node)
elif isinstance(node, ast.Bitfield):
self._write_bitfield(node)
elif isinstance(node, (ast.Class, ast.Interface)):
self._write_class(node)
elif isinstance(node, ast.Callback):
self._write_callback(node)
elif isinstance(node, ast.Record):
self._write_record(node)
elif isinstance(node, ast.Union):
self._write_union(node)
elif isinstance(node, ast.Boxed):
self._write_boxed(node)
elif isinstance(node, ast.Member):
# FIXME: atk_misc_instance singleton
pass
elif isinstance(node, ast.Alias):
self._write_alias(node)
elif isinstance(node, ast.Constant):
self._write_constant(node)
else:
print 'WRITER: Unhandled node', node
def _append_version(self, node, attrs):
if node.version:
attrs.append(('version', node.version))
def _write_generic(self, node):
for key, value in node.attributes:
self.write_tag('attribute', [('name', key), ('value', value)])
if hasattr(node, 'doc') and node.doc:
self.write_tag('doc', [('xml:whitespace', 'preserve')],
node.doc)
def _append_node_generic(self, node, attrs):
if node.skip or not node.introspectable:
attrs.append(('introspectable', '0'))
if node.deprecated:
attrs.append(('deprecated', node.deprecated))
if node.deprecated_version:
attrs.append(('deprecated-version',
node.deprecated_version))
def _append_throws(self, func, attrs):
if func.throws:
attrs.append(('throws', '1'))
def _write_alias(self, alias):
attrs = [('name', alias.name)]
if alias.ctype is not None:
attrs.append(('c:type', alias.ctype))
self._append_node_generic(alias, attrs)
with self.tagcontext('alias', attrs):
self._write_generic(alias)
self._write_type_ref(alias.target)
def _write_callable(self, callable, tag_name, extra_attrs):
attrs = [('name', callable.name)]
attrs.extend(extra_attrs)
self._append_version(callable, attrs)
self._append_node_generic(callable, attrs)
self._append_throws(callable, attrs)
with self.tagcontext(tag_name, attrs):
self._write_generic(callable)
self._write_return_type(callable.retval, parent=callable)
self._write_parameters(callable, callable.parameters)
def _write_function(self, func, tag_name='function'):
attrs = []
if hasattr(func, 'symbol'):
attrs.append(('c:identifier', func.symbol))
if func.shadowed_by:
attrs.append(('shadowed-by', func.shadowed_by))
elif func.shadows:
attrs.append(('shadows', func.shadows))
if func.moved_to is not None:
attrs.append(('moved-to', func.moved_to))
self._write_callable(func, tag_name, attrs)
def _write_method(self, method):
self._write_function(method, tag_name='method')
def _write_static_method(self, method):
self._write_function(method, tag_name='function')
def _write_constructor(self, method):
self._write_function(method, tag_name='constructor')
def _write_return_type(self, return_, parent=None):
if not return_:
return
attrs = []
if return_.transfer:
attrs.append(('transfer-ownership', return_.transfer))
if return_.skip:
attrs.append(('skip', '1'))
with self.tagcontext('return-value', attrs):
self._write_generic(return_)
self._write_type(return_.type, function=parent)
def _write_parameters(self, parent, parameters):
if not parameters:
return
with self.tagcontext('parameters'):
for parameter in parameters:
self._write_parameter(parent, parameter)
def _write_parameter(self, parent, parameter):
attrs = []
if parameter.argname is not None:
attrs.append(('name', parameter.argname))
if (parameter.direction is not None) and (parameter.direction != 'in'):
attrs.append(('direction', parameter.direction))
attrs.append(('caller-allocates',
'1' if parameter.caller_allocates else '0'))
if parameter.transfer:
attrs.append(('transfer-ownership',
parameter.transfer))
if parameter.allow_none:
attrs.append(('allow-none', '1'))
if parameter.scope:
attrs.append(('scope', parameter.scope))
if parameter.closure_name is not None:
idx = parent.get_parameter_index(parameter.closure_name)
attrs.append(('closure', '%d' % (idx, )))
if parameter.destroy_name is not None:
idx = parent.get_parameter_index(parameter.destroy_name)
attrs.append(('destroy', '%d' % (idx, )))
if parameter.skip:
attrs.append(('skip', '1'))
with self.tagcontext('parameter', attrs):
self._write_generic(parameter)
self._write_type(parameter.type, function=parent)
def _type_to_name(self, typeval):
if not typeval.resolved:
raise AssertionError("Caught unresolved type %r (ctype=%r)" % (typeval, typeval.ctype))
assert typeval.target_giname is not None
prefix = self._namespace.name + '.'
if typeval.target_giname.startswith(prefix):
return typeval.target_giname[len(prefix):]
return typeval.target_giname
def _write_type_ref(self, ntype):
""" Like _write_type, but only writes the type name rather than the full details """
assert isinstance(ntype, ast.Type), ntype
attrs = []
if ntype.ctype:
attrs.append(('c:type', ntype.ctype))
if isinstance(ntype, ast.Array):
if ntype.array_type != ast.Array.C:
attrs.insert(0, ('name', ntype.array_type))
elif isinstance(ntype, ast.List):
if ntype.name:
attrs.insert(0, ('name', ntype.name))
elif isinstance(ntype, ast.Map):
attrs.insert(0, ('name', 'GLib.HashTable'))
else:
if ntype.target_giname:
attrs.insert(0, ('name', self._type_to_name(ntype)))
elif ntype.target_fundamental:
attrs.insert(0, ('name', ntype.target_fundamental))
self.write_tag('type', attrs)
def _write_type(self, ntype, relation=None, function=None):
assert isinstance(ntype, ast.Type), ntype
attrs = []
if ntype.ctype:
attrs.append(('c:type', ntype.ctype))
if isinstance(ntype, ast.Varargs):
with self.tagcontext('varargs', []):
pass
elif isinstance(ntype, ast.Array):
if ntype.array_type != ast.Array.C:
attrs.insert(0, ('name', ntype.array_type))
# we insert an explicit 'zero-terminated' attribute
# when it is false, or when it would not be implied
# by the absence of length and fixed-size
if not ntype.zeroterminated:
attrs.insert(0, ('zero-terminated', '0'))
elif (ntype.zeroterminated
and (ntype.size is not None or ntype.length_param_name is not None)):
attrs.insert(0, ('zero-terminated', '1'))
if ntype.size is not None:
attrs.append(('fixed-size', '%d' % (ntype.size, )))
if ntype.length_param_name is not None:
assert function
attrs.insert(0, ('length', '%d'
% (function.get_parameter_index(ntype.length_param_name, ))))
with self.tagcontext('array', attrs):
self._write_type(ntype.element_type)
elif isinstance(ntype, ast.List):
if ntype.name:
attrs.insert(0, ('name', ntype.name))
with self.tagcontext('type', attrs):
self._write_type(ntype.element_type)
elif isinstance(ntype, ast.Map):
attrs.insert(0, ('name', 'GLib.HashTable'))
with self.tagcontext('type', attrs):
self._write_type(ntype.key_type)
self._write_type(ntype.value_type)
else:
# REWRITEFIXME - enable this for 1.2
if ntype.target_giname:
attrs.insert(0, ('name', self._type_to_name(ntype)))
elif ntype.target_fundamental:
# attrs = [('fundamental', ntype.target_fundamental)]
attrs.insert(0, ('name', ntype.target_fundamental))
elif ntype.target_foreign:
attrs.insert(0, ('foreign', '1'))
self.write_tag('type', attrs)
def _append_registered(self, node, attrs):
assert isinstance(node, ast.Registered)
if node.get_type:
attrs.extend([('glib:type-name', node.gtype_name),
('glib:get-type', node.get_type)])
def _write_enum(self, enum):
attrs = [('name', enum.name)]
self._append_version(enum, attrs)
self._append_node_generic(enum, attrs)
self._append_registered(enum, attrs)
attrs.append(('c:type', enum.ctype))
if enum.error_domain:
attrs.append(('glib:error-domain', enum.error_domain))
with self.tagcontext('enumeration', attrs):
self._write_generic(enum)
for member in enum.members:
self._write_member(member)
for method in sorted(enum.static_methods):
self._write_static_method(method)
def _write_bitfield(self, bitfield):
attrs = [('name', bitfield.name)]
self._append_version(bitfield, attrs)
self._append_node_generic(bitfield, attrs)
self._append_registered(bitfield, attrs)
attrs.append(('c:type', bitfield.ctype))
with self.tagcontext('bitfield', attrs):
self._write_generic(bitfield)
for member in bitfield.members:
self._write_member(member)
for method in sorted(bitfield.static_methods):
self._write_static_method(method)
def _write_member(self, member):
attrs = [('name', member.name),
('value', str(member.value)),
('c:identifier', member.symbol)]
if member.nick is not None:
attrs.append(('glib:nick', member.nick))
self.write_tag('member', attrs)
def _write_constant(self, constant):
attrs = [('name', constant.name),
('value', constant.value),
('c:type', constant.ctype)]
with self.tagcontext('constant', attrs):
self._write_type(constant.value_type)
def _write_class(self, node):
attrs = [('name', node.name),
('c:symbol-prefix', node.c_symbol_prefix),
('c:type', node.ctype)]
self._append_version(node, attrs)
self._append_node_generic(node, attrs)
if isinstance(node, ast.Class):
tag_name = 'class'
if node.parent is not None:
attrs.append(('parent',
self._type_to_name(node.parent)))
if node.is_abstract:
attrs.append(('abstract', '1'))
else:
assert isinstance(node, ast.Interface)
tag_name = 'interface'
attrs.append(('glib:type-name', node.gtype_name))
if node.get_type is not None:
attrs.append(('glib:get-type', node.get_type))
if node.glib_type_struct is not None:
attrs.append(('glib:type-struct',
self._type_to_name(node.glib_type_struct)))
if isinstance(node, ast.Class):
if node.fundamental:
attrs.append(('glib:fundamental', '1'))
if node.ref_func:
attrs.append(('glib:ref-func', node.ref_func))
if node.unref_func:
attrs.append(('glib:unref-func', node.unref_func))
if node.set_value_func:
attrs.append(('glib:set-value-func', node.set_value_func))
if node.get_value_func:
attrs.append(('glib:get-value-func', node.get_value_func))
with self.tagcontext(tag_name, attrs):
self._write_generic(node)
if isinstance(node, ast.Class):
for iface in sorted(node.interfaces):
self.write_tag('implements',
[('name', self._type_to_name(iface))])
if isinstance(node, ast.Interface):
for iface in sorted(node.prerequisites):
self.write_tag('prerequisite',
[('name', self._type_to_name(iface))])
if isinstance(node, ast.Class):
for method in sorted(node.constructors):
self._write_constructor(method)
if isinstance(node, (ast.Class, ast.Interface)):
for method in sorted(node.static_methods):
self._write_static_method(method)
for vfunc in sorted(node.virtual_methods):
self._write_vfunc(vfunc)
for method in sorted(node.methods):
self._write_method(method)
for prop in sorted(node.properties):
self._write_property(prop)
for field in node.fields:
self._write_field(field)
for signal in sorted(node.signals):
self._write_signal(signal)
def _write_boxed(self, boxed):
attrs = [('glib:name', boxed.name)]
if boxed.c_symbol_prefix is not None:
attrs.append(('c:symbol-prefix', boxed.c_symbol_prefix))
self._append_registered(boxed, attrs)
with self.tagcontext('glib:boxed', attrs):
self._write_generic(boxed)
for method in sorted(boxed.constructors):
self._write_constructor(method)
for method in sorted(boxed.methods):
self._write_method(method)
for method in sorted(boxed.static_methods):
self._write_static_method(method)
def _write_property(self, prop):
attrs = [('name', prop.name)]
self._append_version(prop, attrs)
self._append_node_generic(prop, attrs)
# Properties are assumed to be readable (see also generate.c)
if not prop.readable:
attrs.append(('readable', '0'))
if prop.writable:
attrs.append(('writable', '1'))
if prop.construct:
attrs.append(('construct', '1'))
if prop.construct_only:
attrs.append(('construct-only', '1'))
if prop.transfer:
attrs.append(('transfer-ownership', prop.transfer))
with self.tagcontext('property', attrs):
self._write_generic(prop)
self._write_type(prop.type)
def _write_vfunc(self, vf):
attrs = []
if vf.invoker:
attrs.append(('invoker', vf.invoker))
self._write_callable(vf, 'virtual-method', attrs)
def _write_callback(self, callback):
attrs = []
if callback.namespace:
attrs.append(('c:type', callback.ctype or callback.c_name))
self._write_callable(callback, 'callback', attrs)
def _write_record(self, record, extra_attrs=[]):
is_gtype_struct = False
attrs = list(extra_attrs)
if record.name is not None:
attrs.append(('name', record.name))
if record.ctype is not None: # the record might be anonymous
attrs.append(('c:type', record.ctype))
if record.disguised:
attrs.append(('disguised', '1'))
if record.foreign:
attrs.append(('foreign', '1'))
if record.is_gtype_struct_for is not None:
is_gtype_struct = True
attrs.append(('glib:is-gtype-struct-for',
self._type_to_name(record.is_gtype_struct_for)))
self._append_version(record, attrs)
self._append_node_generic(record, attrs)
self._append_registered(record, attrs)
if record.c_symbol_prefix:
attrs.append(('c:symbol-prefix', record.c_symbol_prefix))
with self.tagcontext('record', attrs):
self._write_generic(record)
if record.fields:
for field in record.fields:
self._write_field(field, is_gtype_struct)
for method in sorted(record.constructors):
self._write_constructor(method)
for method in sorted(record.methods):
self._write_method(method)
for method in sorted(record.static_methods):
self._write_static_method(method)
def _write_union(self, union):
attrs = []
if union.name is not None:
attrs.append(('name', union.name))
if union.ctype is not None: # the union might be anonymous
attrs.append(('c:type', union.ctype))
self._append_version(union, attrs)
self._append_node_generic(union, attrs)
self._append_registered(union, attrs)
if union.c_symbol_prefix:
attrs.append(('c:symbol-prefix', union.c_symbol_prefix))
with self.tagcontext('union', attrs):
self._write_generic(union)
if union.fields:
for field in union.fields:
self._write_field(field)
for method in sorted(union.constructors):
self._write_constructor(method)
for method in sorted(union.methods):
self._write_method(method)
for method in sorted(union.static_methods):
self._write_static_method(method)
def _write_field(self, field, is_gtype_struct=False):
if field.anonymous_node:
if isinstance(field.anonymous_node, ast.Callback):
attrs = [('name', field.name)]
self._append_node_generic(field, attrs)
with self.tagcontext('field', attrs):
self._write_callback(field.anonymous_node)
elif isinstance(field.anonymous_node, ast.Record):
self._write_record(field.anonymous_node)
elif isinstance(field.anonymous_node, ast.Union):
self._write_union(field.anonymous_node)
else:
raise AssertionError("Unknown field anonymous: %r" \
% (field.anonymous_node, ))
else:
attrs = [('name', field.name)]
self._append_node_generic(field, attrs)
# Fields are assumed to be read-only
# (see also girparser.c and generate.c)
if not field.readable:
attrs.append(('readable', '0'))
if field.writable:
attrs.append(('writable', '1'))
if field.bits:
attrs.append(('bits', str(field.bits)))
if field.private:
attrs.append(('private', '1'))
with self.tagcontext('field', attrs):
self._write_generic(field)
self._write_type(field.type)
def _write_signal(self, signal):
attrs = [('name', signal.name)]
if signal.when:
attrs.append(('when', signal.when))
if signal.no_recurse:
attrs.append(('no-recurse', '1'))
if signal.detailed:
attrs.append(('detailed', '1'))
if signal.action:
attrs.append(('action', '1'))
if signal.no_hooks:
attrs.append(('no-hooks', '1'))
self._append_version(signal, attrs)
self._append_node_generic(signal, attrs)
with self.tagcontext('glib:signal', attrs):
self._write_generic(signal)
self._write_return_type(signal.retval)
self._write_parameters(signal, signal.parameters)
|
kerrickstaley/GObject-Introspection-Docutils
|
giscanner/girwriter.py
|
Python
|
gpl-2.0
| 24,554
|
# This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
import vobject
from trytond.tools import reduce_ids, grouped_slice
from trytond.transaction import Transaction
from trytond.pool import Pool, PoolMeta
__all__ = ['Event']
__metaclass__ = PoolMeta
class Event:
__name__ = 'calendar.event'
@classmethod
def __setup__(cls):
super(Event, cls).__setup__()
cls._error_messages.update({
'transparent': 'Free',
'opaque': 'Busy',
})
@classmethod
def search(cls, domain, offset=0, limit=None, order=None, count=False,
query=False):
if Transaction().user:
domain = domain[:]
domain = [domain,
['OR',
[
('classification', '=', 'private'),
['OR',
('calendar.owner', '=', Transaction().user),
('calendar.write_users', '=', Transaction().user),
],
],
('classification', '!=', 'private'),
],
]
records = super(Event, cls).search(domain, offset=offset, limit=limit,
order=order, count=count, query=query)
if Transaction().user:
            # Clear the cache as it was not cleaned for confidential events
cache = Transaction().get_cache()
cache.pop(cls.__name__, None)
return records
@classmethod
def create(cls, vlist):
events = super(Event, cls).create(vlist)
if (cls.search([('id', 'in', [x.id for x in events])], count=True)
!= len(events)):
cls.raise_user_error('access_error', cls.__doc__)
return events
@classmethod
def _clean_confidential(cls, record, transp):
        '''
        Scrub a confidential record: replace the summary with a Free/Busy
        marker and blank out fields (and vevent attributes) that could leak
        details of the event.
        '''
summary = cls.raise_user_error(transp, raise_exception=False)
if 'summary' in record:
record['summary'] = summary
vevent = None
if 'vevent' in record:
vevent = record['vevent']
if vevent:
vevent = vobject.readOne(str(vevent))
if hasattr(vevent, 'summary'):
vevent.summary.value = summary
for field, value in (
('description', ''),
('categories', []),
('location', None),
('status', ''),
('organizer', ''),
('attendees', []),
('alarms', [])):
if field in record:
record[field] = value
if field + '.rec_name' in record:
record[field + '.rec_name'] = ''
if vevent:
if hasattr(vevent, field):
delattr(vevent, field)
if vevent:
record['vevent'] = vevent.serialize()
@classmethod
def read(cls, ids, fields_names=None):
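        # Enforce read access, work out which records the user may write (via
        # the ir.rule write domain), then scrub confidential events that are
        # read-only for this user before returning the values.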
Rule = Pool().get('ir.rule')
cursor = Transaction().connection.cursor()
table = cls.__table__()
if len(set(ids)) != cls.search([('id', 'in', ids)],
count=True):
cls.raise_user_error('access_error', cls.__doc__)
writable_ids = []
domain = Rule.query_get(cls.__name__, mode='write')
if domain:
for sub_ids in grouped_slice(ids):
red_sql = reduce_ids(table.id, sub_ids)
cursor.execute(*table.select(table.id,
where=red_sql & table.id.in_(domain)))
writable_ids.extend(x[0] for x in cursor.fetchall())
else:
writable_ids = ids
writable_ids = set(writable_ids)
if fields_names is None:
fields_names = []
fields_names = fields_names[:]
to_remove = set()
for field in ('classification', 'calendar', 'transp'):
if field not in fields_names:
fields_names.append(field)
to_remove.add(field)
res = super(Event, cls).read(ids, fields_names=fields_names)
for record in res:
if record['classification'] == 'confidential' \
and record['id'] not in writable_ids:
cls._clean_confidential(record, record['transp'])
for field in to_remove:
del record[field]
return res
@classmethod
def write(cls, *args):
for events in args[::2]:
if len(set(events)) != cls.search([('id', 'in', map(int, events))],
count=True):
cls.raise_user_error('access_error', cls.__doc__)
super(Event, cls).write(*args)
for events in args[::2]:
if len(set(events)) != cls.search([('id', 'in', map(int, events))],
count=True):
cls.raise_user_error('access_error', cls.__doc__)
@classmethod
def delete(cls, events):
if len(set(events)) != cls.search([('id', 'in', map(int, events))],
count=True):
cls.raise_user_error('access_error', cls.__doc__)
super(Event, cls).delete(events)
|
tryton/calendar_classification
|
calendar_.py
|
Python
|
gpl-3.0
| 5,284
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Module: l10n_hr_fiskal_lazy
# Author: Davor Bojkić
# mail: bole@dajmi5.com
# Copyright (C) 2012- Daj Mi 5,
# http://www.dajmi5.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class account_invoice(osv.Model):
_inherit = "account.invoice"
def _get_fiskal_broj(self, cr, uid, ids, field_name, field_value, context=None):
res={}
for invoice in self.browse(cr, uid, ids):
"""
            CAREFUL!! The [3:] slice is valid only if the sequence prefix is %(y)/
            If this option is used, the module must not be uninstalled afterwards!
"""
            res[invoice.id] = (invoice.type in ('out_invoice', 'out_refund')) and invoice.number and invoice.number[3:].lstrip('0') or False
return res
_columns = {
'fiskal_broj':fields.function(_get_fiskal_broj, type="char", string="Fiskalizirani broj", readonly=True , store=True)
}
def invoice_validate(self, cr, uid, ids, context=None):
        assert len(ids) == 1, 'One at a time, please'
inv_check=self.browse(cr, uid, ids[0])
if inv_check.type in ('out_invoice','out_refund'):
if not inv_check.uredjaj_id:
raise osv.except_osv('NIJE MOGUCE!', 'Nije unesen naplatni uredjaj')
            #1. check that the device and the journal belong to the same business premise
if inv_check.uredjaj_id.prostor_id.id != inv_check.journal_id.prostor_id.id:
raise osv.except_osv('NIJE MOGUCE!', 'Ne slazu se podaci o poslovnom prostoru i dokument prodaje')
            #2. check the user's permissions for the device and the journal
user = self.pool.get('res.users').browse(cr, uid, uid)
if user.uredjaji and inv_check.uredjaj_id not in user.uredjaji:
raise osv.except_osv('NIJE MOGUCE POTVRDITI!', 'Odabrani naplatni Prostor/Blagajana nisu Vam odobreni za koristenje!')
if user.journals and inv_check.journal_id not in user.journals:
raise osv.except_osv('NIJE MOGUCE POTVRDITI!', 'Nemate prava pisanja u odabrani Dokument!')
res = super(account_invoice, self).invoice_validate(cr, uid, ids, context=context)
return res
|
decodio/l10n_hr
|
l10n_hr_fiskal_lazy/account_invoice.py
|
Python
|
agpl-3.0
| 3,078
|
# -*- coding: utf-8 -*-
'''
Management of PostgreSQL databases.
=============================================
The postgres_database module is used to create and manage Postgres databases.
Databases can be set as either absent or present
.. code-block:: yaml
    frank:
      postgres_database.present
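A fuller sketch (illustrative values; the parameters are documented on
``present`` below):
.. code-block:: yaml
    frank:
      postgres_database.present:
        - encoding: UTF8
        - lc_collate: en_US.UTF8
        - lc_ctype: en_US.UTF8
        - owner: frank
        - template: template0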
'''
# Import salt libs
import salt.utils
def __virtual__():
'''
Only load if the postgres module is present
'''
return 'postgres_database' if 'postgres.user_exists' in __salt__ else False
def present(name,
tablespace=None,
encoding=None,
lc_collate=None,
lc_ctype=None,
owner=None,
template=None,
runas=None,
user=None):
'''
Ensure that the named database is present with the specified properties.
For more information about all of these options see man createdb(1)
name
The name of the database to manage
tablespace
Default tablespace for the database
encoding
The character encoding scheme to be used in this database
lc_collate
The LC_COLLATE setting to be used in this database
lc_ctype
The LC_CTYPE setting to be used in this database
owner
The username of the database owner
template
The template database from which to build this database
runas
System user all operations should be performed on behalf of
.. deprecated:: 0.17.0
user
System user all operations should be performed on behalf of
.. versionadded:: 0.17.0
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'Database {0} is already present'.format(name)}
salt.utils.warn_until(
'Hydrogen',
'Please remove \'runas\' support at this stage. \'user\' support was '
'added in 0.17.0',
_dont_call_warnings=True
)
if runas:
# Warn users about the deprecation
ret.setdefault('warnings', []).append(
'The \'runas\' argument is being deprecated in favor of \'user\', '
'please update your state files.'
)
if user is not None and runas is not None:
# user wins over runas but let warn about the deprecation.
ret.setdefault('warnings', []).append(
'Passed both the \'runas\' and \'user\' arguments. Please don\'t. '
'\'runas\' is being ignored in favor of \'user\'.'
)
runas = None
elif runas is not None:
# Support old runas usage
user = runas
runas = None
dbs = __salt__['postgres.db_list'](runas=user)
db_params = dbs.get(name, {})
if name in dbs and all((
db_params.get('Tablespace') == tablespace if tablespace else True,
db_params.get('Encoding') == encoding if encoding else True,
db_params.get('Collate') == lc_collate if lc_collate else True,
db_params.get('Ctype') == lc_ctype if lc_ctype else True,
db_params.get('Owner') == owner if owner else True
)):
return ret
elif name in dbs and any((
db_params.get('Encoding') != encoding if encoding else False,
db_params.get('Collate') != lc_collate if lc_collate else False,
db_params.get('Ctype') != lc_ctype if lc_ctype else False
)):
        ret['comment'] = 'Database {0} has wrong parameters ' \
                         'which cannot be changed on the fly.'.format(name)
ret['result'] = False
return ret
# The database is not present, make it!
if __opts__['test']:
ret['result'] = None
if name not in dbs:
ret['comment'] = 'Database {0} is set to be created'.format(name)
else:
ret['comment'] = 'Database {0} exists, but parameters ' \
'need to be changed'.format(name)
return ret
if name not in dbs and __salt__['postgres.db_create'](
name,
tablespace=tablespace,
encoding=encoding,
lc_collate=lc_collate,
lc_ctype=lc_ctype,
owner=owner,
template=template,
runas=user):
ret['comment'] = 'The database {0} has been created'.format(name)
ret['changes'][name] = 'Present'
elif name in dbs and __salt__['postgres.db_alter'](name,
tablespace=tablespace,
owner=owner):
ret['comment'] = ('Parameters for database {0} have been changed'
).format(name)
ret['changes'][name] = 'Parameters changed'
elif name in dbs:
ret['comment'] = ('Failed to change parameters for database {0}'
).format(name)
ret['result'] = False
else:
ret['comment'] = 'Failed to create database {0}'.format(name)
ret['result'] = False
return ret
def absent(name, runas=None, user=None):
'''
Ensure that the named database is absent
name
The name of the database to remove
runas
System user all operations should be performed on behalf of
.. deprecated:: 0.17.0
user
System user all operations should be performed on behalf of
.. versionadded:: 0.17.0
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
salt.utils.warn_until(
'Hydrogen',
'Please remove \'runas\' support at this stage. \'user\' support was '
'added in 0.17.0',
_dont_call_warnings=True
)
if runas:
# Warn users about the deprecation
ret.setdefault('warnings', []).append(
'The \'runas\' argument is being deprecated in favor of \'user\', '
'please update your state files.'
)
if user is not None and runas is not None:
# user wins over runas but let warn about the deprecation.
ret.setdefault('warnings', []).append(
'Passed both the \'runas\' and \'user\' arguments. Please don\'t. '
'\'runas\' is being ignored in favor of \'user\'.'
)
runas = None
elif runas is not None:
# Support old runas usage
user = runas
runas = None
#check if db exists and remove it
if __salt__['postgres.db_exists'](name, runas=user):
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Database {0} is set to be removed'.format(name)
return ret
if __salt__['postgres.db_remove'](name, runas=user):
ret['comment'] = 'Database {0} has been removed'.format(name)
ret['changes'][name] = 'Absent'
return ret
# fallback
ret['comment'] = 'Database {0} is not present, so it cannot ' \
'be removed'.format(name)
return ret
|
victorywang80/Maintenance
|
saltstack/src/salt/states/postgres_database.py
|
Python
|
apache-2.0
| 7,266
|
#!/usr/bin/env python
# (c) 2012 - Ryan M. Layer
# Hall Laboratory
# Quinlan Laboratory
# Department of Computer Science
# Department of Biochemistry and Molecular Genetics
# Department of Public Health Sciences and Center for Public Health Genomics,
# University of Virginia
# rl6sf@virginia.edu
import sys
import numpy as np
from operator import itemgetter
from optparse import OptionParser
# some constants for sam/bam field ids
SAM_FLAG = 1
SAM_REFNAME = 2
SAM_MATE_REFNAME = 6
SAM_ISIZE = 8
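# Typical invocation (hypothetical file names; the script reads SAM records
# on stdin and writes a normalized insert-size histogram):
#   samtools view sample.bam | python pairend_distro.py -r 101 -X 4 -N 10000 -o sample.histo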
parser = OptionParser()
parser.add_option("-r",
"--read_length",
type="int",
dest="read_length",
help="Read length")
parser.add_option("-X",
dest="X",
type="int",
help="Number of stdevs from mean to extend")
parser.add_option("-N",
dest="N",
type="int",
help="Number to sample")
parser.add_option("-o",
dest="output_file",
help="Output file")
parser.add_option("-m",
dest="mads",
type="int",
default=10,
help="Outlier cutoff in # of median absolute deviations (unscaled, upper only)")
def unscaled_upper_mad(xs):
"""Return a tuple consisting of the median of xs followed by the
unscaled median absolute deviation of the values in xs that lie
above the median.
"""
med = np.median(xs)
return med, np.median(xs[xs > med] - med)
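# Example: for xs = np.array([1, 2, 3, 10]) the median is 2.5 and the
# unscaled upper MAD is median([3, 10] - 2.5) = 4.0.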
(options, args) = parser.parse_args()
if not options.read_length:
parser.error('Read length not given')
if not options.X:
parser.error('X not given')
if not options.N:
parser.error('N not given')
if not options.output_file:
parser.error('Output file not given')
required = 97
restricted = 3484
flag_mask = required | restricted
L = []
c = 0
for l in sys.stdin:
if c >= options.N:
break
A = l.rstrip().split('\t')
flag = int(A[SAM_FLAG])
refname = A[SAM_REFNAME]
mate_refname = A[SAM_MATE_REFNAME]
isize = int(A[SAM_ISIZE])
want = mate_refname == "=" and flag & flag_mask == required and isize >= 0
if want:
c += 1
L.append(isize)
# Remove outliers
L = np.array(L)
L.sort()
med, umad = unscaled_upper_mad(L)
upper_cutoff = med + options.mads * umad
L = L[L < upper_cutoff]
new_len = len(L)
removed = c - new_len
sys.stderr.write("Removed %d outliers with isize >= %d\n" %
(removed, upper_cutoff))
c = new_len
mean = np.mean(L)
stdev = np.std(L)
start = options.read_length
end = int(mean + options.X*stdev)
H = [0] * (end - start + 1)
s = 0
for x in L:
    if (x >= start) and (x <= end):
        j = int(x - start)
        H[j] += 1
        s += 1
f = open(options.output_file, 'w')
for i in range(end - start + 1):
o = str(i) + "\t" + str(float(H[i])/float(s)) + "\n"
f.write(o)
f.close()
print('mean:' + str(mean) + '\tstdev:' + str(stdev))
|
glebkuznetsov/lumpy-sv
|
scripts/pairend_distro.py
|
Python
|
mit
| 2,794
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('computing', '0008_auto_20141128_0958'),
]
operations = [
migrations.CreateModel(
name='Warranty',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
('warranty_length', models.PositiveSmallIntegerField(null=True, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='computer',
name='additional_software',
field=models.CharField(max_length=200, null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='computer',
name='mac_airport',
field=models.CharField(max_length=17, null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='computer',
name='mac_bluetooth',
field=models.CharField(max_length=17, null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='computer',
name='netrestore_image',
field=models.CharField(max_length=200, null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='computer',
name='part_no',
field=models.CharField(max_length=50, null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='computer',
name='purchase_date',
field=models.DateField(null=True, verbose_name=b'purchase date', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='computer',
name='repair_log',
field=models.TextField(null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='computer',
name='standard_software',
field=models.CharField(max_length=200, null=True, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='computer',
name='warranty_type',
field=models.ForeignKey(blank=True, to='computing.Warranty', null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='computer',
name='ip',
field=models.GenericIPAddressField(null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='subnet',
name='from_ip',
field=models.GenericIPAddressField(),
preserve_default=True,
),
migrations.AlterField(
model_name='subnet',
name='to_ip',
field=models.GenericIPAddressField(),
preserve_default=True,
),
]
|
tamasgal/rlogbook
|
rlogbook/computing/migrations/0009_auto_20141128_1121.py
|
Python
|
mit
| 3,259
|
# Artshow Keeper: A support tool for keeping an Artshow running.
# Copyright (C) 2014 Ivo Hanak
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import functools
import flask
import os
class UserGroups:
ADMIN = 'admin'
SCAN_DEVICE = 'scandevice'
OTHERS = 'others'
UNKNOWN = 'unknown'
def auth(allow=UserGroups.ADMIN):
def decorator_auth_allow(func):
@functools.wraps(func)
def decorated_function(*args, **kwargs):
if flask.g.userGroup == UserGroups.ADMIN \
or (not isinstance(allow, list) and flask.g.userGroup == str(allow)) \
or (isinstance(allow, list) and flask.g.userGroup in allow):
return func(*args, **kwargs)
elif flask.g.userGroup == UserGroups.UNKNOWN:
return flask.redirect(flask.url_for('authenticate', next=flask.request.full_path))
else:
return flask.abort(404)
return decorated_function
return decorator_auth_allow
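# Example usage (hypothetical route; assumes flask.g.userGroup is populated
# elsewhere, e.g. in a before_request hook):
#   @app.route('/admin')
#   @auth(allow=UserGroups.ADMIN)
#   def admin_page():
#       return flask.render_template('admin.html')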
def getNonZeroRandom(size=8):
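    # Build a non-zero integer from `size` random bytes (big-endian
    # accumulation), making up to three attempts in case every byte was zero.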
code = 0
iteration = 0
while code == 0 and iteration < 3:
bytes = os.urandom(size)
for byte in bytes:
code = (code * 256) + byte
iteration = iteration + 1
return code
|
hanak/artshow-keeper
|
artshowkeeper/common/authentication.py
|
Python
|
gpl-3.0
| 1,846
|
import fractions
import functools
import math
import itertools
import logging
logger = logging.getLogger('claripy.vsa.strided_interval')
from .decorators import expand_ifproxy
from ..backend_object import BackendObject
def normalize_types(f):
@functools.wraps(f)
def normalizer(self, o):
            '''
            Coerce both operands into objects we can process: unwrap Base/BVV
            values, promote Python integers to StridedIntervals, match bit
            widths, and reconcile endianness before calling the wrapped method.
            '''
# Special handler for union
if f.__name__ == 'union' and isinstance(o, DiscreteStridedIntervalSet):
return o.union(self)
if isinstance(o, ValueSet) or isinstance(o, IfProxy) or isinstance(o, DiscreteStridedIntervalSet):
# It should be put to o.__radd__(self) when o is a ValueSet
return NotImplemented
if isinstance(o, Base):
o = o.model
if isinstance(self, Base):
            self = self.model
if type(self) is BVV:
self = self.value
if type(o) is BVV:
o = o.value
if type(o) in (int, long):
o = StridedInterval(bits=StridedInterval.min_bits(o), stride=0, lower_bound=o, upper_bound=o)
if type(self) in (int, long):
self = StridedInterval(bits=StridedInterval.min_bits(self), stride=0, lower_bound=self, upper_bound=self)
if f.__name__ not in ('concat', ):
# Make sure they have the same length
common_bits = max(o.bits, self.bits)
if o.bits < common_bits:
o = o.zero_extend(common_bits)
if self.bits < common_bits:
self = self.zero_extend(common_bits)
self_reversed = False
if self._reversed != o._reversed:
# We are working on two instances that have different endianness!
# Make sure the `reversed` property of self is kept the same after operation
if self._reversed:
self_reversed = True
self = self.copy()
self._reversed = False
else:
# If self is an integer, we wanna reverse self as well
if self.is_integer:
self = self._reverse()
self_reversed = True
else:
o = o._reverse()
ret = f(self, o)
if self_reversed and isinstance(ret, StridedInterval):
ret = ret.reverse()
return ret
return normalizer
si_id_ctr = itertools.count()
# Whether DiscreteStridedIntervalSet should be used or not. Sometimes we manually set it to False to allow easy
# implementation of test cases.
allow_dsis = False
class StridedInterval(BackendObject):
"""
A Strided Interval is represented in the following form:
bits,stride[lower_bound, upper_bound]
    For more details, please refer to relevant papers like TIE and WYSINWYX.
    This implementation is signedness-agnostic; please refer to _Signedness-Agnostic Program Analysis: Precise Integer
    Bounds for Low-Level Code_ by Jorge A. Navas et al. for more details.
    Thanks to all the corresponding authors for their outstanding work.
"""
def __init__(self, name=None, bits=0, stride=None, lower_bound=None, upper_bound=None, uninitialized=False, bottom=False):
self._name = name
if self._name is None:
self._name = "SI_%d" % si_id_ctr.next()
self._bits = bits
self._stride = stride
self._lower_bound = lower_bound
self._upper_bound = upper_bound
if lower_bound is not None and type(lower_bound) not in (int, long):
raise ClaripyVSAError("'lower_bound' must be an int or a long. %s is not supported." % type(lower_bound))
if upper_bound is not None and type(upper_bound) not in (int, long):
raise ClaripyVSAError("'upper_bound' must be an int or a long. %s is not supported." % type(upper_bound))
self._reversed = False
self._is_bottom = bottom
self.uninitialized = uninitialized
if self._upper_bound is not None and bits == 0:
self._bits = self._min_bits()
if self._upper_bound is None:
self._upper_bound = StridedInterval.max_int(self.bits)
if self._lower_bound is None:
self._lower_bound = StridedInterval.min_int(self.bits)
# For lower bound and upper bound, we always store the unsigned version
self._lower_bound = self._lower_bound & (2 ** bits - 1)
self._upper_bound = self._upper_bound & (2 ** bits - 1)
self.normalize()
def copy(self):
si = StridedInterval(name=self._name,
bits=self.bits,
stride=self.stride,
lower_bound=self.lower_bound,
upper_bound=self.upper_bound,
uninitialized=self.uninitialized,
bottom=self._is_bottom)
si._reversed = self._reversed
return si
def nameless_copy(self):
si = StridedInterval(name=None,
bits=self.bits,
stride=self.stride,
lower_bound=self.lower_bound,
upper_bound=self.upper_bound,
uninitialized=self.uninitialized,
bottom=self._is_bottom)
si._reversed = self._reversed
return si
def normalize(self):
if self.bits == 8 and self.reversed:
self._reversed = False
if self.is_empty:
return self
if self.lower_bound == self.upper_bound:
self._stride = 0
if self.lower_bound < 0:
self.lower_bound = self.lower_bound & (2 ** self.bits - 1)
self._normalize_top()
if self._stride < 0:
raise Exception("Why does this happen?")
return self
def eval(self, n, signed=False):
"""
Evaluate this StridedInterval to obtain a list of concrete integers
:param n: Upper bound for the number of concrete integers
:param signed: Treat this StridedInterval as signed or unsigned
:return: A list of at most `n` concrete integers
"""
results = [ ]
if self.is_empty:
# no value is available
pass
elif self.stride == 0 and n > 0:
results.append(self.lower_bound)
else:
if signed:
# View it as a signed integer
bounds = self._signed_bounds()
else:
# View it as an unsigned integer
bounds = self._unsigned_bounds()
for lb, ub in bounds:
while len(results) < n and lb <= ub:
results.append(lb)
lb += self.stride # It will not overflow
return results
#
# Private methods
#
def __hash__(self):
return hash((self.bits, self.lower_bound, self.upper_bound, self.stride, self._reversed, self.uninitialized))
def _normalize_top(self):
if self.lower_bound == self._modular_add(self.upper_bound, 1, self.bits) and self.stride == 1:
# This is a TOP!
# Normalize it
self.lower_bound = 0
self.upper_bound = self.max_int(self.bits)
def _ssplit(self):
"""
Split `self` at the south pole, which is the same as in unsigned arithmetic
:return: A list of split StridedIntervals
"""
south_pole_right = self.max_int(self.bits) # 111...1
# south_pole_left = 0
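        # Example: with 8 bits and stride 1, [250, 5] straddles the pole and
        # splits into [250, 255] and [0, 5].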
# Is `self` straddling the south pole?
if self.upper_bound < self.lower_bound:
# It straddles the south pole!
a_upper_bound = south_pole_right - ((south_pole_right - self.lower_bound) % self.stride)
a = StridedInterval(bits=self.bits, stride=self.stride, lower_bound=self.lower_bound, upper_bound=a_upper_bound)
b_lower_bound = self._modular_add(a_upper_bound, self.stride, self.bits)
b = StridedInterval(bits=self.bits, stride=self.stride, lower_bound=b_lower_bound, upper_bound=self.upper_bound)
return [ a, b ]
else:
return [ self.copy() ]
def _nsplit(self):
"""
Split `self` at the north pole, which is the same as in signed arithmetic
:return: A list of split StridedIntervals
"""
north_pole_left = self.max_int(self.bits - 1) # 01111...1
north_pole_right = 2 ** (self.bits - 1) # 1000...0
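        # Example: with 8 bits and stride 1, [100, 200] straddles the pole and
        # splits into [100, 127] and [128, 200].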
# Is `self` straddling the north pole?
if self.lower_bound <= north_pole_left and self.upper_bound >= north_pole_right:
# Yes it does!
a_upper_bound = north_pole_left - ((north_pole_left - self.lower_bound) % self.stride)
a = StridedInterval(bits=self.bits, stride=self.stride, lower_bound=self.lower_bound, upper_bound=a_upper_bound)
b_lower_bound = a_upper_bound + self.stride
b = StridedInterval(bits=self.bits, stride=self.stride, lower_bound=b_lower_bound, upper_bound=self.upper_bound)
return [ a, b ]
else:
return [ self.copy() ]
def _psplit(self):
"""
Split `self` at both north and south poles
:return: A list of split StridedIntervals
"""
nsplit_list = self._nsplit()
psplit_list = [ ]
for si in nsplit_list:
psplit_list.extend(si._ssplit())
return psplit_list
def _signed_bounds(self):
"""
Get lower bound and upper bound for `self` in signed arithmetic
:return: a list of (lower_bound, upper_bound) tuples
"""
nsplit = self._nsplit()
if len(nsplit) == 1:
lb = nsplit[0].lower_bound
ub = nsplit[0].upper_bound
lb = self._unsigned_to_signed(lb, self.bits)
ub = self._unsigned_to_signed(ub, self.bits)
return [ (lb, ub) ]
elif len(nsplit) == 2:
# nsplit[0] is on the left hemisphere, and nsplit[1] is on the right hemisphere
# The left one
lb_1 = nsplit[0].lower_bound
ub_1 = nsplit[0].upper_bound
# The right one
lb_2 = nsplit[1].lower_bound
ub_2 = nsplit[1].upper_bound
# Then convert them to negative numbers
lb_2 = self._unsigned_to_signed(lb_2, self.bits)
ub_2 = self._unsigned_to_signed(ub_2, self.bits)
return [ (lb_1, ub_1), (lb_2, ub_2) ]
else:
raise Exception('WTF')
def _unsigned_bounds(self):
"""
Get lower bound and upper bound for `self` in unsigned arithmetic
:return: a list of (lower_bound, upper_bound) tuples
"""
ssplit = self._ssplit()
if len(ssplit) == 1:
lb = ssplit[0].lower_bound
ub = ssplit[0].upper_bound
return [ (lb, ub) ]
elif len(ssplit) == 2:
# ssplit[0] is on the left hemisphere, and ssplit[1] is on the right hemisphere
lb_1 = ssplit[0].lower_bound
ub_1 = ssplit[0].upper_bound
lb_2 = ssplit[1].lower_bound
ub_2 = ssplit[1].upper_bound
return [ (lb_1, ub_1), (lb_2, ub_2) ]
else:
raise Exception('WTF')
#
# Comparison operations
#
def identical(self, o):
"""
Used to make exact comparisons between two StridedIntervals. Usually it is only used in test cases.
:param o: The other StridedInterval to compare with
:return: True if they are exactly same, False otherwise
"""
if (self.bits == o.bits and
self.stride == o.stride and
self.lower_bound == o.lower_bound and
self.upper_bound == o.upper_bound):
return True
else:
return False
@normalize_types
def SLT(self, o):
"""
Signed less than
:param o: The other operand
:return: TrueResult(), FalseResult(), or MaybeResult()
"""
signed_bounds_1 = self._signed_bounds()
signed_bounds_2 = o._signed_bounds()
ret = [ ]
for lb_1, ub_1 in signed_bounds_1:
for lb_2, ub_2 in signed_bounds_2:
if ub_1 < lb_2:
ret.append(TrueResult())
elif lb_1 >= ub_2:
ret.append(FalseResult())
else:
ret.append(MaybeResult())
if all([r == TrueResult() for r in ret]):
return TrueResult()
elif all([r == FalseResult() for r in ret]):
return FalseResult()
else:
return MaybeResult()
@normalize_types
def SLE(self, o):
"""
Signed less than or equal to
:param o: The other operand
:return: TrueResult(), FalseResult(), or MaybeResult()
"""
signed_bounds_1 = self._signed_bounds()
signed_bounds_2 = o._signed_bounds()
ret = []
for lb_1, ub_1 in signed_bounds_1:
for lb_2, ub_2 in signed_bounds_2:
if ub_1 <= lb_2:
ret.append(TrueResult())
elif lb_1 > ub_2:
ret.append(FalseResult())
else:
ret.append(MaybeResult())
if all([r == TrueResult() for r in ret]):
return TrueResult()
elif all([r == FalseResult() for r in ret]):
return FalseResult()
else:
return MaybeResult()
@normalize_types
def SGT(self, o):
"""
Signed greater than
:param o: The other operand
:return: TrueResult(), FalseResult(), or MaybeResult()
"""
signed_bounds_1 = self._signed_bounds()
signed_bounds_2 = o._signed_bounds()
ret = []
for lb_1, ub_1 in signed_bounds_1:
for lb_2, ub_2 in signed_bounds_2:
if lb_1 > ub_2:
ret.append(TrueResult())
elif ub_1 <= lb_2:
ret.append(FalseResult())
else:
ret.append(MaybeResult())
if all([r == TrueResult() for r in ret]):
return TrueResult()
elif all([r == FalseResult() for r in ret]):
return FalseResult()
else:
return MaybeResult()
@normalize_types
def SGE(self, o):
"""
Signed greater than or equal to
:param o: The other operand
:return: TrueResult(), FalseResult(), or MaybeResult()
"""
signed_bounds_1 = self._signed_bounds()
signed_bounds_2 = o._signed_bounds()
ret = []
for lb_1, ub_1 in signed_bounds_1:
for lb_2, ub_2 in signed_bounds_2:
if lb_1 >= ub_2:
ret.append(TrueResult())
elif ub_1 < lb_2:
ret.append(FalseResult())
else:
ret.append(MaybeResult())
if all([r == TrueResult() for r in ret]):
return TrueResult()
elif all([r == FalseResult() for r in ret]):
return FalseResult()
else:
return MaybeResult()
@normalize_types
def ULT(self, o):
"""
Unsigned less than
:param o: The other operand
:return: TrueResult(), FalseResult(), or MaybeResult()
"""
unsigned_bounds_1 = self._unsigned_bounds()
unsigned_bounds_2 = o._unsigned_bounds()
ret = []
for lb_1, ub_1 in unsigned_bounds_1:
for lb_2, ub_2 in unsigned_bounds_2:
if ub_1 < lb_2:
ret.append(TrueResult())
elif lb_1 >= ub_2:
ret.append(FalseResult())
else:
ret.append(MaybeResult())
if all([r == TrueResult() for r in ret]):
return TrueResult()
elif all([r == FalseResult() for r in ret]):
return FalseResult()
else:
return MaybeResult()
@normalize_types
def ULE(self, o):
"""
Unsigned less than or equal to
:param o: The other operand
:return: TrueResult(), FalseResult(), or MaybeResult()
"""
unsigned_bounds_1 = self._unsigned_bounds()
unsigned_bounds_2 = o._unsigned_bounds()
ret = []
for lb_1, ub_1 in unsigned_bounds_1:
for lb_2, ub_2 in unsigned_bounds_2:
if ub_1 <= lb_2:
ret.append(TrueResult())
elif lb_1 > ub_2:
ret.append(FalseResult())
else:
ret.append(MaybeResult())
if all([r == TrueResult() for r in ret]):
return TrueResult()
elif all([r == FalseResult() for r in ret]):
return FalseResult()
else:
return MaybeResult()
@normalize_types
def UGT(self, o):
"""
        Unsigned greater than
:param o: The other operand
:return: TrueResult(), FalseResult(), or MaybeResult()
"""
unsigned_bounds_1 = self._unsigned_bounds()
unsigned_bounds_2 = o._unsigned_bounds()
ret = []
for lb_1, ub_1 in unsigned_bounds_1:
for lb_2, ub_2 in unsigned_bounds_2:
if lb_1 > ub_2:
ret.append(TrueResult())
elif ub_1 <= lb_2:
ret.append(FalseResult())
else:
ret.append(MaybeResult())
if all([r == TrueResult() for r in ret]):
return TrueResult()
elif all([r == FalseResult() for r in ret]):
return FalseResult()
else:
return MaybeResult()
@normalize_types
def UGE(self, o):
"""
Unsigned greater than or equal to
:param o: The other operand
:return: TrueResult(), FalseResult(), or MaybeResult()
"""
unsigned_bounds_1 = self._unsigned_bounds()
unsigned_bounds_2 = o._unsigned_bounds()
ret = []
for lb_1, ub_1 in unsigned_bounds_1:
for lb_2, ub_2 in unsigned_bounds_2:
if lb_1 >= ub_2:
ret.append(TrueResult())
elif ub_1 < lb_2:
ret.append(FalseResult())
else:
ret.append(MaybeResult())
if all([r == TrueResult() for r in ret]):
return TrueResult()
elif all([r == FalseResult() for r in ret]):
return FalseResult()
else:
return MaybeResult()
def eq(self, o):
"""
Equal
        :param o: The other operand
:return: TrueResult(), FalseResult(), or MaybeResult()
"""
if (self.is_integer
and o.is_integer
):
# Two integers
if self.lower_bound == o.lower_bound:
# They are equal
return TrueResult()
else:
# They are not equal
return FalseResult()
else:
if self.name == o.name:
return TrueResult() # They are the same guy
si_intersection = self.intersection(o)
if si_intersection.is_empty:
return FalseResult()
else:
return MaybeResult()
#
# Overriding default operators in Python
#
def __len__(self):
'''
Get the length in bits of this variable.
        :return: The number of bits
'''
return self._bits
@normalize_types
def __eq__(self, o):
return self.eq(o)
@normalize_types
def __ne__(self, o):
return ~(self.eq(o))
def __gt__(self, other):
"""
Unsigned greater than
:param other: The other operand
:return: TrueResult(), FalseResult(), or MaybeResult()
"""
return self.UGT(other)
def __ge__(self, other):
"""
Unsigned greater than or equal to
:param other: The other operand
:return: TrueResult(), FalseResult(), or MaybeResult()
"""
return self.UGE(other)
def __lt__(self, other):
"""
Unsigned less than
:param other: The other operand
:return: TrueResult(), FalseResult(), or MaybeResult()
"""
return self.ULT(other)
def __le__(self, other):
"""
Unsigned less than or equal to
:param other: The other operand
:return: TrueResult(), FalseResult(), or MaybeResult()
"""
return self.ULE(other)
@normalize_types
def __add__(self, o):
return self.add(o)
@normalize_types
def __sub__(self, o):
return self.sub(o)
@normalize_types
def __mul__(self, o):
return self.mul(o)
@normalize_types
def __mod__(self, o):
# TODO: Make a better approximation
if self.is_integer and o.is_integer:
r = self.lower_bound % o.lower_bound
si = StridedInterval(bits=self.bits, stride=0, lower_bound=r, upper_bound=r)
return si
else:
si = StridedInterval(bits=self.bits, stride=1, lower_bound=0, upper_bound=o.upper_bound - 1)
return si
@normalize_types
def __div__(self, o):
"""
Unsigned division
:param o: The divisor
:return: The quotient (self / o)
"""
return self.udiv(o)
def __neg__(self):
return self.bitwise_not()
def __invert__(self):
return self.bitwise_not()
@expand_ifproxy
@normalize_types
def __or__(self, other):
return self.bitwise_or(other)
@normalize_types
def __and__(self, other):
return self.bitwise_and(other)
def __rand__(self, other):
return self.__and__(other)
@expand_ifproxy
@normalize_types
def __xor__(self, other):
return self.bitwise_xor(other)
@expand_ifproxy
def __rxor__(self, other):
return self.__xor__(other)
def __lshift__(self, other):
return self.lshift(other)
def __rshift__(self, other):
return self.rshift(other)
def __repr__(self):
s = ""
if self.is_empty:
s = '%s<%d>[EmptySI]' % (self._name, self._bits)
else:
s = '%s<%d>0x%x[%s, %s]%s' % (self._name, self._bits, self._stride,
self._lower_bound if type(self._lower_bound) == str else hex(
self._lower_bound),
self._upper_bound if type(self._upper_bound) == str else hex(
self._upper_bound),
'R' if self._reversed else '')
if self.uninitialized:
s += "(uninit)"
return s
#
# Properties
#
@property
def name(self):
return self._name
@property
def reversed(self):
return self._reversed
@property
def size(self):
logger.warning("StridedInterval.size will be deprecated soon. Please use StridedInterval.cardinality instead.")
return self.cardinality
@property
def cardinality(self):
if self.is_integer:
if self.is_empty:
return 0
else:
return 1
else:
return (self._modular_sub(self._upper_bound, self._lower_bound, self.bits) + 1) / self._stride
@property
def lower_bound(self):
return self._lower_bound
@lower_bound.setter
def lower_bound(self, value):
self._lower_bound = value
@property
def upper_bound(self):
return self._upper_bound
@upper_bound.setter
def upper_bound(self, value):
self._upper_bound = value
@property
def bits(self):
return self._bits
@property
def stride(self):
return self._stride
@stride.setter
def stride(self, value):
self._stride = value
@property
def max(self):
if not self.is_empty:
return self.upper_bound
else:
# It is empty!
return None
@property
def min(self):
if not self.is_empty:
return self.lower_bound
else:
# It is empty
return None
@property
def unique(self):
return self.min is not None and self.min == self.max
def _min_bits(self):
v = self._upper_bound
assert v >= 0
return StridedInterval.min_bits(v)
@property
def is_empty(self):
"""
The same as is_bottom
:return: True/False
"""
return self.is_bottom
@property
def is_top(self):
'''
If this is a TOP value
:return: True if this is a TOP
'''
return (self.stride == 1 and
self.lower_bound == self._modular_add(self.upper_bound, 1, self.bits)
)
@property
def is_bottom(self):
"""
Whether this StridedInterval is a BOTTOM, in other words, describes an empty set of integers
:return: True/False
"""
return self._is_bottom
@property
def is_integer(self):
'''
If this is an integer, i.e. self.lower_bound == self.upper_bound
:return: True if this is an integer, False otherwise
'''
return self.lower_bound == self.upper_bound
#
# Modular arithmetic
#
@staticmethod
def _modular_add(a, b, bits):
return (a + b) % (2 ** bits)
@staticmethod
def _modular_sub(a, b, bits):
return (a - b) % (2 ** bits)
@staticmethod
def _modular_mul(a, b, bits):
return (a * b) % (2 ** bits)
#
# Helper methods
#
@staticmethod
def lcm(a, b):
"""
Get the least common multiple
:param a: The first operand (integer)
:param b: The second operand (integer)
:return: Their LCM
"""
return a * b // fractions.gcd(a, b)
@staticmethod
def highbit(k):
return 1 << (k - 1)
@staticmethod
def min_bits(val):
if val == 0:
return 1
elif val < 0:
return int(math.log(-val, 2) + 1) + 1
else:
# Here we assume the maximum val is 64 bits
# Special case to deal with the floating-point imprecision
if val > 0xfffffffffffe0000 and val <= 0x10000000000000000:
return 64
return int(math.log(val, 2) + 1)
@staticmethod
def max_int(k):
return StridedInterval.highbit(k + 1) - 1
@staticmethod
def min_int(k):
return -StridedInterval.highbit(k)
@staticmethod
def _ntz(x):
        '''
        Get the number of trailing zero bits (i.e. the position of the lowest set bit)
        :param x: The integer to inspect
        :return: The count of trailing zero bits, or 0 if x is 0
        '''
if x == 0:
return 0
y = (~x) & (x - 1) # There is actually a bug in BAP until 0.8
def bits(y):
n = 0
while y != 0:
n += 1
y >>= 1
return n
return bits(y)
@staticmethod
def _to_negative(a, bits):
return -((1 << bits) - a)
@staticmethod
def upper(bits, i, stride):
        '''
        Get the largest value representable in `bits` bits that is congruent to i modulo stride.
        :return: That upper value
        '''
if stride >= 1:
offset = i % stride
max = StridedInterval.max_int(bits) # pylint:disable=redefined-builtin
max_offset = max % stride
if max_offset >= offset:
o = max - (max_offset - offset)
else:
o = max - ((max_offset + stride) - offset)
return o
else:
return StridedInterval.max_int(bits)
@staticmethod
def lower(bits, i, stride):
        '''
        Get the smallest value representable in `bits` bits that is congruent to i modulo stride.
        :return: That lower value
        '''
if stride >= 1:
offset = i % stride
min = StridedInterval.min_int(bits) # pylint:disable=redefined-builtin
min_offset = min % stride
if offset >= min_offset:
o = min + (offset - min_offset)
else:
o = min + ((offset + stride) - min_offset)
return o
else:
return StridedInterval.min_int(bits)
@staticmethod
def top(bits, name=None, uninitialized=False):
'''
Get a TOP StridedInterval
:return:
'''
return StridedInterval(name=name,
bits=bits,
stride=1,
lower_bound=0,
upper_bound=StridedInterval.max_int(bits),
uninitialized=uninitialized)
@staticmethod
def empty(bits):
return StridedInterval(bits=bits, bottom=True)
@staticmethod
def _wrapped_cardinality(x, y, bits):
"""
        Return the cardinality of a set of numbers (|x, y|) on the wrapped-interval domain
:param x: The first operand (an integer)
:param y: The second operand (an integer)
:return: The cardinality
"""
if x == y + 1:
return 2 ** bits
else:
return ((y - x) + 1) & (2 ** bits - 1)
@staticmethod
def _is_msb_zero(v, bits):
"""
        Checks if the most significant bit is zero (i.e. whether the integer is non-negative under signed arithmetic)
:param v: The integer to check with
:param bits: Bits of the integer
:return: True or False
"""
return (v & (2 ** bits - 1)) & (2 ** (bits - 1)) == 0
@staticmethod
def _unsigned_to_signed(v, bits):
"""
Convert an unsigned integer to a signed integer
:param v: The unsigned integer
:param bits: How many bits this integer should be
:return: The converted signed integer
"""
if StridedInterval._is_msb_zero(v, bits):
return v
else:
return -(2 ** bits - v)
@staticmethod
def _wrappedoverflow_add(a, b):
"""
Determines if an overflow happens during the addition of `a` and `b`.
:param a: The first operand (StridedInterval)
:param b: The other operand (StridedInterval)
:return: True if overflows, False otherwise
"""
if a.is_integer and a.lower_bound == 0:
# Special case: if `a` or `b` is a zero
card_self = 0
else:
card_self = StridedInterval._wrapped_cardinality(a.lower_bound, a.upper_bound, a.bits)
if b.is_integer and b.lower_bound == 0:
# Special case: if `a` or `b` is a zero
card_b = 0
else:
card_b = StridedInterval._wrapped_cardinality(b.lower_bound, b.upper_bound, b.bits)
return (card_self + card_b) > StridedInterval.max_int(a.bits)
@staticmethod
def _wrappedoverflow_sub(a, b):
"""
Determines if an overflow happens during the subtraction of `a` and `b`.
:param a: The first operand (StridedInterval)
:param b: The other operand (StridedInterval)
:return: True if overflows, False otherwise
"""
return StridedInterval._wrappedoverflow_add(a, b)
@staticmethod
def _wrapped_unsigned_mul(a, b):
"""
Perform wrapped unsigned multiplication on two StridedIntervals
:param a: The first operand (StridedInterval)
:param b: The second operand (StridedInterval)
:return: The multiplication result
"""
bits = max(a.bits, b.bits)
lb = a.lower_bound * b.lower_bound
ub = a.upper_bound * b.upper_bound
max_ = StridedInterval.max_int(bits)
if lb > max_ or ub > max_:
# Overflow occurred
return StridedInterval.top(bits, uninitialized=False)
else:
if b.is_integer:
# Multiplication with an integer, and it does not overflow!
stride = abs(a.stride * b.lower_bound)
elif a.is_integer:
stride = abs(a.lower_bound * b.stride)
else:
stride = fractions.gcd(a.stride, b.stride)
return StridedInterval(bits=bits, stride=stride, lower_bound=lb, upper_bound=ub)
@staticmethod
def _wrapped_signed_mul(a, b):
"""
Perform wrapped signed multiplication on two StridedIntervals
:param a: The first operand (StridedInterval)
:param b: The second operand (StridedInterval)
:return: The product
"""
bits = max(a.bits, b.bits)
a_lb_positive = StridedInterval._is_msb_zero(a.lower_bound, bits)
a_ub_positive = StridedInterval._is_msb_zero(a.upper_bound, bits)
b_lb_positive = StridedInterval._is_msb_zero(b.lower_bound, bits)
b_ub_positive = StridedInterval._is_msb_zero(b.upper_bound, bits)
if b.is_integer:
# Multiplication with an integer, and it does not overflow!
# Note that as long as it overflows, a TOP will be returned and the stride will be simply ignored
stride = abs(a.stride * b.lower_bound)
elif a.is_integer:
stride = abs(a.lower_bound * b.stride)
else:
stride = fractions.gcd(a.stride, b.stride)
max_ = StridedInterval.max_int(bits)
if (a_lb_positive and a_ub_positive and b_lb_positive and b_ub_positive):
# [2, 5] * [10, 20] = [20, 100]
lb = a.lower_bound * b.lower_bound
ub = a.upper_bound * b.upper_bound
if lb > max_ or ub > max_:
# overflow
return StridedInterval.top(bits)
else:
return StridedInterval(bits=bits, stride=stride, lower_bound=lb, upper_bound=ub)
elif (not a_lb_positive and not a_ub_positive and not b_lb_positive and not b_ub_positive):
# [-5, -2] * [-20, -10] = [20, 100]
lb = (
StridedInterval._unsigned_to_signed(a.upper_bound, bits) *
StridedInterval._unsigned_to_signed(b.upper_bound, bits)
)
ub = (
StridedInterval._unsigned_to_signed(a.lower_bound, bits) *
StridedInterval._unsigned_to_signed(b.lower_bound, bits)
)
if lb > max_ or ub > max_:
# overflow
return StridedInterval.top(bits)
else:
return StridedInterval(bits=bits, stride=stride, lower_bound=lb, upper_bound=ub)
elif (not a_lb_positive and not a_ub_positive and b_lb_positive and b_ub_positive):
# [-10, -2] * [2, 5] = [-50, -4]
lb = StridedInterval._unsigned_to_signed(a.lower_bound, bits) * b.upper_bound
ub = StridedInterval._unsigned_to_signed(a.upper_bound, bits) * b.lower_bound
if lb & (2 ** bits - 1) > max_ or ub & (2 ** bits - 1) > max_:
# overflow
return StridedInterval.top(bits)
else:
return StridedInterval(bits=bits, stride=stride, lower_bound=lb, upper_bound=ub)
elif (a_lb_positive and a_ub_positive and not b_lb_positive and not b_ub_positive):
# [2, 10] * [-5, -2] = [-50, -4]
lb = a.upper_bound * StridedInterval._unsigned_to_signed(b.lower_bound, bits)
ub = a.lower_bound * StridedInterval._unsigned_to_signed(b.upper_bound, bits)
if lb & (2 ** bits - 1) > max_ or ub & (2 ** bits - 1) > max_:
# overflow
return StridedInterval.top(bits)
else:
return StridedInterval(bits=bits, stride=stride, lower_bound=lb, upper_bound=ub)
else:
raise Exception('We shouldn\'t see this case: %s * %s' % (a, b))
@staticmethod
def _wrapped_unsigned_div(a, b):
"""
Perform wrapped unsigned division on two StridedIntervals.
:param a: The dividend (StridedInterval)
:param b: The divisor (StridedInterval)
:return: The quotient
"""
bits = max(a.bits, b.bits)
divisor_lb, divisor_ub = b.lower_bound, b.upper_bound
# Make sure divisor_lb and divisor_ub is not 0
if divisor_lb == 0:
# Can we increment it?
if divisor_ub == 0:
# We can't :-(
return StridedInterval.empty(bits)
else:
divisor_lb += 1
lb = a.lower_bound / divisor_ub
ub = a.upper_bound / divisor_lb
# TODO: Can we make a more precise estimate of the stride?
stride = 1
return StridedInterval(bits=bits, stride=stride, lower_bound=lb, upper_bound=ub)
@staticmethod
def _wrapped_signed_div(a, b):
"""
Perform wrapped unsigned division on two StridedIntervals.
:param a: The dividend (StridedInterval)
:param b: The divisor (StridedInterval)
:return: The quotient
"""
bits = max(a.bits, b.bits)
# Make sure the divisor is not 0
divisor_lb = b.lower_bound
divisor_ub = b.upper_bound
if divisor_lb == 0:
# Try to increment it
if divisor_ub == 0:
return StridedInterval.empty(bits)
else:
divisor_lb = 1
dividend_positive = StridedInterval._is_msb_zero(a.lower_bound, bits)
divisor_positive = StridedInterval._is_msb_zero(b.lower_bound, bits)
# TODO: Can we make a more precise estimate of the stride?
stride = 1
if dividend_positive and divisor_positive:
# They are all positive numbers!
lb = a.lower_bound / divisor_ub
ub = a.upper_bound / divisor_lb
elif dividend_positive and not divisor_positive:
# + / -
lb = a.upper_bound / StridedInterval._unsigned_to_signed(divisor_ub, bits)
ub = a.lower_bound / StridedInterval._unsigned_to_signed(divisor_lb, bits)
elif not dividend_positive and divisor_positive:
# - / +
lb = StridedInterval._unsigned_to_signed(a.lower_bound, bits) / divisor_lb
ub = StridedInterval._unsigned_to_signed(a.upper_bound, bits) / divisor_ub
else:
# - / -
lb = StridedInterval._unsigned_to_signed(a.upper_bound, bits) / \
StridedInterval._unsigned_to_signed(b.lower_bound, bits)
ub = StridedInterval._unsigned_to_signed(a.lower_bound, bits) / \
StridedInterval._unsigned_to_signed(b.upper_bound, bits)
return StridedInterval(bits=bits, stride=stride, lower_bound=lb, upper_bound=ub)
@staticmethod
def _wrapped_bitwise_or(a, b):
if a.is_empty or b.is_empty:
logger.error('Bitwise_or on empty strided-intervals.')
return a.copy()
# Special handling for integers
# TODO: Is this special handling still necessary?
if a.is_integer:
# self is an integer
t = StridedInterval._ntz(b.stride)
elif b.is_integer:
# b is an integer
t = StridedInterval._ntz(a.stride)
else:
t = min(StridedInterval._ntz(a.stride), StridedInterval._ntz(b.stride))
# If a or b is zero, we can make the stride more precise!
premask = 1 << t
if a.is_integer and a.lower_bound == 0:
# a is 0
# or'ng with zero does not change the stride
stride_ = b.stride
elif b.is_integer and b.lower_bound == 0:
# b is 0
stride_ = a.stride
else:
stride_ = 1 << t
lowbits = (a.lower_bound | b.lower_bound) & (premask - 1)
# TODO: Make this function looks better
r_1 = a.lower_bound < 0
r_2 = a.upper_bound < 0
r_3 = b.lower_bound < 0
r_4 = b.upper_bound < 0
if (r_1, r_2, r_3, r_4) == (True, True, True, True):
lb_ = StridedInterval.min_or(a.bits, a.lower_bound, a.upper_bound, b.lower_bound, b.upper_bound)
ub_ = StridedInterval.max_or(a.bits, a.lower_bound, a.upper_bound, b.lower_bound, b.upper_bound)
elif (r_1, r_2, r_3, r_4) == (True, True, False, False):
lb_ = StridedInterval.min_or(a.bits, a.lower_bound, a.upper_bound, b.lower_bound, b.upper_bound)
ub_ = StridedInterval.max_or(a.bits, a.lower_bound, a.upper_bound, b.lower_bound, b.upper_bound)
elif (r_1, r_2, r_3, r_4) == (False, False, True, True):
lb_ = StridedInterval.min_or(a.bits, a.lower_bound, a.upper_bound, b.lower_bound, b.upper_bound)
ub_ = StridedInterval.max_or(a.bits, a.lower_bound, a.upper_bound, b.lower_bound, b.upper_bound)
elif (r_1, r_2, r_3, r_4) == (False, False, False, False):
lb_ = StridedInterval.min_or(a.bits, a.lower_bound, a.upper_bound, b.lower_bound, b.upper_bound)
ub_ = StridedInterval.max_or(a.bits, a.lower_bound, a.upper_bound, b.lower_bound, b.upper_bound)
elif (r_1, r_2, r_3, r_4) == (True, True, True, False):
lb_ = a.lower_bound
            ub_ = -1
elif (r_1, r_2, r_3, r_4) == (True, False, True, True):
lb_ = b.lower_bound
            ub_ = -1
elif (r_1, r_2, r_3, r_4) == (True, False, True, False):
lb_ = min(a.lower_bound, b.lower_bound)
ub_ = StridedInterval.max_or(a.bits, 0, a.upper_bound, 0, b.upper_bound)
elif (r_1, r_2, r_3, r_4) == (True, False, False, False):
            lb_ = StridedInterval.min_or(a.bits, a.lower_bound, -1, b.lower_bound, b.upper_bound)
ub_ = StridedInterval.max_or(a.bits, 0, a.upper_bound, b.lower_bound, b.upper_bound)
elif (r_1, r_2, r_3, r_4) == (False, False, True, False):
            lb_ = StridedInterval.min_or(a.bits, a.lower_bound, a.upper_bound, b.lower_bound, -1)
ub_ = StridedInterval.max_or(a.bits, a.lower_bound, a.upper_bound, b.lower_bound, b.upper_bound)
else:
raise ArithmeticError("Impossible")
highmask = ~(premask - 1)
ret = StridedInterval(bits=a.bits, stride=stride_, lower_bound=(lb_ & highmask) | lowbits,
upper_bound=(ub_ & highmask) | lowbits)
ret.normalize()
return ret
@staticmethod
def _wrapped_bitwise_and(a, b):
def number_of_ones(n):
ctr = 0
while n > 0:
ctr += 1
n &= n - 1
return ctr
# If only one bit is set in b, we can make it more precise
if b.is_integer:
if b.lower_bound == (1 << (b.bits - 1)):
# It's testing the sign bit
stride = 1 << (b.bits - 1)
if a.lower_bound < 0:
if a.upper_bound >= 0:
return StridedInterval(bits=b.bits, stride=stride, lower_bound=0, upper_bound=stride)
else:
return StridedInterval(bits=b.bits, stride=0, lower_bound=stride, upper_bound=stride)
else:
if a.lower_bound >= stride and a.upper_bound >= stride:
return StridedInterval(bits=b.bits, stride=0, lower_bound=stride, upper_bound=stride)
elif a.lower_bound < stride and a.upper_bound >= stride:
return StridedInterval(bits=b.bits, stride=stride, lower_bound=0, upper_bound=stride)
else:
return StridedInterval(bits=b.bits, stride=0, lower_bound=0, upper_bound=0)
elif number_of_ones(b.lower_bound) == 1:
if a.lower_bound < 0 and a.upper_bound > 0:
mask = (2 ** a.bits) - 1
s = a.copy()
s.lower_bound = a.lower_bound & mask
if s.lower_bound > s.upper_bound:
t = s.upper_bound
s.upper_bound = s.lower_bound
s.lower_bound = t
else:
s = a
first_one_pos = StridedInterval._ntz(b.lower_bound)
stride = 2 ** first_one_pos
if s.lower_bound <= stride and s.upper_bound >= stride:
return StridedInterval(bits=s.bits, stride=stride, lower_bound=0, upper_bound=stride)
elif s.upper_bound < stride:
return StridedInterval(bits=s.bits, stride=0, lower_bound=0, upper_bound=0)
else:
return StridedInterval(bits=s.bits, stride=0, lower_bound=stride, upper_bound=stride)
return a.bitwise_not().bitwise_or(b.bitwise_not()).bitwise_not()
#
# Membership testing and poset ordering
#
@staticmethod
def _lex_lte(x, y, bits):
"""
Lexicographical LTE comparison
:param x: The first operand (integer)
:param y: The second operand (integer)
:param bits: bit-width of the operands
:return: True or False
"""
return (x & (2 ** bits - 1)) <= (y & (2 ** bits - 1))
@staticmethod
def _lex_lt(x, y, bits):
"""
Lexicographical LT comparison
:param x: The first operand (integer)
:param y: The second operand (integer)
:param bits: bit-width of the operands
:return: True or False
"""
return (x & (2 ** bits - 1)) < (y & (2 ** bits - 1))
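    # Both helpers compare unsigned values after masking to the bit-width,
    # e.g. (illustrative) _lex_lt(-1, 1, 8) is False since -1 & 0xff == 255.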
def _wrapped_member(self, v):
"""
Test if integer v belongs to StridedInterval a
:param self: A StridedInterval instance
:param v: An integer
:return: True or False
"""
a = self
return self._lex_lte(v - a.lower_bound, a.upper_bound - a.lower_bound, a.bits)
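    # Illustrative sketch (not in the original source): membership reduces to
    # one unsigned comparison after rotating the interval so its lower bound
    # sits at 0. E.g. for the wrapped 8-bit interval [250, 5], v = 2 is a
    # member because (2 - 250) & 0xff == 8 <= (5 - 250) & 0xff == 11.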
def _wrapped_lte(self, b):
"""
Perform a wrapped LTE comparison based on the poset ordering
:param a: The first operand
:param b: The second operand
:return: True if a <= b, False otherwise
"""
a = self
if a.is_empty:
return True
if a.is_top and b.is_top:
return True
elif a.is_top:
return False
elif b.is_top:
return True
if b._wrapped_member(a.lower_bound) and b._wrapped_member(a.upper_bound):
if ((b.lower_bound == a.lower_bound and b.upper_bound == a.upper_bound)
or not a._wrapped_member(b.lower_bound) or not a._wrapped_member(b.upper_bound)):
return True
return False
#
# Arithmetic operations
#
def neg(self):
"""
Unary operation: neg
:return: 0 - self
"""
return StridedInterval(bits=self.bits, stride=0, lower_bound=0, upper_bound=0).sub(self)
def add(self, b):
"""
Binary operation: add
:param b: The other operand
:return: self + b
"""
new_bits = max(self.bits, b.bits)
# TODO: Some improvements can be made here regarding the following case
# TODO: SI<16>0xff[0x0, 0xff] + 3
# TODO: In current implementation, it overflows, but it doesn't have to
overflow = self._wrappedoverflow_add(self, b)
if overflow:
return StridedInterval.top(self.bits)
lb = self._modular_add(self.lower_bound, b.lower_bound, new_bits)
ub = self._modular_add(self.upper_bound, b.upper_bound, new_bits)
# Is it initialized?
uninitialized = self.uninitialized or b.uninitialized
# Take the GCD of two operands' strides
stride = fractions.gcd(self.stride, b.stride)
return StridedInterval(bits=new_bits, stride=stride, lower_bound=lb, upper_bound=ub,
uninitialized=uninitialized)
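    # Illustrative sketch (not in the original source): adding 4[0, 12]
    # (stride 4) to 6[0, 18] (stride 6) yields 2[0, 30], since the result
    # stride is gcd(4, 6) == 2 and the cardinalities do not overflow 8 bits.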
def sub(self, b):
"""
Binary operation: sub
:param b: The other operand
:return: self - b
"""
new_bits = max(self.bits, b.bits)
overflow = self._wrappedoverflow_sub(self, b)
if overflow:
return StridedInterval.top(self.bits)
lb = self._modular_sub(self.lower_bound, b.upper_bound, new_bits)
ub = self._modular_sub(self.upper_bound, b.lower_bound, new_bits)
# Is it initialized?
uninitialized = self.uninitialized or b.uninitialized
# Take the GCD of two operands' strides
stride = fractions.gcd(self.stride, b.stride)
return StridedInterval(bits=new_bits, stride=stride, lower_bound=lb, upper_bound=ub,
uninitialized=uninitialized)
def mul(self, o):
"""
Binary operation: multiplication
:param o: The other operand
:return: self * o
"""
if self.is_integer and o.is_integer:
# Two integers!
a, b = self.lower_bound, o.lower_bound
ret = StridedInterval(bits=self.bits,
stride=0,
lower_bound=a * b,
upper_bound=a * b
)
return ret.normalize()
else:
# All other cases
# Cut from both north pole and south pole
si1_psplit = self._psplit()
si2_psplit = o._psplit()
ret = None
for si1 in si1_psplit:
for si2 in si2_psplit:
tmp_unsigned_mul = self._wrapped_unsigned_mul(si1, si2)
tmp_signed_mul = self._wrapped_signed_mul(si1, si2)
tmp_meet = tmp_unsigned_mul.intersection(tmp_signed_mul)
if ret is None:
ret = tmp_meet
else:
ret = ret.union(tmp_meet)
return ret.normalize()
def sdiv(self, o):
"""
Binary operation: signed division
:param o: The divisor
:return: (self / o) in signed arithmetic
"""
splitted_dividends = self._nsplit()
splitted_divisors = o._nsplit()
ret = self.empty(self.bits)
for dividend in splitted_dividends:
for divisor in splitted_divisors:
tmp = self._wrapped_signed_div(dividend, divisor)
ret = ret.union(tmp)
return ret.normalize()
def udiv(self, o):
"""
Binary operation: unsigned division
:param o: The divisor
:return: (self / o) in unsigned arithmetic
"""
splitted_dividends = self._ssplit()
splitted_divisors = o._ssplit()
ret = self.empty(self.bits)
for dividend in splitted_dividends:
for divisor in splitted_divisors:
tmp = self._wrapped_unsigned_div(dividend, divisor)
ret = ret.union(tmp)
return ret.normalize()
def bitwise_not(self):
"""
Unary operation: bitwise not
:return: ~self
"""
splitted_si = self._ssplit()
ret = StridedInterval.empty(self.bits)
for si in splitted_si:
            lb = ~si.upper_bound
            ub = ~si.lower_bound
            stride = si.stride
            tmp = StridedInterval(bits=si.bits, stride=stride, lower_bound=lb, upper_bound=ub)
ret = ret.union(tmp)
return ret
@staticmethod
def min_or(k, a, b, c, d):
m = StridedInterval.highbit(k)
ret = 0
while True:
if m == 0:
ret = a | c
break
elif (~a & c & m) != 0:
tmp = (a | m) & -m
if tmp <= b:
ret = tmp | c
break
elif (a & ~c & m) != 0:
tmp = (c | m) & -m
if tmp <= d:
ret = tmp | a
break
m = m >> 1
return ret
@staticmethod
def max_or(k, a, b, c, d):
m = StridedInterval.highbit(k)
while True:
if m == 0:
return b | d
elif (b & d & m) != 0:
tmp1 = (b - m) | (m - 1)
tmp2 = (d - m) | (m - 1)
if tmp1 >= a:
return tmp1 | d
elif tmp2 >= c:
return tmp2 | b
m = m >> 1
def bitwise_or(self, b):
"""
Binary operation: logical or
:param b: The other operand
:return: self | b
"""
splitted_a = self._ssplit()
splitted_b = b._ssplit()
ret = StridedInterval.empty(self.bits)
for x in splitted_a:
for y in splitted_b:
tmp = self._wrapped_bitwise_or(x, y)
ret = ret.union(tmp)
return ret.normalize()
def bitwise_and(self, b):
"""
Binary operation: logical and
:param b: The other operand
:return:
"""
splitted_a = self._ssplit()
splitted_b = b._ssplit()
ret = StridedInterval.empty(self.bits)
for x in splitted_a:
for y in splitted_b:
tmp = self._wrapped_bitwise_and(x, y)
ret = ret.union(tmp)
return ret.normalize()
def bitwise_xor(self, b):
'''
Operation xor
:param b: The other operand
:return:
'''
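        # The expression below computes xor via De Morgan's laws:
        # ~(~a | b) | ~(~b | a) == (a & ~b) | (b & ~a) == a ^ b,
        # reusing the wrapped or/not implementations above.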
return self.bitwise_not().bitwise_or(b).bitwise_not().bitwise_or(b.bitwise_not().bitwise_or(self).bitwise_not())
def _pre_shift(self, shift_amount):
def get_range(expr):
'''
Get the range of bits for shifting
:param expr:
:return: A tuple of maximum and minimum bits to shift
'''
def round(max, x): #pylint:disable=redefined-builtin
if x < 0 or x > max:
return max
else:
return x
if type(expr) in [int, long]:
return (expr, expr)
assert type(expr) is StridedInterval
if expr.is_integer:
return (round(self.bits, expr.lower_bound),
round(self.bits, expr.lower_bound))
else:
if expr.lower_bound < 0:
if expr.upper_bound >= 0:
return (0, self.bits)
else:
return (self.bits, self.bits)
else:
return (round(self.bits, self.lower_bound), round(self.bits, self.upper_bound))
lower, upper = get_range(shift_amount)
        # TODO: Is truncating necessary?
return lower, upper
def rshift(self, shift_amount):
lower, upper = self._pre_shift(shift_amount)
# Shift the lower_bound and upper_bound by all possible amounts, and
# get min/max values from all the resulting values
new_lower_bound = None
new_upper_bound = None
for shift_amount in xrange(lower, upper + 1):
l = self.lower_bound >> shift_amount
if new_lower_bound is None or l < new_lower_bound:
new_lower_bound = l
u = self.upper_bound >> shift_amount
if new_upper_bound is None or u > new_upper_bound:
new_upper_bound = u
# NOTE: If this is an arithmetic operation, we should take care
# of sign-changes.
ret = StridedInterval(bits=self.bits,
stride=max(self.stride >> upper, 1),
lower_bound=new_lower_bound,
upper_bound=new_upper_bound)
ret.normalize()
return ret
def lshift(self, shift_amount):
lower, upper = self._pre_shift(shift_amount)
# Shift the lower_bound and upper_bound by all possible amounts, and
# get min/max values from all the resulting values
new_lower_bound = None
new_upper_bound = None
for shift_amount in xrange(lower, upper + 1):
l = self.lower_bound << shift_amount
if new_lower_bound is None or l < new_lower_bound:
new_lower_bound = l
u = self.upper_bound << shift_amount
if new_upper_bound is None or u > new_upper_bound:
new_upper_bound = u
# NOTE: If this is an arithmetic operation, we should take care
# of sign-changes.
ret = StridedInterval(bits=self.bits,
stride=max(self.stride << lower, 1),
lower_bound=new_lower_bound,
upper_bound=new_upper_bound)
ret.normalize()
return ret
def cast_low(self, tok):
assert tok <= self.bits
if tok == self.bits:
return self.copy()
else:
            # Calculate the new upper bound and lower bound
mask = (1 << tok) - 1
if (self.lower_bound & mask) == self.lower_bound and \
(self.upper_bound & mask) == self.upper_bound:
return StridedInterval(bits=tok, stride=self.stride,
lower_bound=self.lower_bound,
upper_bound=self.upper_bound)
elif self.upper_bound - self.lower_bound <= mask:
l = self.lower_bound & mask
u = self.upper_bound & mask
# Keep the signs!
if self.lower_bound < 0:
l = StridedInterval._to_negative(l, tok)
if self.upper_bound < 0:
u = StridedInterval._to_negative(u, tok)
return StridedInterval(bits=tok, stride=self.stride,
lower_bound=l,
upper_bound=u)
elif (self.upper_bound & mask == self.lower_bound & mask) and \
((self.upper_bound - self.lower_bound) & mask == 0):
# This operation doesn't affect the stride. Stride should be 0 then.
bound = self.lower_bound & mask
return StridedInterval(bits=tok,
stride=0,
lower_bound=bound,
upper_bound=bound)
else:
# TODO: How can we do better here? For example, keep the stride information?
return self.top(tok)
@normalize_types
def concat(self, b):
# Zero-extend
a = self.nameless_copy()
a._bits += b.bits
new_si = a.lshift(b.bits)
new_b = b.copy()
# Zero-extend b
new_b._bits = new_si.bits
if new_si.is_integer:
# We can be more precise!
new_si._bits = new_b.bits
new_si._stride = new_b.stride
new_si._lower_bound = new_si.lower_bound + b.lower_bound
new_si._upper_bound = new_si.upper_bound + b.upper_bound
return new_si
else:
return new_si.bitwise_or(new_b)
def extract(self, high_bit, low_bit):
if self._reversed:
reversed = self._reverse()
return reversed.extract(high_bit, low_bit)
assert low_bit >= 0
bits = high_bit - low_bit + 1
if low_bit != 0:
ret = self.rshift(low_bit)
else:
ret = self.copy()
if bits != self.bits:
ret = ret.cast_low(bits)
return ret.normalize()
def sign_extend(self, new_length):
"""
Unary operation: SignExtend
:param new_length: New length after sign-extension
:return: A new StridedInterval
"""
msb = self.extract(self.bits - 1, self.bits - 1).eval(2)
if msb == [ 0 ]:
# All positive numbers
return self.zero_extend(new_length)
if msb == [ 1 ]:
# All negative numbers
si = self.copy()
si._bits = new_length
mask = (2 ** new_length - 1) - (2 ** self.bits - 1)
si._lower_bound = si._lower_bound | mask
si._upper_bound = si._upper_bound | mask
else:
# Both positive numbers and negative numbers
numbers = self._nsplit()
# Since there are both positive and negative numbers, there must be two bounds after nsplit
# assert len(numbers) == 2
si = self.empty(new_length)
for n in numbers:
a, b = n.lower_bound, n.upper_bound
if b < 2 ** (n.bits - 1):
# msb = 0
si_ = StridedInterval(bits=new_length, stride=n.stride, lower_bound=a, upper_bound=b)
else:
# msb = 1
mask = (2 ** new_length - 1) - (2 ** self.bits - 1)
si_ = StridedInterval(bits=new_length, stride=n.stride, lower_bound=a | mask, upper_bound=b | mask)
si = si.union(si_)
return si
def zero_extend(self, new_length):
"""
Unary operation: ZeroExtend
:param new_length: New length after zero-extension
:return: A new StridedInterval
"""
si = self.copy()
si._bits = new_length
return si
@normalize_types
def union(self, b):
"""
The union operation. It might return a DiscreteStridedIntervalSet to allow for better precision in analysis.
:param b: Operand
:return: A new DiscreteStridedIntervalSet, or a new StridedInterval.
"""
if not allow_dsis:
return self._union(b)
else:
if self.cardinality > discrete_strided_interval_set.MAX_CARDINALITY_WITHOUT_COLLAPSING or \
                    b.cardinality > discrete_strided_interval_set.MAX_CARDINALITY_WITHOUT_COLLAPSING:
return self._union(b)
else:
dsis = DiscreteStridedIntervalSet(bits=self._bits, si_set={ self })
return dsis.union(b)
@normalize_types
def _union(self, b):
"""
Binary operation: union
It's also the join operation.
:param b: The other operand.
:return: A new StridedInterval
"""
if self._reversed != b._reversed:
logger.warning('Incoherent reversed flag between operands %s and %s', self, b)
#
# Trivial cases
#
if self.is_empty:
return b
if b.is_empty:
return self
if self.is_integer and b.is_integer:
u = max(self.upper_bound, b.upper_bound)
l = min(self.lower_bound, b.lower_bound)
stride = abs(u - l)
return StridedInterval(bits=self.bits, stride=stride, lower_bound=l, upper_bound=u)
#
# Other cases
#
# Determine the new stride
if self.is_integer:
new_stride = fractions.gcd(self._modular_sub(self.lower_bound, b.lower_bound, self.bits), b.stride)
elif b.is_integer:
new_stride = fractions.gcd(self.stride, self._modular_sub(b.lower_bound, self.lower_bound, self.bits))
else:
new_stride = fractions.gcd(self.stride, b.stride)
remainder_1 = self.lower_bound % new_stride if new_stride > 0 else 0
remainder_2 = b.lower_bound % new_stride if new_stride > 0 else 0
if remainder_1 != remainder_2:
new_stride = fractions.gcd(abs(remainder_1 - remainder_2), new_stride)
# Then we have different cases
if self._wrapped_lte(b):
# Containment
return StridedInterval(bits=self.bits, stride=new_stride, lower_bound=b.lower_bound,
upper_bound=b.upper_bound)
elif b._wrapped_lte(self):
# Containment
# TODO: This case is missing in the original implementation. Is that a bug?
return StridedInterval(bits=self.bits, stride=new_stride, lower_bound=self.lower_bound,
upper_bound=self.upper_bound)
elif (self._wrapped_member(b.lower_bound) and self._wrapped_member(b.upper_bound) and
b._wrapped_member(self.lower_bound) and b._wrapped_member(self.upper_bound)):
# The union of them covers the entire sphere
return StridedInterval.top(self.bits)
elif self._wrapped_member(b.lower_bound):
# Overlapping
return StridedInterval(bits=self.bits, stride=new_stride, lower_bound=self.lower_bound,
upper_bound=b.upper_bound)
elif b._wrapped_member(self.lower_bound):
# Overlapping
return StridedInterval(bits=self.bits, stride=new_stride, lower_bound=b.lower_bound,
upper_bound=self.upper_bound)
else:
card_1 = self._wrapped_cardinality(self.upper_bound, b.lower_bound, self.bits)
card_2 = self._wrapped_cardinality(b.upper_bound, self.lower_bound, self.bits)
if card_1 == card_2:
# Left/right leaning cases
if self._lex_lt(self.lower_bound, b.lower_bound, self.bits):
return StridedInterval(bits=self.bits, stride=new_stride, lower_bound=self.lower_bound,
upper_bound=b.upper_bound)
else:
return StridedInterval(bits=self.bits, stride=new_stride, lower_bound=b.lower_bound,
upper_bound=self.upper_bound)
elif card_1 < card_2:
# non-overlapping case (left)
return StridedInterval(bits=self.bits, stride=new_stride, lower_bound=self.lower_bound,
upper_bound=b.upper_bound)
else:
# non-overlapping case (right)
return StridedInterval(bits=self.bits, stride=new_stride, lower_bound=b.lower_bound,
upper_bound=self.upper_bound)
def _minimum_intersection_integer(self, other, lb_from_self):
"""
Solves for the minimum integer that exists in both StridedIntervals
:param other: The other operand
:param lb_from_self: True/False. If True, then we have `other` contains `self` or `other` contains
`self`.lower_bound, and vice versa
:return: The minimum integer if there is one, or None if it doesn't exist.
"""
        # It's equivalent to finding an integral solution for the equation `ax + b = cy + d` that makes `ax + b` minimal
# Some assumptions:
# a, b, c, d are all positive integers
# x >= 0, y >= 0
a, b, c, d = self.stride, self.lower_bound, other.stride, other.lower_bound
if (d - b) % self.lcm(a, c) != 0:
# They don't overlap
return None
if c % a:
p = c / a
if not lb_from_self:
k1 = (d - b) / a # It must be an integer
k = int(k1 + 0.5)
else:
k2 = (b - d) * (c * 1.0 / a - p) / c + (d - b) / a
k = int(k2 + 0.5)
y = (k - (d - b) / a) / (c * 1.0 / a - p)
first_integer = int(c * y + d)
else:
if lb_from_self:
first_integer = b
else:
first_integer = d
if self._wrapped_member(first_integer) and \
self._modular_sub(first_integer, self.lower_bound, self.bits) % self.stride == 0 and \
other._wrapped_member(first_integer) and \
other._modular_sub(first_integer, other.lower_bound, other.bits) % other.stride == 0:
return first_integer
else:
return None
@normalize_types
def intersection(self, b):
if self.is_empty or b.is_empty:
return StridedInterval.empty(self.bits)
assert self.bits == b.bits
if self.is_integer and b.is_integer:
if self.lower_bound == b.lower_bound:
# They are the same number!
ret = StridedInterval(bits=self.bits,
stride=0,
lower_bound=self.lower_bound,
upper_bound=self.lower_bound)
else:
ret = StridedInterval.empty(self.bits)
elif self.is_integer:
integer = self.lower_bound
if (b.lower_bound - integer) % b.stride == 0 and \
b._wrapped_member(integer):
ret = StridedInterval(bits=self.bits,
stride=0,
lower_bound=integer,
upper_bound=integer)
else:
ret = StridedInterval.empty(self.bits)
elif b.is_integer:
integer = b.lower_bound
if (integer - self.lower_bound) % self.stride == 0 and \
self._wrapped_member(integer):
ret = StridedInterval(bits=self.bits,
stride=0,
lower_bound=integer,
upper_bound=integer)
else:
ret = StridedInterval.empty(self.bits)
else:
# None of the operands is an integer
new_stride = self.lcm(self.stride, b.stride)
if self._wrapped_lte(b):
# `b` may fully contain `self`
lb = self._minimum_intersection_integer(b, True)
if lb is None:
ret = StridedInterval.empty(self.bits)
else:
ub = self._modular_add(
self._modular_sub(self.upper_bound, lb, self.bits) / new_stride * new_stride,
lb,
self.bits
)
ret = StridedInterval(bits=self.bits,
stride=new_stride,
lower_bound=lb,
upper_bound=ub
)
elif b._wrapped_lte(self):
# `self` contains `b`
lb = b._minimum_intersection_integer(self, True)
if lb is None:
ret = StridedInterval.empty(self.bits)
else:
ub = self._modular_add(
self._modular_sub(b.upper_bound, lb, self.bits) / new_stride * new_stride,
lb,
self.bits
)
ret = StridedInterval(bits=self.bits,
stride=new_stride,
lower_bound=lb,
upper_bound=ub
)
elif self._wrapped_member(b.lower_bound) and \
self._wrapped_member(b.upper_bound) and \
b._wrapped_member(self.lower_bound) and \
b._wrapped_member(self.upper_bound):
# One cover the other
card_1 = self._wrapped_cardinality(self.lower_bound, self.upper_bound, self.bits)
card_2 = self._wrapped_cardinality(b.lower_bound, b.upper_bound, b.bits)
if self._lex_lt(card_1, card_2, self.bits) or \
(card_1 == card_2 and self._lex_lte(self.lower_bound, b.lower_bound, self.bits)):
lb = self._minimum_intersection_integer(b, True)
if lb is None:
ret = StridedInterval.empty(self.bits)
else:
ub = self._modular_add(
self._modular_sub(self.upper_bound, lb, self.bits) / new_stride * new_stride,
lb,
self.bits
)
ret = StridedInterval(bits=self.bits,
stride=new_stride,
lower_bound=lb,
upper_bound=ub
)
else:
lb = self._minimum_intersection_integer(b, False)
if lb is None:
ret = StridedInterval.empty(self.bits)
else:
ub = self._modular_add(
self._modular_sub(b.upper_bound, lb, self.bits) / new_stride * new_stride,
lb,
self.bits
)
ret = StridedInterval(bits=self.bits,
stride=new_stride,
lower_bound=lb,
upper_bound=ub
)
elif self._wrapped_member(b.lower_bound):
# Overlapping
lb = b._minimum_intersection_integer(self, True)
if lb is None:
ret = StridedInterval.empty(self.bits)
else:
ub = self._modular_add(
self._modular_sub(self.upper_bound, lb, self.bits) / new_stride * new_stride,
lb,
self.bits
)
ret = StridedInterval(bits=self.bits,
stride=new_stride,
lower_bound=lb,
upper_bound=ub
)
elif b._wrapped_member(self.lower_bound):
# Overlapping
lb = self._minimum_intersection_integer(b, True)
if lb is None:
ret = StridedInterval.empty(self.bits)
else:
ub = self._modular_add(
self._modular_sub(b.upper_bound, lb, self.bits) / new_stride * new_stride,
lb,
self.bits
)
ret = StridedInterval(bits=self.bits,
stride=new_stride,
lower_bound=lb,
upper_bound=ub
)
else:
# Disjoint
ret = StridedInterval.empty(self.bits)
ret.normalize()
return ret
@normalize_types
def widen(self, b):
ret = None
if self.is_empty and not b.is_empty:
ret = StridedInterval.top(bits=self.bits)
elif self.is_empty:
ret = b
elif b.is_empty:
ret = self
else:
new_stride = fractions.gcd(self.stride, b.stride)
l = StridedInterval.lower(self.bits, self.lower_bound, new_stride) + 2 if b.lower_bound < self.lower_bound else self.lower_bound
u = StridedInterval.upper(self.bits, self.upper_bound, new_stride) - 2 if b.upper_bound > self.upper_bound else self.upper_bound
if new_stride == 0:
if self.is_integer and b.is_integer:
ret = StridedInterval(bits=self.bits, stride=u - l, lower_bound=l, upper_bound=u)
else:
raise ClaripyOperationError('SI: operands are not reduced.')
else:
ret = StridedInterval(bits=self.bits, stride=new_stride, lower_bound=l, upper_bound=u)
ret.normalize()
return ret
def reverse(self):
if self.bits == 8:
# We cannot reverse a one-byte value
return self.copy()
si = self.copy()
si._reversed = not si._reversed
return si
def _reverse(self):
"""
This function does the reversing for real.
:return: A new reversed StridedInterval instance
"""
o = self.copy()
# Clear the reversed flag
o._reversed = not o._reversed
if o.bits == 8:
# No need for reversing
return o.copy()
if o.is_top:
# A TOP is still a TOP after reversing
si = o.copy()
return si
else:
if not o.is_integer:
# We really don't want to do that. Something is wrong.
logger.warning('Reversing a real strided-interval %s is bad', self)
# Reversing an integer is easy
rounded_bits = ((o.bits + 7) / 8) * 8
list_bytes = [ ]
si = None
for i in xrange(0, rounded_bits, 8):
b = o.extract(min(i + 7, o.bits - 1), i)
list_bytes.append(b)
for b in list_bytes:
si = b if si is None else si.concat(b)
return si
def CreateStridedInterval(name=None, bits=0, stride=None, lower_bound=None, upper_bound=None, to_conv=None):
'''
:param name:
:param bits:
:param stride:
:param lower_bound:
:param upper_bound:
:param to_conv:
:return:
'''
if to_conv is not None:
if isinstance(to_conv, Base):
to_conv = to_conv.model
if isinstance(to_conv, StridedInterval):
# No conversion will be done
return to_conv
if type(to_conv) not in {int, long, BVV}: #pylint:disable=unidiomatic-typecheck
raise ClaripyOperationError('Unsupported to_conv type %s' % type(to_conv))
if stride is not None or lower_bound is not None or \
upper_bound is not None:
raise ClaripyOperationError('You cannot specify both to_conv and other parameters at the same time.')
if type(to_conv) is BVV: #pylint:disable=unidiomatic-typecheck
bits = to_conv.bits
to_conv_value = to_conv.value
        else:
            to_conv_value = to_conv
stride = 0
lower_bound = to_conv_value
upper_bound = to_conv_value
bi = StridedInterval(name=name,
bits=bits,
stride=stride,
lower_bound=lower_bound,
upper_bound=upper_bound)
return bi
from .errors import ClaripyVSAError
from ..errors import ClaripyOperationError
from .bool_result import TrueResult, FalseResult, MaybeResult
from . import discrete_strided_interval_set
from .discrete_strided_interval_set import DiscreteStridedIntervalSet
from .valueset import ValueSet
from .ifproxy import IfProxy
from ..ast.base import Base
from ..bv import BVV
|
avain/claripy
|
claripy/vsa/strided_interval.py
|
Python
|
bsd-2-clause
| 79,506
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'Said Sef'
|
saidsef/cloudflare
|
lib/__init__.py
|
Python
|
mit
| 67
|
import sys
import unittest
from .. import distribution, entry_points, files, PackageNotFoundError, version
try:
from importlib.resources import path
except ImportError:
from importlib_resources import path
try:
from contextlib import ExitStack
except ImportError:
from contextlib2 import ExitStack
class TestZip(unittest.TestCase):
root = 'importlib_metadata.tests.data'
def setUp(self):
# Find the path to the example-*.whl so we can add it to the front of
# sys.path, where we'll then try to find the metadata thereof.
self.resources = ExitStack()
self.addCleanup(self.resources.close)
wheel = self.resources.enter_context(
path(self.root, 'example-21.12-py3-none-any.whl'))
sys.path.insert(0, str(wheel))
self.resources.callback(sys.path.pop, 0)
def test_zip_version(self):
self.assertEqual(version('example'), '21.12')
def test_zip_version_does_not_match(self):
with self.assertRaises(PackageNotFoundError):
version('definitely-not-installed')
def test_zip_entry_points(self):
scripts = dict(entry_points()['console_scripts'])
entry_point = scripts['example']
self.assertEqual(entry_point.value, 'example:main')
entry_point = scripts['Example']
self.assertEqual(entry_point.value, 'example:main')
def test_missing_metadata(self):
self.assertIsNone(distribution('example').read_text('does not exist'))
def test_case_insensitive(self):
self.assertEqual(version('Example'), '21.12')
def test_files(self):
for file in files('example'):
path = str(file.dist.locate_file(file))
assert '.whl/' in path, path
class TestEgg(TestZip):
def setUp(self):
# Find the path to the example-*.egg so we can add it to the front of
# sys.path, where we'll then try to find the metadata thereof.
self.resources = ExitStack()
self.addCleanup(self.resources.close)
egg = self.resources.enter_context(
path(self.root, 'example-21.12-py3.6.egg'))
sys.path.insert(0, str(egg))
self.resources.callback(sys.path.pop, 0)
def test_files(self):
for file in files('example'):
path = str(file.dist.locate_file(file))
assert '.egg/' in path, path
|
randyzingle/tools
|
kub/services/archive/cdk/python/sample-app/.env/lib/python3.6/site-packages/importlib_metadata/tests/test_zip.py
|
Python
|
apache-2.0
| 2,372
|
### KMP
def prefix_function(s):
'''Creates the prefix function array for the given string s, to be used in KMP or similar'''
pi = [0]
n = len(s)
for i in range(1, n):
j = pi[i - 1]
while j > 0 and s[i] != s[j]:
j = pi[j - 1]
if s[i] == s[j]:
j += 1
pi.append(j)
return pi
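# Quick illustrative check (added, not part of the original sketch): pi[i] is
# the length of the longest proper prefix of s[:i + 1] that is also its suffix.
assert prefix_function('abcabcd') == [0, 0, 0, 1, 2, 3, 0]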
### Works, but timed out on some codeforces tests. May need to reimplement in C++
def z_function(s):
'''Creates the Z-function array for the given string s'''
    n = len(s)
    z = [0] * n
l = 0
r = 1
for i in range(1, n):
if i < r:
z[i] = min(z[i - l], r - i)
while i + z[i] < n and s[i + z[i]] == s[z[i]]:
z[i] += 1
if i + z[i] >= r:
l = i
            r = i + z[i]
return z
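# Quick illustrative check (added): z[i] is the length of the longest common
# prefix of s and s[i:], with z[0] left as 0 by convention.
assert z_function('aaaaa') == [0, 4, 3, 2, 1]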
### Not Battle Tested
def suffix_array(s):
    '''Creates a suffix array for the given string s in n(log(n))^2 time, because I'm lazy'''
    n = len(s)
    # Position n acts as the empty-suffix sentinel and always sorts first
    ranges = [n] + sorted([i for i in range(n)], key = lambda x: s[x])
    # Equal keys must share a rank, otherwise the doubling step below compares
    # arbitrary tie-break positions instead of actual prefixes
    order = [0] * (n + 1)
    for (pos, ind) in enumerate(ranges):
        if pos > 0 and ind != n and ranges[pos - 1] != n and s[ind] == s[ranges[pos - 1]]:
            order[ind] = order[ranges[pos - 1]]
        else:
            order[ind] = pos
    l = 1 ## interval length
    while (l < 2 * n):
        key = lambda x: (order[x], order[(x + l) % (n + 1)])
        ranges = sorted([i for i in range(n + 1)], key = key)
        new_order = [0] * (n + 1)
        for pos in range(1, n + 1):
            prev, cur = ranges[pos - 1], ranges[pos]
            new_order[cur] = new_order[prev] + (1 if key(cur) != key(prev) else 0)
        order = new_order
        l *= 2
    return [i - 1 for i in order[0:n]]
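# Illustrative check (added). Note the return value is the rank of each suffix
# indexed by starting position (the inverse of the sorted-index form): the
# sorted suffixes of 'banana' are a, ana, anana, banana, na, nana, so suffix 0
# ('banana') gets rank 3, and so on.
assert suffix_array('banana') == [3, 2, 5, 1, 4, 0]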
|
heffalump/sketches
|
algorithms/strings.py
|
Python
|
bsd-2-clause
| 1,374
|
# Write a function named first_number that takes a string as an argument. The function should search, with a regular expression,
# the first number in the string and return the match object.
import re
def first_number(string):
return re.search(r'\d', string)
# Now, write a function named numbers() that takes two arguments: a count as an integer and a string.
# Return an re.search for exactly count numbers in the string. Remember, you can multiply strings and integers to create your pattern.
def numbers(count, string):
return re.search(r'\d' * count, string)
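# Illustrative usage (added, not part of the exercise): re.search returns a
# match object, so .group() exposes the matched text.
assert first_number('abc123def').group() == '1'
assert numbers(3, 'abc123def').group() == '123'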
|
CaseyNord/Treehouse
|
Regular Expressions in Python/escapes.py
|
Python
|
mit
| 587
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implements a variant merge stategy that moves fields to calls."""
import hashlib
import re
from typing import Iterable, List, Set  # pylint: disable=unused-import
from apache_beam.io.gcp.internal.clients import bigquery # pylint: disable=unused-import
from gcp_variant_transforms.beam_io.vcfio import Variant
from gcp_variant_transforms.libs import bigquery_util
from gcp_variant_transforms.libs.variant_merge import variant_merge_strategy
__all__ = ['MoveToCallsStrategy']
class MoveToCallsStrategy(variant_merge_strategy.VariantMergeStrategy):
"""A merging strategy that moves fields to the corresponding calls records.
Variants will be merged across files using
'reference_name:start:end:reference_bases:alternate_bases' as key. INFO
fields would be moved to calls if they match
`info_keys_to_move_to_calls_regex`. Otherwise, one will be chosen as
  representative (in no particular order) among the merged variants.
Filters will be merged across all variants matching the key and the highest
quality score will be chosen as representative for the merged variants.
The filters and quality fields can be optionally copied to their associated
calls using `copy_quality_to_calls` and `copy_filter_to_calls` options.
Note: if a field is set to be moved from INFO to calls, then it must not
already exist in calls (i.e. specified by FORMAT in the VCF header).
"""
def __init__(self, info_keys_to_move_to_calls_regex, copy_quality_to_calls,
copy_filter_to_calls):
# type: (str, bool, bool) -> None
"""Initializes the strategy.
Args:
info_keys_to_move_to_calls_regex: A regular expression specifying info
fields that should be moved to calls.
copy_quality_to_calls: Whether to copy the quality field to the associated
calls in each record.
copy_filter_to_calls: Whether to copy filter field to the associated calls
in each record.
"""
self._info_keys_to_move_to_calls_re = (
re.compile(info_keys_to_move_to_calls_regex)
if info_keys_to_move_to_calls_regex else None)
self._copy_quality_to_calls = copy_quality_to_calls
self._copy_filter_to_calls = copy_filter_to_calls
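  # Illustrative usage sketch (hypothetical values, not from this module):
  #   strategy = MoveToCallsStrategy(info_keys_to_move_to_calls_regex='^DP$',
  #                                  copy_quality_to_calls=True,
  #                                  copy_filter_to_calls=False)
  #   merged = strategy.get_merged_variants(variants_sharing_a_merge_key)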
def move_data_to_calls(self, variant):
# type: (Variant) -> None
"""Moves filters, calls, and info items to the variant's calls based on the
strategy's initialization parameters.
Args:
variant: The variant whose filters, quality, and info items will be moved
to its calls if specified.
"""
additional_call_info = {}
if self._should_copy_filter_to_calls():
additional_call_info[
bigquery_util.ColumnKeyConstants.FILTER] = variant.filters
if self._should_copy_quality_to_calls():
additional_call_info[
bigquery_util.ColumnKeyConstants.QUALITY] = variant.quality
for info_key, info_value in variant.info.items():
if self._should_move_info_key_to_calls(info_key):
additional_call_info[info_key] = info_value
for call in variant.calls:
call.info.update(additional_call_info)
def move_data_to_merged(self, variant, merged_variant):
# type: (Variant, Variant) -> None
"""Moves items from the variant's info to merged_variant.
Args:
variant: The variant whose info items will be moved to `merged_variant` if
specified.
merged_variant: The variant who will receive the info items of `variant`
if specified.
"""
for info_key, info_value in variant.info.items():
if not self._should_move_info_key_to_calls(info_key):
merged_variant.info[info_key] = info_value
def get_merged_variants(self, variants, unused_key=None):
# type: (List[Variant], str) -> List[Variant]
if not variants:
return []
merged_variant = None
for variant in variants:
if not merged_variant:
merged_variant = Variant(reference_name=variant.reference_name,
start=variant.start,
end=variant.end,
reference_bases=variant.reference_bases,
alternate_bases=variant.alternate_bases)
# Since we use hash function in generating the merge key, there is
# a chance (extremely low though) to have variants with different
# `reference_bases` or `alternate_base` here due to a collision in
# the hash function.
assert variant.reference_bases == merged_variant.reference_bases, (
'Cannot merge variants with different reference bases. {} vs {}'
.format(variant.reference_bases, merged_variant.reference_bases))
assert variant.alternate_bases == merged_variant.alternate_bases, (
'Cannot merge variants with different alternate bases. {} vs {}'
.format(variant.alternate_bases, merged_variant.alternate_bases))
merged_variant.names.extend(variant.names)
merged_variant.filters.extend(variant.filters)
if (merged_variant.quality is not None and
variant.quality is not None):
merged_variant.quality = max(merged_variant.quality, variant.quality)
elif merged_variant.quality is None:
merged_variant.quality = variant.quality
self.move_data_to_calls(variant)
self.move_data_to_merged(variant, merged_variant)
merged_variant.calls.extend(variant.calls)
# Deduplicate names and filters.
merged_variant.names = sorted(set(merged_variant.names))
merged_variant.filters = sorted(set(merged_variant.filters))
return [merged_variant]
def get_merge_keys(self, variant):
yield ':'.join(
[str(x) for x in [
variant.reference_name or '',
variant.start or '',
variant.end or '',
self._get_hash(variant.reference_bases or ''),
self._get_hash(','.join(variant.alternate_bases or []))]])
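  # Illustrative sketch (not from the original source): a variant at
  # chr1:100-101 with reference_bases 'A' and alternate_bases ['T'] yields
  # 'chr1:100:101:' + md5('A') + ':' + md5('T'), so only a hash collision
  # could group variants with different bases (hence the asserts in
  # get_merged_variants).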
def modify_bigquery_schema(self, schema, info_keys):
# type: (bigquery.TableSchema, Set[str]) -> None
# Find the calls record so that it's easier to reference it below.
calls_record = None
for field in schema.fields:
if field.name == bigquery_util.ColumnKeyConstants.CALLS:
calls_record = field
break
if not calls_record:
raise ValueError('calls record must exist in the schema.')
existing_calls_keys = {field.name for field in calls_record.fields}
updated_fields = []
for field in schema.fields:
if (self._should_copy_filter_to_calls() and
field.name == bigquery_util.ColumnKeyConstants.FILTER):
if bigquery_util.ColumnKeyConstants.FILTER in existing_calls_keys:
self._raise_duplicate_key_error(
bigquery_util.ColumnKeyConstants.FILTER,
'should_copy_filter_to_calls')
calls_record.fields.append(field)
updated_fields.append(field)
elif (self._should_copy_quality_to_calls() and
field.name == bigquery_util.ColumnKeyConstants.QUALITY):
if bigquery_util.ColumnKeyConstants.QUALITY in existing_calls_keys:
self._raise_duplicate_key_error(
bigquery_util.ColumnKeyConstants.QUALITY,
'should_copy_quality_to_calls')
calls_record.fields.append(field)
updated_fields.append(field)
elif (field.name in info_keys and
self._should_move_info_key_to_calls(field.name)):
if field.name in existing_calls_keys:
self._raise_duplicate_key_error(field.name,
'info_keys_to_move_to_calls_regex')
calls_record.fields.append(field)
else:
updated_fields.append(field)
schema.fields = updated_fields
def _get_hash(self, value):
return hashlib.md5(value.encode('utf-8')).hexdigest()
def _should_move_info_key_to_calls(self, info_key):
return bool(self._info_keys_to_move_to_calls_re and
self._info_keys_to_move_to_calls_re.match(info_key))
def _should_copy_filter_to_calls(self):
return self._copy_filter_to_calls
def _should_copy_quality_to_calls(self):
return self._copy_quality_to_calls
def _raise_duplicate_key_error(self, key, flag_name):
raise ValueError(
'The field "%s" already exists in calls, but %s flag also moves a '
'field with the same name to calls. Please either change the flag '
'or rename the field.' % (key, flag_name))
|
googlegenomics/gcp-variant-transforms
|
gcp_variant_transforms/libs/variant_merge/move_to_calls_strategy.py
|
Python
|
apache-2.0
| 9,013
|
"""Module to construct html data, shown on web page. TODO GREAT revision."""
import pp_utils
import lxml
from lxml.html import builder as E
class Saver():
"""returns objects to display at html page
can construct plain html too
"""
def __init__(self, beatmap_info):
self.beatmap_info = beatmap_info
def columns(self, pp, pcount, b_info, mod):
"""returns displayed row strings of maps table"""
artist = b_info['artist']
title = b_info['title']
version = b_info['version']
creator = b_info['creator']
diff_approach = b_info['diff_approach']
diff_size = b_info['diff_size']
total_length = b_info['total_length']
bpm = b_info['bpm']
if mod == 'Hard Rock':
diff_approach = pp_utils.hr_setting(diff_approach)
diff_size = pp_utils.hr_setting(diff_size)
if mod == 'Double Time':
diff_approach = pp_utils.calc_dt_ar(diff_approach)
bpm *= 1.5
total_length = int(total_length / 1.5)
s_fmt = '{a} - {t} [{v}] (by {c})'
s = s_fmt.format(a=artist, t=title, v=version, c=creator)
def fmt(real):
return "{:.2f}".format(real)
return s, str(pcount), str(int(pp)), fmt(diff_approach), fmt(diff_size), str(total_length), fmt(bpm)
def html_body(self, by_mod):
"""returns table in plain html"""
mods = ("No Mod", "Hard Rock", "Double Time")
names = ("map", "set by", "~approx pp", "AR", "BPM")
ref = 'http://osu.ppy.sh/b/'
body = []
body.append(E.H1(E.CLASS("heading"), "Farm Maps"))
for mod in mods:
body.append(E.A(mod))
body.append(E.BR())
table = E.TABLE()
colnames = (E.TH(n) for n in names)
table.append(E.TR(*colnames))
for item in by_mod[mod]:
b_id = item['beatmap_id']
pp = item['mean_pp']
pcount = item['players']
s, pcount, pp, diff_approach, diff_size, total_length, bpm = self.columns(
pp, pcount, self.beatmap_info[b_id], mod)
col1 = E.TD(E.A(s, href=ref+str(b_id)))
col2 = E.TD(pcount)
col3 = E.TD(pp)
col4 = E.TD(diff_approach)
col5 = E.TD(bpm)
table.append(E.TR(col1, col2, col3, col4, col5))
body.append(table)
body.append(E.BR())
return tuple(body)
def html(self, by_mod):
"""returns html page with table"""
body = self.html_body(by_mod)
html = E.HTML(
E.HEAD(
E.LINK(type="text/css"),
E.TITLE("Farm Maps")
),
E.BODY(
*body
)
)
return lxml.html.tostring(html).decode("utf-8")
def write_to_html(self, filename, by_mod):
"""saves html page to file"""
with open(filename + '.html', 'w') as f:
f.write(self.html(by_mod))
def to_rows(self, by_mod):
"""constructs table for specified mod"""
class Row():
def __init__(self, b_id, args):
self.link = "http://osu.ppy.sh/b/" + str(b_id)
self.title, self.pcount, self.pp, self.ar, self.cs, self.length, self.bpm = args
by_mod_rows = {}
for mod in by_mod:
by_mod_rows[mod] = []
for item in by_mod[mod]:
b_id = item['beatmap_id']
pp = item['mean_pp']
                pcount = item['players']
b_info = self.beatmap_info[b_id]
row = Row(b_id, self.columns(pp, pcount, b_info, mod))
by_mod_rows[mod].append(row)
return by_mod_rows
|
slam3085/pp_strats
|
saver.py
|
Python
|
gpl-3.0
| 3,810
|
# Copyright (c) 2015-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from copy import deepcopy
from cloudify.state import current_ctx
from cloudify import mocks as cfy_mocks
from cloudify_azure.resources.compute.virtualmachine.virtualmachine_utils \
import (ordered,
diff_dictionaries,
check_if_configuration_changed)
class VirtualMachineTest(unittest.TestCase):
def _get_mock_context_for_run(self):
operation = {'name': 'cloudify.interfaces.lifecycle.create'}
fake_ctx = cfy_mocks.MockCloudifyContext(operation=operation)
instance = mock.Mock()
instance.runtime_properties = {}
fake_ctx._instance = instance
node = mock.Mock()
fake_ctx._node = node
node.properties = {}
node.runtime_properties = {}
node.type_hierarchy = ['ctx.nodes.Root']
fake_ctx.get_resource = mock.MagicMock(
return_value=""
)
return fake_ctx, node, instance
def setUp(self):
self.fake_ctx, self.node, self.instance = \
self._get_mock_context_for_run()
self.dummy_azure_credentials = {
'client_id': 'dummy',
'client_secret': 'dummy',
'subscription_id': 'dummy',
'tenant_id': 'dummy'
}
current_ctx.set(self.fake_ctx)
self.update_vm_config = {
'location': 'eastus2',
'tags': {'a': 'b', 'c': 'd'},
'availability_set': {
'id':
'/subscriptions/cfyinfraavailset1'},
'storage_profile': {'os_disk': {'name': 'demovm',
'vhd': {
'uri':
'http://demostorageaccount'
'.blob.core.windows.net/'
'vhds/demovm.vhd'},
'caching': 'ReadWrite',
'create_option':
'FromImage'},
'image_reference': {
'publisher': 'OpenLogic',
'offer': 'CentOS',
'sku': 7.6,
'version': 'latest'}},
'os_profile': {'computer_name': 'demovm',
'admin_username': 'centos',
'linux_configuration': {'ssh': {
'public_keys': [{
'key_data': 'ssh-rsa demokey',
'path':
'/home/centos/.ssh/authorized_keys'}]},
'disable_password_authentication': True}},
'hardware_profile': {'vm_size': 'Standard_B1s'}}
def test_ordered_simple_dict(self):
dict_a = {'a': 1, 'b': 2}
dict_b = {'b': 2, 'a': 1}
self.assertEquals(ordered(dict_a), ordered(dict_b))
def test_ordered_dict_with_list(self):
dict_a = {'a': [1, 2, 3]}
dict_b = {'a': [3, 2, 1]}
self.assertNotEquals(dict_a, dict_b)
self.assertEquals(ordered(dict_a), ordered(dict_b))
def test_ordered_recursive_integers_to_str(self):
dict_a = {'a': {'b': '2', 'c': '3'}}
dict_b = {'a': {'c': 3, 'b': 2}}
self.assertNotEquals(dict_a, dict_b)
self.assertEquals(ordered(dict_a), ordered(dict_b))
def test_ordered_recursive_list_in_list(self):
dict_a = {'a': [[1, 2, 3], [4, 5, 6]]}
dict_b = {'a': [[5, 4, 6], [2, 1, 3]]}
self.assertNotEquals(dict_a, dict_b)
self.assertEquals(ordered(dict_a), ordered(dict_b))
def test_diff_dictionaries(self):
update_conf = {'a': {'b': 2}}
current_conf = {'a': {'b': 2}}
self.assertEquals(diff_dictionaries(update_conf, current_conf), False)
def test_diff_dictionaries_current_conf_has_more_fields(self):
update_conf = {'a': {'b': 2}}
current_conf = {'a': {'b': 2, 'c': 3}}
self.assertEquals(diff_dictionaries(update_conf, current_conf), False)
def test_diff_dictionaries_update_conf_has_more_fields(self):
update_conf = {'a': {'b': 2}, 'c': 3}
current_conf = {'a': {'b': 2}}
self.assertEquals(diff_dictionaries(update_conf, current_conf),
True)
def test_diff_dictionaries_update_conf_has_more_fields_recursive(self):
update_conf = {'a': {'b': {'c': {'d': 4, 'e': 5}}}}
current_conf = {'a': {'b': {'c': {'d': 4}}}}
self.assertEquals(diff_dictionaries(update_conf, current_conf),
True)
def test_diff_dictionaries_with_list(self):
update_conf = {'a': {'b': [1, 2, 3]}}
current_conf = {'a': {'b': [3, 2, 1]}}
self.assertEquals(diff_dictionaries(update_conf, current_conf),
False)
def test_if_configuration_changed_same_conf(self):
self.assertEquals(
check_if_configuration_changed(self.fake_ctx,
self.update_vm_config,
self.update_vm_config), False)
def test_check_if_configuration_changed_same_conf(self):
self.assertEquals(
check_if_configuration_changed(self.fake_ctx,
self.update_vm_config,
self.update_vm_config), False)
def test_configuration_not_changed_more_elements_in_current_conf(self):
current_conf = deepcopy(self.update_vm_config)
current_conf['storage_profile']['os_disk']['disk_size_gb'] = 30
current_conf['id'] = 'foo'
self.assertEquals(check_if_configuration_changed(self.fake_ctx,
self.update_vm_config,
current_conf), False)
def test_configuration_changed(self):
current_conf = deepcopy(self.update_vm_config)
self.update_vm_config['availability_set']['id'] = 'foo'
self.assertEquals(check_if_configuration_changed(self.fake_ctx,
self.update_vm_config,
current_conf), True)
def test_configuration_changed_deep(self):
current_conf = deepcopy(self.update_vm_config)
self.update_vm_config['os_profile']['linux_configuration'][
'disable_password_authentication'] = False
self.assertEquals(check_if_configuration_changed(self.fake_ctx,
self.update_vm_config,
current_conf), True)
|
cloudify-cosmo/cloudify-azure-plugin
|
cloudify_azure/tests/resources/test_virtualmachine_utils.py
|
Python
|
apache-2.0
| 7,540
|
#!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test Qt with a copied construction environment.
"""
import TestSCons
test = TestSCons.TestSCons()
test.Qt_dummy_installation()
test.Qt_create_SConstruct('SConstruct')
test.write('SConscript', """\
Import("env")
env.Append(CPPDEFINES = ['FOOBAZ'])
copy = env.Clone()
copy.Append(CPPDEFINES = ['MYLIB_IMPL'])
copy.SharedLibrary(
target = 'MyLib',
source = ['MyFile.cpp','MyForm.ui']
)
""")
test.write('MyFile.h', r"""
void aaa(void);
""")
test.write('MyFile.cpp', r"""
#include "MyFile.h"
void useit() {
aaa();
}
""")
test.write('MyForm.ui', r"""
void aaa(void)
""")
test.run()
moc_MyForm = [x for x in test.stdout().split('\n') if x.find('moc_MyForm') != -1]
MYLIB_IMPL = [x for x in moc_MyForm if x.find('MYLIB_IMPL') != -1]
if not MYLIB_IMPL:
print "Did not find MYLIB_IMPL on moc_MyForm compilation line:"
print test.stdout()
test.fail_test()
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
azverkan/scons
|
test/QT/copied-env.py
|
Python
|
mit
| 2,333
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from optionParser import OptionParser
from registration import registration
from urlparse import urlparse
import logging
import webbrowser
import httputils
import re
import wx
## OPTIONAL ##
# from usbkey import check_usb, move_on_key
SWN = 'MisuraInternet Speed Test'
logger = logging.getLogger(__name__)
#Data di scadenza
dead_date = 22221111
url_version = "https://speedtest.agcom244.fub.it/Version"
area_privata = "https://www.misurainternet.it" # /login_form.php"
class CheckSoftware():
def __init__(self, version):
parser = OptionParser(version = version, description = '')
(options, args, md5conf) = parser.parse()
self._httptimeout = options.httptimeout
self._clientid = options.clientid
self._thisVersion = version
self._lastVersion = version
self._stillDay = "unknown"
def _showDialog(self, dialog):
msgBox = wx.MessageDialog(None, dialog['message'], dialog['title'], dialog['style'])
res = msgBox.ShowModal()
msgBox.Destroy()
return res
def _softwareVersion(self):
versionOK = True
deadlineOK = True
url = urlparse(url_version)
connection = httputils.getverifiedconnection(url = url, certificate = None, timeout = self._httptimeout)
try:
connection.request('GET', '%s?speedtest=true&version=%s' % (url.path, self._thisVersion))
data = connection.getresponse().read()
#data = "1.1.1:8" # FAKE REPLY #
#logger.debug(data)
if (re.search('(\.?\d+)+:', data) is None):
logger.warning("Non e' stato possibile controllare la versione per risposta errata del server.")
return True
data = data.split(":")
#### VERSION ####
version = re.search('(\.?\d+)+',data[0])
            '''
            a string of one or more digits \d+
            optionally preceded by a dot \.?
            repeated one or more times (\.?\d+)+
            '''
if (version is not None):
self._lastVersion = version.string
logger.info("L'ultima versione sul server e' la %s" % self._lastVersion)
if (self._thisVersion != self._lastVersion):
logger.info("Nuova versione disponbile. [ this:%s | last:%s ]" % (self._thisVersion, self._lastVersion))
newVersion = \
{ \
"style":wx.YES|wx.NO|wx.ICON_INFORMATION, \
"title":"%s %s" % (SWN, self._thisVersion), \
"message": \
'''
E' disponibile una nuova versione:
%s %s
E' possibile effetuare il download dalla relativa sezione
nell'area privata del sito www.misurainternet.it
Vuoi scaricare ora la nuova versione?
''' % (SWN, self._lastVersion)
}
res = self._showDialog(newVersion)
if res == wx.ID_YES:
versionOK = False
logger.info("Si e' scelto di scaricare la nuova versione del software.")
webbrowser.open(area_privata, new=2, autoraise=True)
return versionOK
else:
logger.info("Si e' scelto di continuare ad utilizzare la vecchia versione del software.")
versionOK = True
else:
versionOK = True
logger.info("E' in esecuzione l'ultima versione del software.")
else:
versionOK = True
logger.error("Errore nella verifica della presenza di una nuova versione.")
#### DEADLINE ####
deadline = re.search('(-?\d+)(?!.)',data[1])
            '''
            a string of one or more digits \d+
            optionally preceded by a minus sign -?
            with no character following it (?!.)
            '''
if (deadline is not None):
self._stillDay = deadline.string
logger.info("Giorni rimasti comunicati dal server: %s" % self._stillDay)
if (int(self._stillDay)>=0):
deadlineOK = True
logger.info("L'attuale versione %s scade fra %s giorni." % (self._thisVersion, self._stillDay))
beforeDeadline = \
{ \
"style":wx.OK|wx.ICON_EXCLAMATION, \
"title":"%s %s" % (SWN, self._thisVersion), \
"message": \
'''
Questa versione di %s
potra' essere utilizzata ancora per %s giorni.
''' % (SWN, self._stillDay)
}
res = self._showDialog(beforeDeadline)
else:
deadlineOK = False
self._stillDay = -(int(self._stillDay))
logger.info("L'attuale versione %s e' scaduta da %s giorni." % (self._thisVersion, self._stillDay))
afterDeadline = \
{ \
"style":wx.OK|wx.ICON_EXCLAMATION, \
"title":"%s %s" % (SWN, self._thisVersion), \
"message": \
'''
Questa versione di %s
e' scaduta da %s giorni e pertanto
non potra' piu' essere utilizzata.
''' % (SWN, self._stillDay)
}
res = self._showDialog(afterDeadline)
else:
deadlineOK = True
logger.info("Questa versione del software non ha ancora scadenza.")
except Exception as e:
logger.error("Impossibile controllare se ci sono nuove versioni. Errore: %s." % e)
return (versionOK and deadlineOK)
def _isRegistered(self):
regOK = registration(self._clientid)
return regOK
def _check_usbkey(self):
check = True
# if (not check_usb()):
# self._cycle.clear()
# logger.info('Verifica della presenza della chiave USB fallita')
# wx.CallAfter(self._gui._update_messages, "Per l'utilizzo di questo software occorre disporre della opportuna chiave USB. Inserire la chiave nel computer e riavviare il programma.", 'red')
return check
def checkIT(self):
checkOK = False
check_list = {1:self._softwareVersion,2:self._isRegistered}
for check in check_list:
checkOK = check_list[check]()
if not checkOK:
break
return checkOK
if __name__ == '__main__':
import log_conf
log_conf.init_log()
app = wx.App(False)
checker = CheckSoftware("1.1.2")
checker.checkIT()
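# A minimal sketch (not part of the original module): the Version endpoint
# replies "<version>:<days left>", e.g. "1.1.2:30" (the sample value is
# hypothetical), and _softwareVersion splits it with the same two regexes.
def _parse_version_reply(data):
    version_part, deadline_part = data.split(":")
    version = re.search('(\.?\d+)+', version_part)       # -> "1.1.2"
    deadline = re.search('(-?\d+)(?!.)', deadline_part)  # -> "30"
    return (version.group() if version else None,
            int(deadline.group()) if deadline else None)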
|
fondazionebordoni/nemesys-speedtest
|
mist/checkSoftware.py
|
Python
|
gpl-3.0
| 7,421
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for slim.nets.vgg."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.slim.nets import vgg
slim = tf.contrib.slim
class VGGATest(tf.test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_a(inputs, num_classes)
self.assertEquals(logits.op.name, 'vgg_a/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testFullyConvolutional(self):
batch_size = 1
height, width = 256, 256
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_a(inputs, num_classes, spatial_squeeze=False)
self.assertEquals(logits.op.name, 'vgg_a/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 2, 2, num_classes])
def testEndPoints(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
for is_training in [True, False]:
with tf.Graph().as_default():
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = vgg.vgg_a(inputs, num_classes, is_training=is_training)
expected_names = ['vgg_a/conv1/conv1_1',
'vgg_a/pool1',
'vgg_a/conv2/conv2_1',
'vgg_a/pool2',
'vgg_a/conv3/conv3_1',
'vgg_a/conv3/conv3_2',
'vgg_a/pool3',
'vgg_a/conv4/conv4_1',
'vgg_a/conv4/conv4_2',
'vgg_a/pool4',
'vgg_a/conv5/conv5_1',
'vgg_a/conv5/conv5_2',
'vgg_a/pool5',
'vgg_a/fc6',
'vgg_a/fc7',
'vgg_a/fc8'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
def testModelVariables(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
vgg.vgg_a(inputs, num_classes)
expected_names = ['vgg_a/conv1/conv1_1/weights',
'vgg_a/conv1/conv1_1/biases',
'vgg_a/conv2/conv2_1/weights',
'vgg_a/conv2/conv2_1/biases',
'vgg_a/conv3/conv3_1/weights',
'vgg_a/conv3/conv3_1/biases',
'vgg_a/conv3/conv3_2/weights',
'vgg_a/conv3/conv3_2/biases',
'vgg_a/conv4/conv4_1/weights',
'vgg_a/conv4/conv4_1/biases',
'vgg_a/conv4/conv4_2/weights',
'vgg_a/conv4/conv4_2/biases',
'vgg_a/conv5/conv5_1/weights',
'vgg_a/conv5/conv5_1/biases',
'vgg_a/conv5/conv5_2/weights',
'vgg_a/conv5/conv5_2/biases',
'vgg_a/fc6/weights',
'vgg_a/fc6/biases',
'vgg_a/fc7/weights',
'vgg_a/fc7/biases',
'vgg_a/fc8/weights',
'vgg_a/fc8/biases',
]
model_variables = [v.op.name for v in slim.get_model_variables()]
self.assertSetEqual(set(model_variables), set(expected_names))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
with self.test_session():
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_a(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = tf.argmax(logits, 1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 224, 224
eval_height, eval_width = 256, 256
num_classes = 1000
with self.test_session():
train_inputs = tf.random_uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = vgg.vgg_a(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random_uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = vgg.vgg_a(eval_inputs, is_training=False,
spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 2, 2, num_classes])
logits = tf.reduce_mean(logits, [1, 2])
predictions = tf.argmax(logits, 1)
self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
def testForward(self):
batch_size = 1
height, width = 224, 224
with self.test_session() as sess:
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_a(inputs)
sess.run(tf.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
class VGG16Test(tf.test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_16(inputs, num_classes)
self.assertEquals(logits.op.name, 'vgg_16/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testFullyConvolutional(self):
batch_size = 1
height, width = 256, 256
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_16(inputs, num_classes, spatial_squeeze=False)
self.assertEquals(logits.op.name, 'vgg_16/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 2, 2, num_classes])
def testEndPoints(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
for is_training in [True, False]:
with tf.Graph().as_default():
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = vgg.vgg_16(inputs, num_classes, is_training=is_training)
expected_names = ['vgg_16/conv1/conv1_1',
'vgg_16/conv1/conv1_2',
'vgg_16/pool1',
'vgg_16/conv2/conv2_1',
'vgg_16/conv2/conv2_2',
'vgg_16/pool2',
'vgg_16/conv3/conv3_1',
'vgg_16/conv3/conv3_2',
'vgg_16/conv3/conv3_3',
'vgg_16/pool3',
'vgg_16/conv4/conv4_1',
'vgg_16/conv4/conv4_2',
'vgg_16/conv4/conv4_3',
'vgg_16/pool4',
'vgg_16/conv5/conv5_1',
'vgg_16/conv5/conv5_2',
'vgg_16/conv5/conv5_3',
'vgg_16/pool5',
'vgg_16/fc6',
'vgg_16/fc7',
'vgg_16/fc8'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
def testModelVariables(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
vgg.vgg_16(inputs, num_classes)
expected_names = ['vgg_16/conv1/conv1_1/weights',
'vgg_16/conv1/conv1_1/biases',
'vgg_16/conv1/conv1_2/weights',
'vgg_16/conv1/conv1_2/biases',
'vgg_16/conv2/conv2_1/weights',
'vgg_16/conv2/conv2_1/biases',
'vgg_16/conv2/conv2_2/weights',
'vgg_16/conv2/conv2_2/biases',
'vgg_16/conv3/conv3_1/weights',
'vgg_16/conv3/conv3_1/biases',
'vgg_16/conv3/conv3_2/weights',
'vgg_16/conv3/conv3_2/biases',
'vgg_16/conv3/conv3_3/weights',
'vgg_16/conv3/conv3_3/biases',
'vgg_16/conv4/conv4_1/weights',
'vgg_16/conv4/conv4_1/biases',
'vgg_16/conv4/conv4_2/weights',
'vgg_16/conv4/conv4_2/biases',
'vgg_16/conv4/conv4_3/weights',
'vgg_16/conv4/conv4_3/biases',
'vgg_16/conv5/conv5_1/weights',
'vgg_16/conv5/conv5_1/biases',
'vgg_16/conv5/conv5_2/weights',
'vgg_16/conv5/conv5_2/biases',
'vgg_16/conv5/conv5_3/weights',
'vgg_16/conv5/conv5_3/biases',
'vgg_16/fc6/weights',
'vgg_16/fc6/biases',
'vgg_16/fc7/weights',
'vgg_16/fc7/biases',
'vgg_16/fc8/weights',
'vgg_16/fc8/biases',
]
model_variables = [v.op.name for v in slim.get_model_variables()]
self.assertSetEqual(set(model_variables), set(expected_names))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
with self.test_session():
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_16(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = tf.argmax(logits, 1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 224, 224
eval_height, eval_width = 256, 256
num_classes = 1000
with self.test_session():
train_inputs = tf.random_uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = vgg.vgg_16(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random_uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = vgg.vgg_16(eval_inputs, is_training=False,
spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 2, 2, num_classes])
logits = tf.reduce_mean(logits, [1, 2])
predictions = tf.argmax(logits, 1)
self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
def testForward(self):
batch_size = 1
height, width = 224, 224
with self.test_session() as sess:
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_16(inputs)
sess.run(tf.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
class VGG19Test(tf.test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_19(inputs, num_classes)
self.assertEquals(logits.op.name, 'vgg_19/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testFullyConvolutional(self):
batch_size = 1
height, width = 256, 256
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_19(inputs, num_classes, spatial_squeeze=False)
self.assertEquals(logits.op.name, 'vgg_19/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 2, 2, num_classes])
def testEndPoints(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
for is_training in [True, False]:
with tf.Graph().as_default():
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = vgg.vgg_19(inputs, num_classes, is_training=is_training)
expected_names = [
'vgg_19/conv1/conv1_1',
'vgg_19/conv1/conv1_2',
'vgg_19/pool1',
'vgg_19/conv2/conv2_1',
'vgg_19/conv2/conv2_2',
'vgg_19/pool2',
'vgg_19/conv3/conv3_1',
'vgg_19/conv3/conv3_2',
'vgg_19/conv3/conv3_3',
'vgg_19/conv3/conv3_4',
'vgg_19/pool3',
'vgg_19/conv4/conv4_1',
'vgg_19/conv4/conv4_2',
'vgg_19/conv4/conv4_3',
'vgg_19/conv4/conv4_4',
'vgg_19/pool4',
'vgg_19/conv5/conv5_1',
'vgg_19/conv5/conv5_2',
'vgg_19/conv5/conv5_3',
'vgg_19/conv5/conv5_4',
'vgg_19/pool5',
'vgg_19/fc6',
'vgg_19/fc7',
'vgg_19/fc8'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
def testModelVariables(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
vgg.vgg_19(inputs, num_classes)
expected_names = [
'vgg_19/conv1/conv1_1/weights',
'vgg_19/conv1/conv1_1/biases',
'vgg_19/conv1/conv1_2/weights',
'vgg_19/conv1/conv1_2/biases',
'vgg_19/conv2/conv2_1/weights',
'vgg_19/conv2/conv2_1/biases',
'vgg_19/conv2/conv2_2/weights',
'vgg_19/conv2/conv2_2/biases',
'vgg_19/conv3/conv3_1/weights',
'vgg_19/conv3/conv3_1/biases',
'vgg_19/conv3/conv3_2/weights',
'vgg_19/conv3/conv3_2/biases',
'vgg_19/conv3/conv3_3/weights',
'vgg_19/conv3/conv3_3/biases',
'vgg_19/conv3/conv3_4/weights',
'vgg_19/conv3/conv3_4/biases',
'vgg_19/conv4/conv4_1/weights',
'vgg_19/conv4/conv4_1/biases',
'vgg_19/conv4/conv4_2/weights',
'vgg_19/conv4/conv4_2/biases',
'vgg_19/conv4/conv4_3/weights',
'vgg_19/conv4/conv4_3/biases',
'vgg_19/conv4/conv4_4/weights',
'vgg_19/conv4/conv4_4/biases',
'vgg_19/conv5/conv5_1/weights',
'vgg_19/conv5/conv5_1/biases',
'vgg_19/conv5/conv5_2/weights',
'vgg_19/conv5/conv5_2/biases',
'vgg_19/conv5/conv5_3/weights',
'vgg_19/conv5/conv5_3/biases',
'vgg_19/conv5/conv5_4/weights',
'vgg_19/conv5/conv5_4/biases',
'vgg_19/fc6/weights',
'vgg_19/fc6/biases',
'vgg_19/fc7/weights',
'vgg_19/fc7/biases',
'vgg_19/fc8/weights',
'vgg_19/fc8/biases',
]
model_variables = [v.op.name for v in slim.get_model_variables()]
self.assertSetEqual(set(model_variables), set(expected_names))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
with self.test_session():
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_19(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = tf.argmax(logits, 1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 224, 224
eval_height, eval_width = 256, 256
num_classes = 1000
with self.test_session():
train_inputs = tf.random_uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = vgg.vgg_19(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random_uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = vgg.vgg_19(eval_inputs, is_training=False,
spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 2, 2, num_classes])
logits = tf.reduce_mean(logits, [1, 2])
predictions = tf.argmax(logits, 1)
self.assertEquals(predictions.get_shape().as_list(), [eval_batch_size])
def testForward(self):
batch_size = 1
height, width = 224, 224
with self.test_session() as sess:
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = vgg.vgg_19(inputs)
sess.run(tf.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
if __name__ == '__main__':
tf.test.main()
|
laosiaudi/tensorflow
|
tensorflow/contrib/slim/python/slim/nets/vgg_test.py
|
Python
|
apache-2.0
| 18,550
|
from django.db import models
class Escolaridade(models.Model):
"""
Description: Model Description
"""
escolaridade = models.CharField(max_length=45)
class Meta:
pass
|
Bleno/sisgestor-django
|
escolaridade/models.py
|
Python
|
mit
| 203
|
from church_app.core import db, logger
from sqlalchemy.exc import IntegrityError
from church_app.lib.helper import db_date_parse, valid_email, valid_number
def save_children(children):
"""
Inserts member's children in the database
:param children: member's children array
:return:[Boolean, object/none, msg]
"""
for child in children:
if not child.name or not child.member_id or not child.birth_date:
return [True, None, "You need to return mandatory params"]
try:
child.birth_date = db_date_parse(child.birth_date)
except ValueError as ex:
logger.debug(ex)
return [True, None, "Date not valid"]
try:
db.add_all(children)
db.commit()
except IntegrityError as ex:
logger.debug(ex)
return [True, None, "Error processing the data"]
return [False, children, "Saved successfully"]
def save_member(member):
"""
Inserts member in the database
:param member
:return:[Boolean, object/none, msg]
"""
if (not member.name or not member.last_name or not member.birth_date or
not member.civil_status_id or not member.profession_id or
not member.church_department_id or not member.gender):
return [True, None, "You need to return mandatory params"]
try:
member.birth_date = db_date_parse(member.birth_date)
except ValueError as ex:
logger.debug(ex)
return [True, None, "Date not valid"]
if member.conversion_date:
try:
member.conversion_date = db_date_parse(member.conversion_date)
except ValueError as ex:
logger.debug(ex)
return [True, None, "Date not valid"]
if member.email and not valid_email(member.email):
return [True, None, "Email not valid"]
try:
db.add(member)
db.commit()
except IntegrityError as ex:
logger.debug(ex)
return [True, None, "Error processing the data"]
return [False, member, "Saved successfully"]
def save_member_baptism(member_baptism):
"""
Inserts member_baptism in the database
:param member_baptism
:return:[Boolean, object/none, msg]
"""
if not member_baptism.member_id or not member_baptism.baptism_id:
return [True, None, "You need to return mandatory params"]
try:
db.add(member_baptism)
db.commit()
except IntegrityError as ex:
logger.debug(ex)
return [True, None, "Error processing the data"]
return [False, member_baptism, "Saved successfully"]
def save_member_ministry(member_ministry):
"""
Inserts member_ministry in the database
:param member_ministry
:return:[Boolean, object/none, msg]
"""
if not member_ministry.member_id or not member_ministry.ministry_id \
or not member_ministry.starting_date:
return [True, None, "You need to return mandatory params"]
try:
member_ministry.starting_date = \
db_date_parse(member_ministry.starting_date)
except ValueError as ex:
logger.debug(ex)
return [True, None, "Date not valid"]
if member_ministry.end_date:
try:
member_ministry.end_date = \
db_date_parse(member_ministry.end_date)
except ValueError as ex:
logger.debug(ex)
return [True, None, "Date not valid"]
try:
db.add(member_ministry)
db.commit()
except IntegrityError as ex:
logger.debug(ex)
return [True, None, "Error processing the data"]
return [False, member_ministry, "Saved successfully"]
def save_member_position(member_position):
"""
    Inserts member_position in the database
:param member_position
:return:[Boolean, object/none, msg]
"""
if not member_position.member_id or not member_position.position_id \
or not member_position.starting_date:
return [True, None, "You need to return mandatory params"]
try:
member_position.starting_date = \
db_date_parse(member_position.starting_date)
except ValueError as ex:
logger.debug(ex)
return [True, None, "Date not valid"]
if member_position.end_date:
try:
member_position.end_date = \
db_date_parse(member_position.end_date)
except ValueError as ex:
logger.debug(ex)
return [True, None, "Date not valid"]
try:
db.add(member_position)
db.commit()
except IntegrityError as ex:
logger.debug(ex)
return [True, None, "Error processing the data"]
return [False, member_position, "Saved successfully"]
def save_member_phones(phones):
"""
    Inserts the member's phones in the database
:param phones
:return:[Boolean, object/none, msg]
"""
for phone in phones:
if not phone.member_id or not phone.phone_number or \
not phone.phone_type_id:
return [True, None, "You need to return mandatory params"]
if not valid_number(phone):
return [True, None, "Invalid phone number"]
try:
db.add_all(phones)
db.commit()
except IntegrityError as ex:
logger.debug(ex)
return [True, None, "Error processing the data"]
return [False, phones, "Saved successfully"]
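# A hedged usage sketch (not part of the original module). Every save_*
# helper above returns the same [error, payload, message] triple, so a
# caller can branch uniformly; `member` is assumed to be an instance of
# the project's SQLAlchemy Member model.
def _example_save(member):
    error, saved, msg = save_member(member)
    if error:
        logger.debug(msg)   # e.g. "You need to return mandatory params"
        return None
    return saved            # the committed model instance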
|
euri16/church-manager
|
church_app/controllers/member_controller.py
|
Python
|
apache-2.0
| 5,384
|
import re
class Version(str):
""" This is NOT an implementation of semver, as users may use any pattern in their versions.
It is just a helper to parse .-, and compare taking into account integers when possible
"""
version_pattern = re.compile('[.-]')
def __new__(cls, content):
return str.__new__(cls, content.strip())
@property
def as_list(self):
if not hasattr(self, "_cached_list"):
tokens = self.rsplit('+', 1)
self._base = tokens[0]
if len(tokens) == 2:
self._build = tokens[1]
self._cached_list = []
tokens = Version.version_pattern.split(tokens[0])
for item in tokens:
self._cached_list.append(int(item) if item.isdigit() else item)
return self._cached_list
def major(self, fill=True):
self_list = self.as_list
if not isinstance(self_list[0], int):
return self._base
v = str(self_list[0]) if self_list else "0"
if fill:
return Version(".".join([v, 'Y', 'Z']))
return Version(v)
def stable(self):
""" same as major, but as semver, 0.Y.Z is not considered
stable, so return it as is
"""
if self.as_list[0] == 0:
return self
return self.major()
def minor(self, fill=True):
self_list = self.as_list
if not isinstance(self_list[0], int):
return self._base
v0 = str(self_list[0]) if len(self_list) > 0 else "0"
v1 = str(self_list[1]) if len(self_list) > 1 else "0"
if fill:
return Version(".".join([v0, v1, 'Z']))
return Version(".".join([v0, v1]))
def patch(self):
self_list = self.as_list
if not isinstance(self_list[0], int):
return self._base
v0 = str(self_list[0]) if len(self_list) > 0 else "0"
v1 = str(self_list[1]) if len(self_list) > 1 else "0"
v2 = str(self_list[2]) if len(self_list) > 2 else "0"
return Version(".".join([v0, v1, v2]))
def pre(self):
self_list = self.as_list
if not isinstance(self_list[0], int):
return self._base
v0 = str(self_list[0]) if len(self_list) > 0 else "0"
v1 = str(self_list[1]) if len(self_list) > 1 else "0"
v2 = str(self_list[2]) if len(self_list) > 2 else "0"
v = ".".join([v0, v1, v2])
if len(self_list) > 3:
v += "-%s" % self_list[3]
return Version(v)
@property
def build(self):
self.as_list
if hasattr(self, "_build"):
return self._build
return ""
@property
def base(self):
self.as_list
return Version(self._base)
def compatible(self, other):
if not isinstance(other, Version):
other = Version(other)
for v1, v2 in zip(self.as_list, other.as_list):
if v1 in ["X", "Y", "Z"] or v2 in ["X", "Y", "Z"]:
return True
if v1 != v2:
return False
return True
def __cmp__(self, other):
if other is None:
return 1
if not isinstance(other, Version):
other = Version(other)
# Check equals
def get_el(a_list, index):
if len(a_list) - 1 < index:
return 0 # out of range, 4 == 4.0 == 4.0.0
return a_list[index]
equals = all(get_el(other.as_list, ind) == get_el(self.as_list, ind)
for ind in range(0, max(len(other.as_list), len(self.as_list))))
if equals:
return 0
# Check greater than or less than
other_list = other.as_list
for ind, el in enumerate(self.as_list):
if ind + 1 > len(other_list):
if isinstance(el, int):
return 1
return -1
if not isinstance(el, int) and isinstance(other_list[ind], int):
# Version compare with 1.4.rc2
return -1
elif not isinstance(other_list[ind], int) and isinstance(el, int):
return 1
elif el == other_list[ind]:
continue
elif el > other_list[ind]:
return 1
else:
return -1
if len(other_list) > len(self.as_list):
return -1
def __gt__(self, other):
return self.__cmp__(other) == 1
def __lt__(self, other):
return self.__cmp__(other) == -1
def __le__(self, other):
return self.__cmp__(other) in [0, -1]
def __ge__(self, other):
return self.__cmp__(other) in [0, 1]
def __eq__(self, other):
return self.__cmp__(other) == 0
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return str.__hash__(self)
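# A minimal usage sketch (not part of the original module): integer
# components compare numerically, "+<build>" metadata is split off, and
# the "X"/"Y"/"Z" placeholders act as wildcards in compatible().
if __name__ == '__main__':
    assert Version("1.10.2") > Version("1.9.8")   # numeric, not lexicographic
    assert Version("1.2.3+abc").build == "abc"    # build metadata split off
    assert Version("1.2.3").minor() == "1.2.Z"    # filled with a placeholder
    assert Version("1.2.3").compatible("1.2.Z")   # "Z" matches any patch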
|
birsoyo/conan
|
conans/model/version.py
|
Python
|
mit
| 4,890
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import render
from cStringIO import StringIO
import xml.dom.minidom
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Table
from reportlab.lib.units import mm
from reportlab.lib.pagesizes import A4
import reportlab.lib
import copy
class simple(render.render):
def _render(self):
self.result = StringIO()
parser = xml.dom.minidom.parseString(self.xml)
title = parser.documentElement.tagName
doc = SimpleDocTemplate(self.result, pagesize=A4, title=title,
author='Odoo, Fabien Pinckaers', leftmargin=10*mm, rightmargin=10*mm)
styles = reportlab.lib.styles.getSampleStyleSheet()
title_style = copy.deepcopy(styles["Heading1"])
title_style.alignment = reportlab.lib.enums.TA_CENTER
story = [ Paragraph(title, title_style) ]
style_level = {}
nodes = [ (parser.documentElement,0) ]
while len(nodes):
node = nodes.pop(0)
value = ''
n=len(node[0].childNodes)-1
while n>=0:
if node[0].childNodes[n].nodeType==3:
value += node[0].childNodes[n].nodeValue
else:
nodes.insert( 0, (node[0].childNodes[n], node[1]+1) )
n-=1
if not node[1] in style_level:
style = copy.deepcopy(styles["Normal"])
style.leftIndent=node[1]*6*mm
style.firstLineIndent=-3*mm
style_level[node[1]] = style
story.append( Paragraph('<b>%s</b>: %s' % (node[0].tagName, value), style_level[node[1]]))
doc.build(story)
return self.result.getvalue()
if __name__=='__main__':
s = simple()
s.xml = '''<test>
<author-list>
<author>
<name>Fabien Pinckaers</name>
<age>23</age>
</author>
<author>
<name>Michel Pinckaers</name>
<age>53</age>
</author>
No other
</author-list>
</test>'''
if s.render():
print s.get()
|
vileopratama/vitech
|
src/openerp/report/render/simple.py
|
Python
|
mit
| 2,196
|
#isMaster = False
# maps a node's process id to its (host, port) pair
cluster_info = {#'3':('127.0.0.1', 8102,),
'1':('127.0.0.1', 8100,),
'2':('127.0.0.1', 8101,)
}
process_id = 1
#self_port = 8100
#self_IP = "127.0.0.1"
|
SuperMass/distOS-lab3
|
src/part1/frontend1/timeServer/time_config.py
|
Python
|
gpl-3.0
| 178
|
# -*- coding: utf-8 -*-
"""
.. module:: .exceptions.py
:synopsis: API Exceptions
.. moduleauthor:: Arthur Moore <arthur.moore85@gmail.com>
.. creation date:: 27-10-2017
.. licence::
"""
from __future__ import unicode_literals
__author__ = "arthur"
class ApiException(Exception):
"""
Base exception for all non-specific exceptions
"""
pass
class ForbiddenException(ApiException):
"""
Raised when a 403 response is received
"""
pass
class NotFoundException(ApiException):
"""
Raised when a 404 response is received
"""
pass
class MovedException(ApiException):
"""
Raised when a 301 response is received
"""
pass
class RedirectException(ApiException):
"""
Raised when a 307 response is received
"""
pass
class UnauthorizedException(ApiException):
"""
Raised when a 401 response is received.
"""
pass
class InternalServerException(ApiException):
"""
Raised when a 500 response is received.
"""
pass
class UnavailableException(ApiException):
"""
Raised when a 503 response is received.
"""
pass
class MissingEndpointException(ApiException):
"""
Raised when endpoint is missing.
"""
pass
class UnknownURLException(ApiException):
"""
Raised when the base URL is missing.
"""
pass
def handle_response_codes(status_code):
"""
Handles the exceptions for various types of
responses.
:param status_code:
:return:
"""
if status_code == 404:
raise NotFoundException(
'URL provided could not be found'
)
elif status_code == 403:
raise ForbiddenException(
'URL access is forbidden'
)
elif status_code == 301:
raise MovedException(
'URL permanently moved'
)
elif status_code == 307:
raise RedirectException(
'URL is temporarily redirected'
)
elif status_code == 401:
raise UnauthorizedException(
'You are unauthorized to view this URL'
)
elif status_code == 500:
raise InternalServerException(
'The remote server encountered an internal server error'
)
elif status_code == 503:
raise UnavailableException(
'URL is unavailable.'
)
elif status_code == 'missing_endpoint':
raise MissingEndpointException(
'Endpoint could not be found.'
)
elif status_code == 'missing_base_url':
raise UnknownURLException(
'Base URL has not been set.'
)
else:
raise ApiException('An error occurred')
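# A hedged usage sketch (not part of the original module): map an HTTP
# response straight onto the exceptions above. `response` stands for any
# object with a status_code attribute (e.g. a requests.Response).
def _example_check(response):
    if response.status_code != 200:
        # raises the ApiException subclass matching the status code
        handle_response_codes(response.status_code)
    return response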
|
ArthurMoore85/pi_romulus
|
api/exceptions.py
|
Python
|
gpl-2.0
| 2,675
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
try:
import http.client as http_client
except ImportError:
import httplib as http_client
try:
basestring
except NameError: # Python 3.x
basestring = str
import shutil
import socket
import sys
import types
from .extension_connection import ExtensionConnection
from .firefox_binary import FirefoxBinary
from .firefox_profile import FirefoxProfile
from .options import Options
from .remote_connection import FirefoxRemoteConnection
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from .service import Service
from .webelement import FirefoxWebElement
class WebDriver(RemoteWebDriver):
# There is no native event support on Mac
NATIVE_EVENTS_ALLOWED = sys.platform != "darwin"
_web_element_cls = FirefoxWebElement
def __init__(self, firefox_profile=None, firefox_binary=None,
timeout=30, capabilities=None, proxy=None,
executable_path="geckodriver", firefox_options=None,
log_path="geckodriver.log"):
"""Starts a new local session of Firefox.
Based on the combination and specificity of the various keyword
arguments, a capabilities dictionary will be constructed that
is passed to the remote end.
The keyword arguments given to this constructor are helpers to
more easily allow Firefox WebDriver sessions to be customised
with different options. They are mapped on to a capabilities
dictionary that is passed on to the remote end.
As some of the options, such as `firefox_profile` and
`firefox_options.profile` are mutually exclusive, precedence is
given from how specific the setting is. `capabilities` is the
least specific keyword argument, followed by `firefox_options`,
followed by `firefox_binary` and `firefox_profile`.
In practice this means that if `firefox_profile` and
`firefox_options.profile` are both set, the selected profile
instance will always come from the most specific variable.
        In this case that would be `firefox_profile`. This will result in
        `firefox_options.profile` being ignored because it is considered
        a less specific setting than the top-level `firefox_profile`
        keyword argument. Similarly, if you had specified a
`capabilities["firefoxOptions"]["profile"]` Base64 string,
this would rank below `firefox_options.profile`.
:param firefox_profile: Instance of ``FirefoxProfile`` object
or a string. If undefined, a fresh profile will be created
in a temporary location on the system.
:param firefox_binary: Instance of ``FirefoxBinary`` or full
path to the Firefox binary. If undefined, the system default
Firefox installation will be used.
:param timeout: Time to wait for Firefox to launch when using
the extension connection.
:param capabilities: Dictionary of desired capabilities.
        :param proxy: The proxy settings to use when communicating with
Firefox via the extension connection.
:param executable_path: Full path to override which geckodriver
binary to use for Firefox 47.0.1 and greater, which
defaults to picking up the binary from the system path.
:param firefox_options: Instance of ``options.Options``.
:param log_path: Where to log information from the driver.
"""
self.binary = None
self.profile = None
if capabilities is None:
capabilities = DesiredCapabilities.FIREFOX.copy()
if firefox_options is None:
firefox_options = Options()
if capabilities.get("binary"):
self.binary = capabilities["binary"]
# firefox_options overrides capabilities
if firefox_options is not None:
if firefox_options.binary is not None:
self.binary = firefox_options.binary
if firefox_options.profile is not None:
self.profile = firefox_options.profile
# firefox_binary and firefox_profile
# override firefox_options
if firefox_binary is not None:
if isinstance(firefox_binary, basestring):
firefox_binary = FirefoxBinary(firefox_binary)
self.binary = firefox_binary
firefox_options.binary = firefox_binary
if firefox_profile is not None:
if isinstance(firefox_profile, basestring):
firefox_profile = FirefoxProfile(firefox_profile)
self.profile = firefox_profile
firefox_options.profile = firefox_profile
# W3C remote
# TODO(ato): Perform conformance negotiation
if capabilities.get("marionette"):
self.service = Service(executable_path, log_path=log_path)
self.service.start()
capabilities.update(firefox_options.to_capabilities())
executor = FirefoxRemoteConnection(
remote_server_addr=self.service.service_url)
RemoteWebDriver.__init__(
self,
command_executor=executor,
desired_capabilities=capabilities,
keep_alive=True)
# Selenium remote
else:
if self.binary is None:
self.binary = FirefoxBinary()
if self.profile is None:
self.profile = FirefoxProfile()
# disable native events if globally disabled
self.profile.native_events_enabled = (
self.NATIVE_EVENTS_ALLOWED and self.profile.native_events_enabled)
if proxy is not None:
proxy.add_to_capabilities(capabilities)
executor = ExtensionConnection("127.0.0.1", self.profile,
self.binary, timeout)
RemoteWebDriver.__init__(
self,
command_executor=executor,
desired_capabilities=capabilities,
keep_alive=True)
self._is_remote = False
def quit(self):
"""Quits the driver and close every associated window."""
try:
RemoteWebDriver.quit(self)
except (http_client.BadStatusLine, socket.error):
            # Happens if Firefox shuts down before we've read the response from
# the socket.
pass
if "specificationLevel" in self.capabilities:
self.service.stop()
else:
self.binary.kill()
try:
shutil.rmtree(self.profile.path)
if self.profile.tempfolder is not None:
shutil.rmtree(self.profile.tempfolder)
except Exception as e:
print(str(e))
@property
def firefox_profile(self):
return self.profile
# Extension commands:
def set_context(self, context):
self.execute("SET_CONTEXT", {"context": context})
|
tkingless/webtesting
|
venvs/dev/lib/python2.7/site-packages/selenium/webdriver/firefox/webdriver.py
|
Python
|
mit
| 7,859
|
import blinker
frobnicated = blinker.signal('frobnicated')
class Receiver(object):
def __init__(self):
def handle_frobnicated(sender, **kwargs):
self.on_frobnicated(sender, **kwargs)
        # blinker connects receivers by weak reference, so keep a strong
        # reference to the closure on the instance; otherwise it could be
        # garbage collected and the signal would silently stop firing.
        self.handle_frobnicated = handle_frobnicated
        frobnicated.connect(handle_frobnicated)
def on_frobnicated(self, sender, **kwargs):
print sender, kwargs['message']
if __name__ == '__main__':
receiver = Receiver()
for i in range(10):
frobnicated.send('Sender %s' % i, message='hello')
|
voidabhi/python-scripts
|
blinker-example.py
|
Python
|
mit
| 554
|
from .. import interface
from ..interfaces import IContext
class IRequest(interface.Interface):
""" web request """
class IResponse(interface.Interface):
""" response """
class IParameters(interface.Interface):
""" parameters """
class IWebContext(IContext):
"""Web handler context"""
params = interface.Attribute('Parameters', spec='IParameters')
request = interface.Attribute('Request', spec='IRequest')
response = interface.Attribute('Response', spec='IResponse')
class IStream(interface.Interface):
""" stream handler """
def __call__(stream):
""" call stream from response renderer
:type stream: IStreamWriter
"""
class IStreamWriter(interface.Interface):
""" Writer object for stream """
params = interface.Attribute('Parameters', spec='IParameters')
request = interface.Attribute('Request', spec='IRequest')
def write(data):
""" write data to stream
:type data: bytes | bytearray | memoryview
"""
def write_eof():
""" write eof to stream,
writer object is not usable after calling this function
:rtype: None
"""
|
fafhrd91/mdl
|
mdl/web/interfaces.py
|
Python
|
apache-2.0
| 1,176
|
from __future__ import unicode_literals
from unittest import skipUnless
from django.db import connection
from django.db.models import Index
from django.db.utils import DatabaseError
from django.test import TransactionTestCase, mock, skipUnlessDBFeature
from django.test.utils import ignore_warnings
from django.utils.deprecation import RemovedInDjango21Warning
from .models import Article, ArticleReporter, City, District, Reporter
class IntrospectionTests(TransactionTestCase):
available_apps = ['introspection']
def test_table_names(self):
tl = connection.introspection.table_names()
self.assertEqual(tl, sorted(tl))
self.assertIn(Reporter._meta.db_table, tl, "'%s' isn't in table_list()." % Reporter._meta.db_table)
self.assertIn(Article._meta.db_table, tl, "'%s' isn't in table_list()." % Article._meta.db_table)
def test_django_table_names(self):
with connection.cursor() as cursor:
cursor.execute('CREATE TABLE django_ixn_test_table (id INTEGER);')
tl = connection.introspection.django_table_names()
cursor.execute("DROP TABLE django_ixn_test_table;")
self.assertNotIn('django_ixn_test_table', tl,
"django_table_names() returned a non-Django table")
def test_django_table_names_retval_type(self):
# Table name is a list #15216
tl = connection.introspection.django_table_names(only_existing=True)
self.assertIs(type(tl), list)
tl = connection.introspection.django_table_names(only_existing=False)
self.assertIs(type(tl), list)
def test_table_names_with_views(self):
with connection.cursor() as cursor:
try:
cursor.execute(
'CREATE VIEW introspection_article_view AS SELECT headline '
'from introspection_article;')
except DatabaseError as e:
if 'insufficient privileges' in str(e):
self.fail("The test user has no CREATE VIEW privileges")
else:
raise
self.assertIn('introspection_article_view', connection.introspection.table_names(include_views=True))
self.assertNotIn('introspection_article_view', connection.introspection.table_names())
def test_unmanaged_through_model(self):
tables = connection.introspection.django_table_names()
self.assertNotIn(ArticleReporter._meta.db_table, tables)
def test_installed_models(self):
tables = [Article._meta.db_table, Reporter._meta.db_table]
models = connection.introspection.installed_models(tables)
self.assertEqual(models, {Article, Reporter})
def test_sequence_list(self):
sequences = connection.introspection.sequence_list()
expected = {'table': Reporter._meta.db_table, 'column': 'id'}
self.assertIn(expected, sequences, 'Reporter sequence not found in sequence_list()')
def test_get_table_description_names(self):
with connection.cursor() as cursor:
desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
self.assertEqual([r[0] for r in desc],
[f.column for f in Reporter._meta.fields])
def test_get_table_description_types(self):
with connection.cursor() as cursor:
desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
self.assertEqual(
[datatype(r[1], r) for r in desc],
['AutoField' if connection.features.can_introspect_autofield else 'IntegerField',
'CharField', 'CharField', 'CharField',
'BigIntegerField' if connection.features.can_introspect_big_integer_field else 'IntegerField',
'BinaryField' if connection.features.can_introspect_binary_field else 'TextField',
'SmallIntegerField' if connection.features.can_introspect_small_integer_field else 'IntegerField']
)
def test_get_table_description_col_lengths(self):
with connection.cursor() as cursor:
desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
self.assertEqual(
[r[3] for r in desc if datatype(r[1], r) == 'CharField'],
[30, 30, 254]
)
@skipUnlessDBFeature('can_introspect_null')
def test_get_table_description_nullable(self):
with connection.cursor() as cursor:
desc = connection.introspection.get_table_description(cursor, Reporter._meta.db_table)
nullable_by_backend = connection.features.interprets_empty_strings_as_nulls
self.assertEqual(
[r[6] for r in desc],
[False, nullable_by_backend, nullable_by_backend, nullable_by_backend, True, True, False]
)
@skipUnlessDBFeature('can_introspect_autofield')
def test_bigautofield(self):
with connection.cursor() as cursor:
desc = connection.introspection.get_table_description(cursor, City._meta.db_table)
self.assertIn('BigAutoField', [datatype(r[1], r) for r in desc])
# Regression test for #9991 - 'real' types in postgres
@skipUnlessDBFeature('has_real_datatype')
def test_postgresql_real_type(self):
with connection.cursor() as cursor:
cursor.execute("CREATE TABLE django_ixn_real_test_table (number REAL);")
desc = connection.introspection.get_table_description(cursor, 'django_ixn_real_test_table')
cursor.execute('DROP TABLE django_ixn_real_test_table;')
self.assertEqual(datatype(desc[0][1], desc[0]), 'FloatField')
@skipUnlessDBFeature('can_introspect_foreign_keys')
def test_get_relations(self):
with connection.cursor() as cursor:
relations = connection.introspection.get_relations(cursor, Article._meta.db_table)
# That's {field_name: (field_name_other_table, other_table)}
expected_relations = {
'reporter_id': ('id', Reporter._meta.db_table),
'response_to_id': ('id', Article._meta.db_table),
}
self.assertEqual(relations, expected_relations)
# Removing a field shouldn't disturb get_relations (#17785)
body = Article._meta.get_field('body')
with connection.schema_editor() as editor:
editor.remove_field(Article, body)
with connection.cursor() as cursor:
relations = connection.introspection.get_relations(cursor, Article._meta.db_table)
with connection.schema_editor() as editor:
editor.add_field(Article, body)
self.assertEqual(relations, expected_relations)
@skipUnless(connection.vendor == 'sqlite', "This is an sqlite-specific issue")
def test_get_relations_alt_format(self):
"""
With SQLite, foreign keys can be added with different syntaxes and
formatting.
"""
create_table_statements = [
"CREATE TABLE track(id, art_id INTEGER, FOREIGN KEY(art_id) REFERENCES {}(id));",
"CREATE TABLE track(id, art_id INTEGER, FOREIGN KEY (art_id) REFERENCES {}(id));"
]
for statement in create_table_statements:
with connection.cursor() as cursor:
cursor.fetchone = mock.Mock(return_value=[statement.format(Article._meta.db_table)])
relations = connection.introspection.get_relations(cursor, 'mocked_table')
self.assertEqual(relations, {'art_id': ('id', Article._meta.db_table)})
@skipUnlessDBFeature('can_introspect_foreign_keys')
def test_get_key_columns(self):
with connection.cursor() as cursor:
key_columns = connection.introspection.get_key_columns(cursor, Article._meta.db_table)
self.assertEqual(
set(key_columns),
{('reporter_id', Reporter._meta.db_table, 'id'),
('response_to_id', Article._meta.db_table, 'id')})
def test_get_primary_key_column(self):
with connection.cursor() as cursor:
primary_key_column = connection.introspection.get_primary_key_column(cursor, Article._meta.db_table)
pk_fk_column = connection.introspection.get_primary_key_column(cursor, District._meta.db_table)
self.assertEqual(primary_key_column, 'id')
self.assertEqual(pk_fk_column, 'city_id')
@ignore_warnings(category=RemovedInDjango21Warning)
def test_get_indexes(self):
with connection.cursor() as cursor:
indexes = connection.introspection.get_indexes(cursor, Article._meta.db_table)
self.assertEqual(indexes['reporter_id'], {'unique': False, 'primary_key': False})
@ignore_warnings(category=RemovedInDjango21Warning)
def test_get_indexes_multicol(self):
"""
Multicolumn indexes are not included in the introspection results.
"""
with connection.cursor() as cursor:
indexes = connection.introspection.get_indexes(cursor, Reporter._meta.db_table)
self.assertNotIn('first_name', indexes)
self.assertIn('id', indexes)
def test_get_constraints_index_types(self):
with connection.cursor() as cursor:
constraints = connection.introspection.get_constraints(cursor, Article._meta.db_table)
index = {}
index2 = {}
for key, val in constraints.items():
if val['columns'] == ['headline', 'pub_date']:
index = val
if val['columns'] == ['headline', 'response_to_id', 'pub_date', 'reporter_id']:
index2 = val
self.assertEqual(index['type'], Index.suffix)
self.assertEqual(index2['type'], Index.suffix)
@skipUnlessDBFeature('supports_index_column_ordering')
def test_get_constraints_indexes_orders(self):
"""
Indexes have the 'orders' key with a list of 'ASC'/'DESC' values.
"""
with connection.cursor() as cursor:
constraints = connection.introspection.get_constraints(cursor, Article._meta.db_table)
indexes_verified = 0
expected_columns = [
['reporter_id'],
['headline', 'pub_date'],
['response_to_id'],
['headline', 'response_to_id', 'pub_date', 'reporter_id'],
]
for key, val in constraints.items():
if val['index'] and not (val['primary_key'] or val['unique']):
self.assertIn(val['columns'], expected_columns)
self.assertEqual(val['orders'], ['ASC'] * len(val['columns']))
indexes_verified += 1
self.assertEqual(indexes_verified, 4)
def datatype(dbtype, description):
"""Helper to convert a data type into a string."""
dt = connection.introspection.get_field_type(dbtype, description)
if type(dt) is tuple:
return dt[0]
else:
return dt
|
cloudera/hue
|
desktop/core/ext-py/Django-1.11.29/tests/introspection/tests.py
|
Python
|
apache-2.0
| 10,863
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#Polina Morozova 16.11.2014
import sqlite3
import sys
import re
import datetime
def unescape(line):
line = line.replace(""", "\"")
line = line.replace("'", "'")
line = line.replace("&", "&")
line = line.replace("<", "<")
line = line.replace(">", ">")
line = line.replace("«", "<<")
line = line.replace("»", ">>")
line = line.replace("'", "'")
line = line.replace("“", "\"")
line = line.replace("”", "\"")
line = line.replace("‘", "\'")
line = line.replace("’", "\'")
line = line.replace("■", "")
line = line.replace("•", "-")
return line
def query_messages(autor, d_low, d_high):
conn = sqlite3.connect('main.db')
try:
c = conn.cursor()
r = c.execute('SELECT body_xml FROM Messages WHERE author = ? and timestamp >= ? and timestamp < ? order by timestamp asc', (autor, d_low, d_high))
result=[]
for row in r:
text = re.sub('<[^<]+>', "", str(row[0]))
text = unescape(text)
result.append(text)
return result
finally:
conn.close()
def main(argv):
if len(argv) < 2:
print ("python fox.py date author")
return
date_input=argv[0] # 2014-11-30
autor = argv [1]
d = datetime.datetime.strptime( date_input, "%Y-%m-%d" )
d_low = int(d.timestamp())
    d_high = d_low + 24*60*60  # one day; Skype timestamps are unix seconds, not milliseconds
result = query_messages(autor, d_low, d_high)
for message in result:
print (message)
if __name__ == '__main__':
main(sys.argv[1:])
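# Example invocation (a sketch; the author handle is hypothetical):
#   python fox.py 2014-11-30 echo123
# prints every message that author sent on the given day, oldest first,
# reading from the Skype main.db in the current directory.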
|
p0linka/AA_hmw
|
hmw_3/fox.py
|
Python
|
mit
| 1,482
|
#Is a year a leap year
year = int(input("Enter a year: "))
if (year % 4) == 0:
if (year % 100) == 0:
if (year % 400) == 0:
print("{0} is a leap year, yay!".format(year))
else:
print("{0} isnt a leap year".format(year))
else:
print("{0} is a leap year".format(year))
else:
print("{0} isnt a leap year".format(year))
|
JessicaGarson/ExercismSubmissions
|
leapsubmission.py
|
Python
|
unlicense
| 370
|
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas._libs.tslibs import period as libperiod
import pandas as pd
from pandas import (
DatetimeIndex, Period, PeriodIndex, Series, notna, period_range)
from pandas.util import testing as tm
class TestGetItem:
def test_ellipsis(self):
# GH#21282
idx = period_range('2011-01-01', '2011-01-31', freq='D',
name='idx')
result = idx[...]
assert result.equals(idx)
assert result is not idx
def test_getitem(self):
idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D',
name='idx')
for idx in [idx1]:
result = idx[0]
assert result == pd.Period('2011-01-01', freq='D')
result = idx[-1]
assert result == pd.Period('2011-01-31', freq='D')
result = idx[0:5]
expected = pd.period_range('2011-01-01', '2011-01-05', freq='D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
result = idx[0:10:2]
expected = pd.PeriodIndex(['2011-01-01', '2011-01-03',
'2011-01-05',
'2011-01-07', '2011-01-09'],
freq='D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
result = idx[-20:-5:3]
expected = pd.PeriodIndex(['2011-01-12', '2011-01-15',
'2011-01-18',
'2011-01-21', '2011-01-24'],
freq='D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
result = idx[4::-1]
expected = PeriodIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
def test_getitem_index(self):
idx = period_range('2007-01', periods=10, freq='M', name='x')
result = idx[[1, 3, 5]]
exp = pd.PeriodIndex(['2007-02', '2007-04', '2007-06'],
freq='M', name='x')
tm.assert_index_equal(result, exp)
result = idx[[True, True, False, False, False,
True, True, False, False, False]]
exp = pd.PeriodIndex(['2007-01', '2007-02', '2007-06', '2007-07'],
freq='M', name='x')
tm.assert_index_equal(result, exp)
def test_getitem_partial(self):
rng = period_range('2007-01', periods=50, freq='M')
ts = Series(np.random.randn(len(rng)), rng)
with pytest.raises(KeyError, match=r"^'2006'$"):
ts['2006']
result = ts['2008']
assert (result.index.year == 2008).all()
result = ts['2008':'2009']
assert len(result) == 24
result = ts['2008-1':'2009-12']
assert len(result) == 24
result = ts['2008Q1':'2009Q4']
assert len(result) == 24
result = ts[:'2009']
assert len(result) == 36
result = ts['2009':]
assert len(result) == 50 - 24
exp = result
result = ts[24:]
tm.assert_series_equal(exp, result)
ts = ts[10:].append(ts[10:])
msg = "left slice bound for non-unique label: '2008'"
with pytest.raises(KeyError, match=msg):
ts[slice('2008', '2009')]
def test_getitem_datetime(self):
rng = period_range(start='2012-01-01', periods=10, freq='W-MON')
ts = Series(range(len(rng)), index=rng)
dt1 = datetime(2011, 10, 2)
dt4 = datetime(2012, 4, 20)
rs = ts[dt1:dt4]
tm.assert_series_equal(rs, ts)
def test_getitem_nat(self):
idx = pd.PeriodIndex(['2011-01', 'NaT', '2011-02'], freq='M')
assert idx[0] == pd.Period('2011-01', freq='M')
assert idx[1] is pd.NaT
s = pd.Series([0, 1, 2], index=idx)
assert s[pd.NaT] == 1
s = pd.Series(idx, index=idx)
assert (s[pd.Period('2011-01', freq='M')] ==
pd.Period('2011-01', freq='M'))
assert s[pd.NaT] is pd.NaT
def test_getitem_list_periods(self):
# GH 7710
rng = period_range(start='2012-01-01', periods=10, freq='D')
ts = Series(range(len(rng)), index=rng)
exp = ts.iloc[[1]]
tm.assert_series_equal(ts[[Period('2012-01-02', freq='D')]], exp)
def test_getitem_seconds(self):
# GH#6716
didx = pd.date_range(start='2013/01/01 09:00:00', freq='S',
periods=4000)
pidx = period_range(start='2013/01/01 09:00:00', freq='S',
periods=4000)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = ['2014', '2013/02', '2013/01/02', '2013/02/01 9H',
'2013/02/01 09:00']
for v in values:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s['2013/01/01 10:00'], s[3600:3660])
tm.assert_series_equal(s['2013/01/01 9H'], s[:3600])
for d in ['2013/01/01', '2013/01', '2013']:
tm.assert_series_equal(s[d], s)
def test_getitem_day(self):
# GH#6716
# Confirm DatetimeIndex and PeriodIndex works identically
didx = pd.date_range(start='2013/01/01', freq='D', periods=400)
pidx = period_range(start='2013/01/01', freq='D', periods=400)
for idx in [didx, pidx]:
# getitem against index should raise ValueError
values = ['2014', '2013/02', '2013/01/02', '2013/02/01 9H',
'2013/02/01 09:00']
for v in values:
# GH7116
# these show deprecations as we are trying
# to slice with non-integer indexers
# with pytest.raises(IndexError):
# idx[v]
continue
s = Series(np.random.rand(len(idx)), index=idx)
tm.assert_series_equal(s['2013/01'], s[0:31])
tm.assert_series_equal(s['2013/02'], s[31:59])
tm.assert_series_equal(s['2014'], s[365:])
invalid = ['2013/02/01 9H', '2013/02/01 09:00']
for v in invalid:
with pytest.raises(KeyError):
s[v]
class TestWhere:
@pytest.mark.parametrize('klass', [list, tuple, np.array, Series])
def test_where(self, klass):
i = period_range('20130101', periods=5, freq='D')
cond = [True] * len(i)
expected = i
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
cond = [False] + [True] * (len(i) - 1)
expected = PeriodIndex([pd.NaT] + i[1:].tolist(), freq='D')
result = i.where(klass(cond))
tm.assert_index_equal(result, expected)
def test_where_other(self):
i = period_range('20130101', periods=5, freq='D')
for arr in [np.nan, pd.NaT]:
result = i.where(notna(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notna(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = pd.PeriodIndex([pd.NaT, pd.NaT] + i[2:].tolist(),
freq='D')
result = i.where(notna(i2), i2.values)
tm.assert_index_equal(result, i2)
class TestTake:
def test_take(self):
# GH#10295
idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D',
name='idx')
for idx in [idx1]:
result = idx.take([0])
assert result == pd.Period('2011-01-01', freq='D')
result = idx.take([5])
assert result == pd.Period('2011-01-06', freq='D')
result = idx.take([0, 1, 2])
expected = pd.period_range('2011-01-01', '2011-01-03', freq='D',
name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == 'D'
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = pd.PeriodIndex(['2011-01-01', '2011-01-03',
'2011-01-05'], freq='D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
result = idx.take([7, 4, 1])
expected = pd.PeriodIndex(['2011-01-08', '2011-01-05',
'2011-01-02'],
freq='D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
result = idx.take([3, 2, 5])
expected = PeriodIndex(['2011-01-04', '2011-01-03', '2011-01-06'],
freq='D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
result = idx.take([-3, 2, 5])
expected = PeriodIndex(['2011-01-29', '2011-01-03', '2011-01-06'],
freq='D', name='idx')
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
assert result.freq == 'D'
def test_take_misc(self):
index = period_range(start='1/1/10', end='12/31/12', freq='D',
name='idx')
expected = PeriodIndex([datetime(2010, 1, 6), datetime(2010, 1, 7),
datetime(2010, 1, 9), datetime(2010, 1, 13)],
freq='D', name='idx')
taken1 = index.take([5, 6, 8, 12])
taken2 = index[[5, 6, 8, 12]]
for taken in [taken1, taken2]:
tm.assert_index_equal(taken, expected)
assert isinstance(taken, PeriodIndex)
assert taken.freq == index.freq
assert taken.name == expected.name
def test_take_fill_value(self):
# GH#12631
idx = pd.PeriodIndex(['2011-01-01', '2011-02-01', '2011-03-01'],
name='xxx', freq='D')
result = idx.take(np.array([1, 0, -1]))
expected = pd.PeriodIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
name='xxx', freq='D')
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.PeriodIndex(['2011-02-01', '2011-01-01', 'NaT'],
name='xxx', freq='D')
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False,
fill_value=True)
expected = pd.PeriodIndex(['2011-02-01', '2011-01-01', '2011-03-01'],
name='xxx', freq='D')
tm.assert_index_equal(result, expected)
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
msg = "index -5 is out of bounds for size 3"
with pytest.raises(IndexError, match=msg):
idx.take(np.array([1, -5]))
class TestIndexing:
def test_get_loc_msg(self):
idx = period_range('2000-1-1', freq='A', periods=10)
bad_period = Period('2012', 'A')
with pytest.raises(KeyError, match=r"^Period\('2012', 'A-DEC'\)$"):
idx.get_loc(bad_period)
try:
idx.get_loc(bad_period)
except KeyError as inst:
assert inst.args[0] == bad_period
def test_get_loc_nat(self):
didx = DatetimeIndex(['2011-01-01', 'NaT', '2011-01-03'])
pidx = PeriodIndex(['2011-01-01', 'NaT', '2011-01-03'], freq='M')
# check DatetimeIndex compat
for idx in [didx, pidx]:
assert idx.get_loc(pd.NaT) == 1
assert idx.get_loc(None) == 1
assert idx.get_loc(float('nan')) == 1
assert idx.get_loc(np.nan) == 1
def test_get_loc(self):
# GH 17717
p0 = pd.Period('2017-09-01')
p1 = pd.Period('2017-09-02')
p2 = pd.Period('2017-09-03')
# get the location of p1/p2 from
# monotonic increasing PeriodIndex with non-duplicate
idx0 = pd.PeriodIndex([p0, p1, p2])
expected_idx1_p1 = 1
expected_idx1_p2 = 2
assert idx0.get_loc(p1) == expected_idx1_p1
assert idx0.get_loc(str(p1)) == expected_idx1_p1
assert idx0.get_loc(p2) == expected_idx1_p2
assert idx0.get_loc(str(p2)) == expected_idx1_p2
msg = "Cannot interpret 'foo' as period"
with pytest.raises(KeyError, match=msg):
idx0.get_loc('foo')
with pytest.raises(KeyError, match=r"^1\.1$"):
idx0.get_loc(1.1)
msg = (r"'PeriodIndex\(\['2017-09-01', '2017-09-02', '2017-09-03'\],"
r" dtype='period\[D\]', freq='D'\)' is an invalid key")
with pytest.raises(TypeError, match=msg):
idx0.get_loc(idx0)
# get the location of p1/p2 from
# monotonic increasing PeriodIndex with duplicate
idx1 = pd.PeriodIndex([p1, p1, p2])
expected_idx1_p1 = slice(0, 2)
expected_idx1_p2 = 2
assert idx1.get_loc(p1) == expected_idx1_p1
assert idx1.get_loc(str(p1)) == expected_idx1_p1
assert idx1.get_loc(p2) == expected_idx1_p2
assert idx1.get_loc(str(p2)) == expected_idx1_p2
msg = "Cannot interpret 'foo' as period"
with pytest.raises(KeyError, match=msg):
idx1.get_loc('foo')
with pytest.raises(KeyError, match=r"^1\.1$"):
idx1.get_loc(1.1)
msg = (r"'PeriodIndex\(\['2017-09-02', '2017-09-02', '2017-09-03'\],"
r" dtype='period\[D\]', freq='D'\)' is an invalid key")
with pytest.raises(TypeError, match=msg):
idx1.get_loc(idx1)
# get the location of p1/p2 from
# non-monotonic increasing/decreasing PeriodIndex with duplicate
idx2 = pd.PeriodIndex([p2, p1, p2])
expected_idx2_p1 = 1
expected_idx2_p2 = np.array([True, False, True])
assert idx2.get_loc(p1) == expected_idx2_p1
assert idx2.get_loc(str(p1)) == expected_idx2_p1
tm.assert_numpy_array_equal(idx2.get_loc(p2), expected_idx2_p2)
tm.assert_numpy_array_equal(idx2.get_loc(str(p2)), expected_idx2_p2)
def test_is_monotonic_increasing(self):
# GH 17717
p0 = pd.Period('2017-09-01')
p1 = pd.Period('2017-09-02')
p2 = pd.Period('2017-09-03')
idx_inc0 = pd.PeriodIndex([p0, p1, p2])
idx_inc1 = pd.PeriodIndex([p0, p1, p1])
idx_dec0 = pd.PeriodIndex([p2, p1, p0])
idx_dec1 = pd.PeriodIndex([p2, p1, p1])
idx = pd.PeriodIndex([p1, p2, p0])
assert idx_inc0.is_monotonic_increasing is True
assert idx_inc1.is_monotonic_increasing is True
assert idx_dec0.is_monotonic_increasing is False
assert idx_dec1.is_monotonic_increasing is False
assert idx.is_monotonic_increasing is False
def test_is_monotonic_decreasing(self):
# GH 17717
p0 = pd.Period('2017-09-01')
p1 = pd.Period('2017-09-02')
p2 = pd.Period('2017-09-03')
idx_inc0 = pd.PeriodIndex([p0, p1, p2])
idx_inc1 = pd.PeriodIndex([p0, p1, p1])
idx_dec0 = pd.PeriodIndex([p2, p1, p0])
idx_dec1 = pd.PeriodIndex([p2, p1, p1])
idx = pd.PeriodIndex([p1, p2, p0])
assert idx_inc0.is_monotonic_decreasing is False
assert idx_inc1.is_monotonic_decreasing is False
assert idx_dec0.is_monotonic_decreasing is True
assert idx_dec1.is_monotonic_decreasing is True
assert idx.is_monotonic_decreasing is False
def test_contains(self):
# GH 17717
p0 = pd.Period('2017-09-01')
p1 = pd.Period('2017-09-02')
p2 = pd.Period('2017-09-03')
p3 = pd.Period('2017-09-04')
ps0 = [p0, p1, p2]
idx0 = pd.PeriodIndex(ps0)
for p in ps0:
assert idx0.contains(p)
assert p in idx0
assert idx0.contains(str(p))
assert str(p) in idx0
assert idx0.contains('2017-09-01 00:00:01')
assert '2017-09-01 00:00:01' in idx0
assert idx0.contains('2017-09')
assert '2017-09' in idx0
assert not idx0.contains(p3)
assert p3 not in idx0
def test_get_value(self):
# GH 17717
p0 = pd.Period('2017-09-01')
p1 = pd.Period('2017-09-02')
p2 = pd.Period('2017-09-03')
idx0 = pd.PeriodIndex([p0, p1, p2])
input0 = np.array([1, 2, 3])
expected0 = 2
result0 = idx0.get_value(input0, p1)
assert result0 == expected0
idx1 = pd.PeriodIndex([p1, p1, p2])
input1 = np.array([1, 2, 3])
expected1 = np.array([1, 2])
result1 = idx1.get_value(input1, p1)
tm.assert_numpy_array_equal(result1, expected1)
idx2 = pd.PeriodIndex([p1, p2, p1])
input2 = np.array([1, 2, 3])
expected2 = np.array([1, 3])
result2 = idx2.get_value(input2, p1)
tm.assert_numpy_array_equal(result2, expected2)
def test_get_indexer(self):
# GH 17717
p1 = pd.Period('2017-09-01')
p2 = pd.Period('2017-09-04')
p3 = pd.Period('2017-09-07')
tp0 = pd.Period('2017-08-31')
tp1 = pd.Period('2017-09-02')
tp2 = pd.Period('2017-09-05')
tp3 = pd.Period('2017-09-09')
idx = pd.PeriodIndex([p1, p2, p3])
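        # 'pad' matches the last period at or before each target, 'backfill'
        # the first at or after it, 'nearest' the closest; -1 marks targets
        # with no valid match.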
tm.assert_numpy_array_equal(idx.get_indexer(idx),
np.array([0, 1, 2], dtype=np.intp))
target = pd.PeriodIndex([tp0, tp1, tp2, tp3])
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2, -1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 0, 1, 2], dtype=np.intp))
res = idx.get_indexer(target, 'nearest',
tolerance=pd.Timedelta('1 day'))
tm.assert_numpy_array_equal(res,
np.array([0, 0, 1, -1], dtype=np.intp))
def test_get_indexer_non_unique(self):
# GH 17717
p1 = pd.Period('2017-09-02')
p2 = pd.Period('2017-09-03')
p3 = pd.Period('2017-09-04')
p4 = pd.Period('2017-09-05')
idx1 = pd.PeriodIndex([p1, p2, p1])
idx2 = pd.PeriodIndex([p2, p1, p3, p4])
result = idx1.get_indexer_non_unique(idx2)
expected_indexer = np.array([1, 0, 2, -1, -1], dtype=np.intp)
expected_missing = np.array([2, 3], dtype=np.int64)
tm.assert_numpy_array_equal(result[0], expected_indexer)
tm.assert_numpy_array_equal(result[1], expected_missing)
# TODO: This method came from test_period; de-dup with version above
def test_get_loc2(self):
idx = pd.period_range('2000-01-01', periods=3)
for method in [None, 'pad', 'backfill', 'nearest']:
assert idx.get_loc(idx[1], method) == 1
assert idx.get_loc(idx[1].asfreq('H', how='start'), method) == 1
assert idx.get_loc(idx[1].to_timestamp(), method) == 1
assert idx.get_loc(idx[1].to_timestamp()
.to_pydatetime(), method) == 1
assert idx.get_loc(str(idx[1]), method) == 1
idx = pd.period_range('2000-01-01', periods=5)[::2]
assert idx.get_loc('2000-01-02T12', method='nearest',
tolerance='1 day') == 1
assert idx.get_loc('2000-01-02T12', method='nearest',
tolerance=pd.Timedelta('1D')) == 1
assert idx.get_loc('2000-01-02T12', method='nearest',
tolerance=np.timedelta64(1, 'D')) == 1
assert idx.get_loc('2000-01-02T12', method='nearest',
tolerance=timedelta(1)) == 1
msg = 'unit abbreviation w/o a number'
with pytest.raises(ValueError, match=msg):
idx.get_loc('2000-01-10', method='nearest', tolerance='foo')
msg = 'Input has different freq=None from PeriodArray\\(freq=D\\)'
with pytest.raises(ValueError, match=msg):
idx.get_loc('2000-01-10', method='nearest', tolerance='1 hour')
with pytest.raises(KeyError, match=r"^Period\('2000-01-10', 'D'\)$"):
idx.get_loc('2000-01-10', method='nearest', tolerance='1 day')
with pytest.raises(
ValueError,
match='list-like tolerance size must match target index size'):
idx.get_loc('2000-01-10', method='nearest',
tolerance=[pd.Timedelta('1 day').to_timedelta64(),
pd.Timedelta('1 day').to_timedelta64()])
# TODO: This method came from test_period; de-dup with version above
def test_get_indexer2(self):
idx = pd.period_range('2000-01-01', periods=3).asfreq('H', how='start')
tm.assert_numpy_array_equal(idx.get_indexer(idx),
np.array([0, 1, 2], dtype=np.intp))
target = pd.PeriodIndex(['1999-12-31T23', '2000-01-01T12',
'2000-01-02T01'], freq='H')
tm.assert_numpy_array_equal(idx.get_indexer(target, 'pad'),
np.array([-1, 0, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'backfill'),
np.array([0, 1, 2], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest'),
np.array([0, 1, 1], dtype=np.intp))
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest',
tolerance='1 hour'),
np.array([0, -1, 1], dtype=np.intp))
msg = 'Input has different freq=None from PeriodArray\\(freq=H\\)'
with pytest.raises(ValueError, match=msg):
idx.get_indexer(target, 'nearest', tolerance='1 minute')
tm.assert_numpy_array_equal(idx.get_indexer(target, 'nearest',
tolerance='1 day'),
np.array([0, 1, 1], dtype=np.intp))
tol_raw = [pd.Timedelta('1 hour'),
pd.Timedelta('1 hour'),
np.timedelta64(1, 'D'), ]
tm.assert_numpy_array_equal(
idx.get_indexer(target, 'nearest',
tolerance=[np.timedelta64(x) for x in tol_raw]),
np.array([0, -1, 1], dtype=np.intp))
tol_bad = [pd.Timedelta('2 hour').to_timedelta64(),
pd.Timedelta('1 hour').to_timedelta64(),
np.timedelta64(1, 'M'), ]
with pytest.raises(
libperiod.IncompatibleFrequency,
match='Input has different freq=None from'):
idx.get_indexer(target, 'nearest', tolerance=tol_bad)
def test_indexing(self):
# GH 4390, iat incorrectly indexing
index = period_range('1/1/2001', periods=10)
s = Series(np.random.randn(10), index=index)
expected = s[index[0]]
result = s.iat[0]
assert expected == result
def test_period_index_indexer(self):
# GH4125
idx = pd.period_range('2002-01', '2003-12', freq='M')
        df = pd.DataFrame(np.random.randn(24, 10), index=idx)
tm.assert_frame_equal(df, df.loc[idx])
tm.assert_frame_equal(df, df.loc[list(idx)])
tm.assert_frame_equal(df, df.loc[list(idx)])
tm.assert_frame_equal(df.iloc[0:5], df.loc[idx[0:5]])
tm.assert_frame_equal(df, df.loc[list(idx)])
|
cbertinato/pandas
|
pandas/tests/indexes/period/test_indexing.py
|
Python
|
bsd-3-clause
| 25,316
|
# Copyright (c) 2001-2019, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, division
|
xlqian/navitia
|
source/tyr/tests/integration/__init__.py
|
Python
|
agpl-3.0
| 1,308
|
"""Settings URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.views.generic.base import RedirectView
from core.views import signup, login, WikiPage, newWikiPage
from django.contrib.auth import views as auth_views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^wiki/(.*)/edit/$', WikiPage.as_view(template_name='wiki/edit.html'), name='wikiEdit'),
url(r'^wiki/(.*)/revisions/$', WikiPage.as_view(template_name='wiki/revisions.html'), name='wikiRevisions'),
url(r'^wiki/(.*)/$', WikiPage.as_view(), name='wiki'),
url(r'^wikiNew/$', newWikiPage, name='wikiNew'),
# url(r'^comments/', include('django_comments.urls')),
url(r'^login/$', auth_views.login, name='login'),
url(r'^logout/$', auth_views.logout, name='logout'),
url(r'^signup/', signup),
url(r'^wiki/$', RedirectView.as_view(url='/wiki/home', permanent=False)),
url(r'^$', RedirectView.as_view(url='/wiki/home', permanent=False),name='home'),
]
|
traverseda/MfD-wiki
|
Settings/urls.py
|
Python
|
agpl-3.0
| 1,624
|
#!/usr/bin/python
# -*- coding: iso-8859-15 -*-
## @file info_j2k.py
# The size in bytes, and a codestream Kbps, even detailed subband
# level and neglecting headers, from a J2K codestream.
#
# @authors Jose Carmelo Maturana-Espinosa\n Vicente Gonzalez-Ruiz.
# @date Last modification: 2015, January 7.
#
## @package info_j2k
# The size in bytes, and a codestream Kbps, even detailed subband
# level and neglecting headers, from a J2K codestream.
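#
#  Typical invocation (a sketch; option names as defined in main() below,
#  argument values are illustrative only):
#    python info_j2k.py --GOPs 8 --TRLs 5 --FPS 30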
from info import info
from MCTF_parser import MCTF_parser
## Class info for J2K codec.
class info_j2k(info):
    ## Find the length of a JPEG 2000 image.
    # @param self Refers to object.
    # @param file JPEG 2000 image.
    # @return Length of the JPEG 2000 image.
def find_next_EOC_texture(self, file):
if file == None:
return 0
else:
return int(file.readline())
## Find the length of motion.
# @param self Refers to object.
# @param file Motion file.
# @return Length of motion.
def find_next_EOC_motion(self, file):
if file == None:
return 0
else:
return int(file.readline())
    ## Open a sizes file.
    # @param self Refers to object.
    # @param codestream_filename Codestream filename.
    # @return The sizes file, or None if it cannot be opened.
def open_codestream(self, codestream_filename):
try:
return open(codestream_filename, 'rb')
except IOError:
return None
## Bytes per frame in MCTF context.
# @param self Refers to object.
# @param bytes_frame_TM Size frames without MCTF order.
# @return Size frame.
#def sizeFrame_MCTF(self, bytes_frame_TM):
## Main function.
def main():
parser = MCTF_parser(description="Info.")
parser.add_argument("--GOPs", help="number of GOPs to process. (Default = {})".format(info.GOPs))
parser.add_argument("--TRLs", help="number of iterations of the temporal transform + 1. (Default = {})".format(info.TRLs))
parser.add_argument("--FPS", help="frames per second. (Default = {})".format(info.FPS))
args = parser.parse_known_args()[0]
if args.GOPs:
info.GOPs = int(args.GOPs)
if args.TRLs:
info.TRLs = int(args.TRLs)
if args.FPS:
info.FPS = int(args.FPS)
    x = info_j2k(info.GOPs, info.TRLs, info.FPS)
if __name__ == '__main__':
main()
|
vicente-gonzalez-ruiz/QSVC
|
trunk/src/info_j2k.py
|
Python
|
gpl-2.0
| 2,409
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for AnalyzeContent
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflow
# [START dialogflow_generated_dialogflow_v2_Participants_AnalyzeContent_async]
from google.cloud import dialogflow_v2
async def sample_analyze_content():
# Create a client
client = dialogflow_v2.ParticipantsAsyncClient()
# Initialize request argument(s)
text_input = dialogflow_v2.TextInput()
text_input.text = "text_value"
text_input.language_code = "language_code_value"
request = dialogflow_v2.AnalyzeContentRequest(
text_input=text_input,
participant="participant_value",
)
# Make the request
response = await client.analyze_content(request=request)
# Handle the response
print(response)
# [END dialogflow_generated_dialogflow_v2_Participants_AnalyzeContent_async]
|
googleapis/python-dialogflow
|
samples/generated_samples/dialogflow_generated_dialogflow_v2_participants_analyze_content_async.py
|
Python
|
apache-2.0
| 1,679
|
# Copyright 2011 Joe Wreschnig, Christoph Reiter
# 2013-2020 Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
import sys
import bz2
import itertools
from functools import reduce
from http.client import HTTPException
from os.path import splitext
from threading import Thread
from typing import Dict, Collection, Callable, Iterable, Optional
from urllib.request import urlopen
import re
from gi.repository import Gtk, GLib, Pango
from senf import text2fsn
from quodlibet.util.dprint import print_d, print_e
import quodlibet
from quodlibet import _
from quodlibet import qltk
from quodlibet import util
from quodlibet import config
from quodlibet.browsers import Browser
from quodlibet.formats.remote import RemoteFile
from quodlibet.formats._audio import TAG_TO_SORT, MIGRATE, AudioFile
from quodlibet.library import SongLibrary
from quodlibet.query import Query
from quodlibet.qltk.getstring import GetStringDialog
from quodlibet.qltk.songsmenu import SongsMenu
from quodlibet.qltk.notif import Task
from quodlibet.qltk import Icons, ErrorMessage, WarningMessage
from quodlibet.util import copool, connect_destroy, sanitize_tags, connect_obj
from quodlibet.util.i18n import numeric_phrase
from quodlibet.util.path import uri_is_valid
from quodlibet.util.string import decode, encode
from quodlibet.util import print_w
from quodlibet.qltk.views import AllTreeView
from quodlibet.qltk.searchbar import SearchBarBox
from quodlibet.qltk.completion import LibraryTagCompletion
from quodlibet.qltk.x import MenuItem, Align, ScrolledWindow
from quodlibet.qltk.x import SymbolicIconImage
from quodlibet.qltk.menubutton import MenuButton
STATION_LIST_URL = \
"https://quodlibet.github.io/radio/radiolist.bz2"
STATIONS_FAV = os.path.join(quodlibet.get_user_dir(), "stations")
STATIONS_ALL = os.path.join(quodlibet.get_user_dir(), "stations_all")
# TODO: - Ranking: reduce duplicate stations (max 3 URLs per station)
# prefer stations that match a genre?
# Migration path for pickle
sys.modules["browsers.iradio"] = sys.modules[__name__]
class IRadioError(Exception):
pass
class IRFile(RemoteFile):
multisong = True
can_add = False
format = "Radio Station"
__CAN_CHANGE = "title artist grouping".split()
def __get(self, base_call, key, *args, **kwargs):
if key == "title" and "title" not in self and "organization" in self:
return base_call("organization", *args, **kwargs)
# split title by " - " if no artist tag is present and
# this is not the main song: common format for shoutcast stations
if not self.multisong and key in ("title", "artist") and \
"title" in self and "artist" not in self:
title = base_call("title").split(" - ", 1)
if len(title) > 1:
return (key == "title" and title[-1]) or title[0]
if key in ("artist", TAG_TO_SORT["artist"]) and \
not base_call(key, *args) and "website" in self:
return base_call("website", *args)
if key == "~format" and "audio-codec" in self:
return "%s (%s)" % (self.format,
base_call("audio-codec", *args, **kwargs))
return base_call(key, *args, **kwargs)
def __call__(self, key, *args, **kwargs):
base_call = super().__call__
return self.__get(base_call, key, *args, **kwargs)
def get(self, key, *args, **kwargs):
base_call = super().get
return self.__get(base_call, key, *args, **kwargs)
def write(self):
pass
def to_dump(self):
# dump without title
title = None
if "title" in self:
title = self["title"]
del self["title"]
dump = super().to_dump()
if title is not None:
self["title"] = title
# add all generated tags
lines = dump.splitlines()
for tag in ["title", "artist", "~format"]:
value = self.get(tag)
if value is not None:
lines.append(encode(tag) + b"=" + encode(value))
return b"\n".join(lines)
@property
def lyric_filename(self) -> Optional[str]:
return None
def can_change(self, k=None):
if self.streamsong:
if k is None:
return []
else:
return False
else:
if k is None:
return self.__CAN_CHANGE
else:
return k in self.__CAN_CHANGE
def parse_pls(file) -> Collection[IRFile]:
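    """Parse a PLS playlist into IRFiles.

    Raises IRadioError if the playlist points at other playlists
    (nested .pls/.m3u entries), since station lists may only contain stations.
    """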
data = {}
lines = file.read().decode('utf-8', 'replace').splitlines()
if not lines or "[playlist]" not in lines.pop(0):
return []
for line in lines:
try:
head, val = line.strip().split("=", 1)
except (TypeError, ValueError):
continue
else:
head = head.lower()
if head.startswith("length") and val == "-1":
continue
else:
data[head] = val
count = 1
files = []
warnings = []
while True:
if "file%d" % count in data:
filename = text2fsn(data["file%d" % count])
if filename.lower()[-4:] in [".pls", ".m3u", "m3u8"]:
warnings.append(filename)
else:
irf = IRFile(filename)
for key in ["title", "genre", "artist"]:
try:
irf[key] = data["%s%d" % (key, count)]
except KeyError:
pass
try:
irf["~#length"] = int(data["length%d" % count])
except (KeyError, TypeError, ValueError):
pass
files.append(irf)
else:
break
count += 1
if warnings:
raise IRadioError(
_("Station lists can only contain locations of stations, "
"not other station lists or playlists. The following locations "
"cannot be loaded:\n%s") %
"\n ".join(map(util.escape, warnings)))
return files
def parse_m3u(fileobj) -> Collection[IRFile]:
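    """Parse an (extended) M3U playlist into IRFiles.

    A preceding #EXTINF title, if any, is attached to the next URI.
    """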
files = []
pending_title = None
lines = fileobj.read().decode('utf-8', 'replace').splitlines()
for line in lines:
line = line.strip()
if line.startswith("#EXTINF:"):
try:
pending_title = line.split(",", 1)[1]
except IndexError:
pending_title = None
elif line.startswith("http"):
irf = IRFile(text2fsn(line))
if pending_title:
irf["title"] = pending_title
pending_title = None
files.append(irf)
return files
def _get_stations_from(uri: str,
on_done: Callable[[Iterable[IRFile], str], None])\
-> None:
"""Fetches the URI content and extracts IRFiles
Called from thread - so no direct GTK+ interaction
:param uri: URI of station
:param on_done: a callback taking files when done (or none if errored)
"""
with Task(_("Internet Radio"), _("Add stations")) as task:
irfs: Collection[IRFile] = []
GLib.idle_add(task.pulse)
if (uri.lower().endswith(".pls")
or uri.lower().endswith(".m3u")
or uri.lower().endswith(".m3u8")):
if not re.match('^([^/:]+)://', uri):
# Assume HTTP if no protocol given. See #2731
uri = 'http://' + uri
print_d("Assuming http: %s" % uri)
# Error handling outside
sock = None
GLib.idle_add(task.pulse)
_fn, ext = splitext(uri.lower())
try:
sock = urlopen(uri, timeout=6)
if ext == ".pls":
irfs = parse_pls(sock)
elif ext in (".m3u", ".m3u8"):
irfs = parse_m3u(sock)
GLib.idle_add(task.pulse)
except IOError as e:
print_e(f"Couldn't download from {uri} ({e})")
finally:
if sock:
sock.close()
else:
try:
irfs = [IRFile(uri)]
except ValueError as e:
print_e("Can't add URI %s" % uri, e)
on_done(irfs, uri)
def download_taglist(callback, cofuncid, step=1024 * 10):
"""Generator for loading the bz2 compressed tag list.
Calls callback with the decompressed data or None in case of
an error."""
with Task(_("Internet Radio"), _("Downloading station list")) as task:
if cofuncid:
task.copool(cofuncid)
try:
response = urlopen(STATION_LIST_URL)
except (EnvironmentError, HTTPException) as e:
print_e("Failed fetching from %s" % STATION_LIST_URL, e)
GLib.idle_add(callback, None)
return
try:
size = int(response.info().get("content-length", 0))
except ValueError:
size = 0
decomp = bz2.BZ2Decompressor()
data = b""
temp = b""
read = 0
while temp or not data:
read += len(temp)
if size:
task.update(float(read) / size)
else:
task.pulse()
yield True
try:
data += decomp.decompress(temp)
temp = response.read(step)
except (IOError, EOFError):
data = None
break
response.close()
yield True
stations = None
if data:
stations = parse_taglist(data)
print_d(f"Got {len(stations or [])} station(s)")
GLib.idle_add(callback, stations)
def parse_taglist(data):
"""Parses a dump file like list of tags and returns a list of IRFiles
uri=http://...
tag=value1
tag2=value
tag=value2
uri=http://...
...
"""
stations = []
station = None
for l in data.split(b"\n"):
if not l:
continue
key = l.split(b"=")[0]
value = l.split(b"=", 1)[1]
key = decode(key)
value = decode(value)
if key == "uri":
if station:
stations.append(station)
station = IRFile(value)
continue
san = list(sanitize_tags({key: value}, stream=True).items())
if not san:
continue
key, value = san[0]
if key == "~listenerpeak":
key = "~#listenerpeak"
value = int(value)
if not station:
continue
if isinstance(value, str):
if value not in station.list(key):
station.add(key, value)
else:
station[key] = value
if station:
stations.append(station)
return stations
class AddNewStation(GetStringDialog):
def __init__(self, parent):
super().__init__(
parent, _("New Station"),
_("Enter the location of an Internet radio station:"),
button_label=_("_Add"), button_icon=Icons.LIST_ADD)
def _verify_clipboard(self, text):
# try to extract a URI from the clipboard
for line in text.splitlines():
line = line.strip()
if uri_is_valid(line):
return line
class GenreFilter:
STAR = ["genre", "organization"]
# This probably needs improvements
GENRES = {
"electronic": (
_("Electronic"),
"|(electr,house,techno,trance,/trip.?hop/,&(drum,n,bass),chill,"
"dnb,minimal,/down(beat|tempo)/,&(dub,step))"),
"rap": (_("Hip Hop / Rap"), "|(&(hip,hop),rap)"),
"oldies": (_("Oldies"), r"|(/[2-9]0\S?s/,oldies)"),
"r&b": (_("R&B"), r"/r(\&|n)b/"),
"japanese": (_("Japanese"), "|(anime,jpop,japan,jrock)"),
"indian": (_("Indian"), "|(bollywood,hindi,indian,bhangra)"),
"religious": (
_("Religious"),
"|(religious,christian,bible,gospel,spiritual,islam)"),
"charts": (_("Charts"), "|(charts,hits,top)"),
"turkish": (_("Turkish"), "|(turkish,turkce)"),
"reggae": (_("Reggae / Dancehall"), r"|(/reggae([^\w]|$)/,dancehall)"),
"latin": (_("Latin"), "|(latin,salsa)"),
"college": (_("College Radio"), "|(college,campus)"),
"talk_news": (_("Talk / News"), "|(news,talk)"),
"ambient": (_("Ambient"), "|(ambient,easy)"),
"jazz": (_("Jazz"), "|(jazz,swing)"),
"classical": (_("Classical"), "classic"),
"pop": (_("Pop"), None),
"alternative": (_("Alternative"), None),
"metal": (_("Metal"), None),
"country": (_("Country"), None),
"news": (_("News"), None),
"schlager": (_("Schlager"), None),
"funk": (_("Funk"), None),
"indie": (_("Indie"), None),
"blues": (_("Blues"), None),
"soul": (_("Soul"), None),
"lounge": (_("Lounge"), None),
"punk": (_("Punk"), None),
"reggaeton": (_("Reggaeton"), None),
"slavic": (
_("Slavic"),
"|(narodna,albanian,manele,shqip,kosova)"),
"greek": (_("Greek"), None),
"gothic": (_("Gothic"), None),
"rock": (_("Rock"), None),
}
# parsing all above takes 350ms on an atom, so only generate when needed
__CACHE: Dict[str, Query] = {}
def keys(self):
return self.GENRES.keys()
def query(self, key):
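        # e.g. query("jazz") compiles Query("|(jazz,swing)", star=self.STAR)
        # once, then serves later calls from __CACHE (see note above).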
if key not in self.__CACHE:
text, filter_ = self.GENRES[key]
if filter_ is None:
filter_ = key
self.__CACHE[key] = Query(filter_, star=self.STAR)
return self.__CACHE[key]
def text(self, key):
return self.GENRES[key][0]
class CloseButton(Gtk.Button):
"""Reimplementation of 3.10 close button for InfoBar."""
def __init__(self):
image = Gtk.Image(visible=True, can_focus=False,
icon_name="window-close-symbolic")
super().__init__(
visible=False, can_focus=True, image=image,
relief=Gtk.ReliefStyle.NONE, valign=Gtk.Align.CENTER)
ctx = self.get_style_context()
ctx.add_class("raised")
ctx.add_class("close")
class QuestionBar(Gtk.InfoBar):
"""A widget which suggest to download the radio list if
no radio stations are present.
Connect to Gtk.InfoBar::response and check for RESPONSE_LOAD
as response id.
"""
RESPONSE_LOAD = 1
def __init__(self):
super().__init__()
self.connect("response", self.__response)
self.set_message_type(Gtk.MessageType.QUESTION)
label = Gtk.Label(label=_(
"Would you like to load a list of popular radio stations?"))
label.set_line_wrap(True)
label.show()
content = self.get_content_area()
content.add(label)
self.add_button(_("_Load Stations"), self.RESPONSE_LOAD)
self.set_show_close_button(True)
def __response(self, bar, response_id):
if response_id == Gtk.ResponseType.CLOSE:
bar.hide()
class InternetRadio(Browser, util.InstanceTracker):
__stations = None
__fav_stations = None
__librarian = None
__filter = None
name = _("Internet Radio")
accelerated_name = _("_Internet Radio")
keys = ["InternetRadio"]
priority = 16
uses_main_library = False
headers = "title artist ~people grouping genre website ~format " \
"channel-mode".split()
TYPE, ICON_NAME, KEY, NAME = range(4)
TYPE_FILTER, TYPE_ALL, TYPE_FAV, TYPE_SEP, TYPE_NOCAT = range(5)
STAR = ["artist", "title", "website", "genre", "comment"]
@classmethod
def _init(klass, library):
klass.__librarian = library.librarian
klass.__stations = SongLibrary("iradio-remote")
klass.__stations.load(STATIONS_ALL)
klass.__fav_stations = SongLibrary("iradio")
klass.__fav_stations.load(STATIONS_FAV)
klass.filters = GenreFilter()
@classmethod
def _destroy(klass):
if klass.__stations.dirty:
klass.__stations.save()
klass.__stations.destroy()
klass.__stations = None
if klass.__fav_stations.dirty:
klass.__fav_stations.save()
klass.__fav_stations.destroy()
klass.__fav_stations = None
klass.__librarian = None
klass.filters = None
def finalize(self, restored):
if not restored:
# Select "All Stations" by default
def sel_all(row):
return row[self.TYPE] == self.TYPE_ALL
self.view.select_by_func(sel_all, one=True)
def __inhibit(self):
self.view.get_selection().handler_block(self.__changed_sig)
def __uninhibit(self):
self.view.get_selection().handler_unblock(self.__changed_sig)
def __destroy(self, *args):
if not self.instances():
self._destroy()
def __init__(self, library):
super().__init__(spacing=12)
self.set_orientation(Gtk.Orientation.VERTICAL)
if not self.instances():
self._init(library)
self._register_instance()
self.connect('destroy', self.__destroy)
completion = LibraryTagCompletion(self.__stations)
self.accelerators = Gtk.AccelGroup()
self.__searchbar = search = SearchBarBox(completion=completion,
accel_group=self.accelerators)
search.connect('query-changed', self.__filter_changed)
menu = Gtk.Menu()
new_item = MenuItem(_(u"_New Station…"), Icons.LIST_ADD)
new_item.connect('activate', self.__add)
menu.append(new_item)
update_item = MenuItem(_("_Update Stations"), Icons.VIEW_REFRESH)
update_item.connect('activate', self.__update)
menu.append(update_item)
menu.show_all()
button = MenuButton(
SymbolicIconImage(Icons.EMBLEM_SYSTEM, Gtk.IconSize.MENU),
arrow=True)
button.set_menu(menu)
def focus(widget, *args):
qltk.get_top_parent(widget).songlist.grab_focus()
search.connect('focus-out', focus)
# treeview
scrolled_window = ScrolledWindow()
scrolled_window.show()
scrolled_window.set_shadow_type(Gtk.ShadowType.IN)
self.view = view = AllTreeView()
view.show()
view.set_headers_visible(False)
scrolled_window.set_policy(
Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
scrolled_window.add(view)
model = Gtk.ListStore(int, str, str, str)
model.append(row=[self.TYPE_ALL, Icons.FOLDER, "__all",
_("All Stations")])
model.append(row=[self.TYPE_SEP, Icons.FOLDER, "", ""])
# Translators: Favorite radio stations
model.append(row=[self.TYPE_FAV, Icons.FOLDER, "__fav",
_("Favorites")])
model.append(row=[self.TYPE_SEP, Icons.FOLDER, "", ""])
filters = self.filters
for text, k in sorted([(filters.text(k), k) for k in filters.keys()]):
model.append(row=[self.TYPE_FILTER, Icons.EDIT_FIND, k, text])
model.append(row=[self.TYPE_NOCAT, Icons.FOLDER,
"nocat", _("No Category")])
def separator(model, iter, data):
return model[iter][self.TYPE] == self.TYPE_SEP
view.set_row_separator_func(separator, None)
def search_func(model, column, key, iter, data):
return key.lower() not in model[iter][column].lower()
view.set_search_column(self.NAME)
view.set_search_equal_func(search_func, None)
column = Gtk.TreeViewColumn("genres")
column.set_sizing(Gtk.TreeViewColumnSizing.FIXED)
renderpb = Gtk.CellRendererPixbuf()
renderpb.props.xpad = 3
column.pack_start(renderpb, False)
column.add_attribute(renderpb, "icon-name", self.ICON_NAME)
render = Gtk.CellRendererText()
render.set_property('ellipsize', Pango.EllipsizeMode.END)
view.append_column(column)
column.pack_start(render, True)
column.add_attribute(render, "text", self.NAME)
view.set_model(model)
# selection
selection = view.get_selection()
selection.set_mode(Gtk.SelectionMode.MULTIPLE)
self.__changed_sig = connect_destroy(selection, 'changed',
util.DeferredSignal(lambda x: self.activate()))
box = Gtk.HBox(spacing=6)
box.pack_start(search, True, True, 0)
box.pack_start(button, False, True, 0)
self._searchbox = Align(box, left=0, right=6, top=6)
self._searchbox.show_all()
def qbar_response(infobar, response_id):
if response_id == infobar.RESPONSE_LOAD:
self.__update()
self.qbar = QuestionBar()
self.qbar.connect("response", qbar_response)
if self._is_library_empty():
self.qbar.show()
pane = qltk.ConfigRHPaned("browsers", "internetradio_pos", 0.4)
pane.show()
pane.pack1(scrolled_window, resize=False, shrink=False)
songbox = Gtk.VBox(spacing=6)
songbox.pack_start(self._searchbox, False, True, 0)
self._songpane_container = Gtk.VBox()
self._songpane_container.show()
songbox.pack_start(self._songpane_container, True, True, 0)
songbox.pack_start(self.qbar, False, True, 0)
songbox.show()
pane.pack2(songbox, resize=True, shrink=False)
self.pack_start(pane, True, True, 0)
self.show()
def _is_library_empty(self):
return not len(self.__stations) and not len(self.__fav_stations)
def pack(self, songpane):
container = Gtk.VBox()
container.add(self)
self._songpane_container.add(songpane)
return container
def unpack(self, container, songpane):
self._songpane_container.remove(songpane)
container.remove(self)
def __update(self, *args):
self.qbar.hide()
copool.add(download_taglist, self.__update_done,
cofuncid="radio-load", funcid="radio-load")
def __update_done(self, stations):
if not stations:
print_w("Loading remote station list failed.")
return
# filter stations based on quality, listenercount
def filter_stations(station):
peak = station.get("~#listenerpeak", 0)
if peak < 10:
return False
aac = "AAC" in station("~format")
bitrate = station("~#bitrate", 50)
if (aac and bitrate < 40) or (not aac and bitrate < 60):
return False
return True
stations = filter(filter_stations, stations)
# group them based on the title
groups = {}
for s in stations:
key = s("~title~artist")
groups.setdefault(key, []).append(s)
# keep at most 2 URLs for each group
stations = []
for key, sub in groups.items():
sub.sort(key=lambda s: s.get("~#listenerpeak", 0), reverse=True)
stations.extend(sub[:2])
# only keep the ones in at least one category
all_ = [self.filters.query(k) for k in self.filters.keys()]
assert all_
anycat_filter = reduce(lambda x, y: x | y, all_)
stations = list(filter(anycat_filter.search, stations))
# remove listenerpeak
for s in stations:
s.pop("~#listenerpeak", None)
# update the libraries
stations = dict(((s.key, s) for s in stations))
# don't add ones that are in the fav list
for fav in self.__fav_stations.keys():
stations.pop(fav, None)
# separate
o, n = set(self.__stations.keys()), set(stations)
to_add, to_change, to_remove = n - o, o & n, o - n
del o, n
# migrate stats
to_change = [stations.pop(k) for k in to_change]
for new in to_change:
old = self.__stations[new.key]
# clear everything except stats
AudioFile.reload(old)
# add new metadata except stats
for k in (x for x in new.keys() if x not in MIGRATE):
old[k] = new[k]
to_add = [stations.pop(k) for k in to_add]
to_remove = [self.__stations[k] for k in to_remove]
self.__stations.remove(to_remove)
self.__stations.changed(to_change)
self.__stations.add(to_add)
def __filter_changed(self, bar, text, restore=False):
self.__filter = Query(text, self.STAR)
if not restore:
self.activate()
def __get_selected_libraries(self):
"""Returns the libraries to search in depending on the
filter selection"""
selection = self.view.get_selection()
model, rows = selection.get_selected_rows()
types = [model[row][self.TYPE] for row in rows]
libs = [self.__fav_stations]
if types != [self.TYPE_FAV]:
libs.append(self.__stations)
return libs
def __get_selection_filter(self):
"""Returns a filter object for the current selection or None
if nothing should be filtered"""
selection = self.view.get_selection()
model, rows = selection.get_selected_rows()
filter_ = None
for row in rows:
type_ = model[row][self.TYPE]
if type_ == self.TYPE_FILTER:
key = model[row][self.KEY]
current_filter = self.filters.query(key)
if current_filter:
if filter_:
filter_ |= current_filter
else:
filter_ = current_filter
elif type_ == self.TYPE_NOCAT:
                # if nocat is selected, combine all filters, negate and merge
all_ = [self.filters.query(k) for k in self.filters.keys()]
nocat_filter = all_ and -reduce(lambda x, y: x | y, all_)
if nocat_filter:
if filter_:
filter_ |= nocat_filter
else:
filter_ = nocat_filter
elif type_ == self.TYPE_ALL:
filter_ = None
break
return filter_
def unfilter(self):
self.filter_text("")
def __add_fav(self, songs):
songs = [s for s in songs if s in self.__stations]
type(self).__librarian.move(
songs, self.__stations, self.__fav_stations)
def __remove_fav(self, songs):
songs = [s for s in songs if s in self.__fav_stations]
type(self).__librarian.move(
songs, self.__fav_stations, self.__stations)
def __add(self, button):
parent = qltk.get_top_parent(self)
uri = (AddNewStation(parent).run(clipboard=True) or "").strip()
if uri != "":
self.__add_stations_from(uri)
def __add_stations(self, irfs: Collection[IRFile], uri: str) -> None:
print_d(f"Got {len(irfs)} station(s) from {uri}")
assert self.__fav_stations is not None
if not irfs:
msg = ErrorMessage(
self, _("No stations found"),
_("No Internet radio stations were found at %s.") %
util.escape(uri))
msg.run()
return
fav_uris = {af("~uri") for af in self.__fav_stations}
irfs = {af for af in irfs if af("~uri") not in fav_uris}
if irfs:
print_d(f"Adding {irfs} to favourites")
self.__fav_stations.add(irfs)
else:
message = WarningMessage(
self,
_("Nothing to add"),
_("All stations listed are already in your library."))
message.run()
def __add_stations_from(self, uri: str) -> None:
def on_done(irfs: Iterable[IRFile], uri: str):
GLib.idle_add(self.__add_stations, irfs, uri)
print_d("Quitting thread")
Thread(target=_get_stations_from, args=(uri, on_done)).start()
def Menu(self, songs, library, items):
in_fav = False
in_all = False
for song in songs:
if song in self.__fav_stations:
in_fav = True
elif song in self.__stations:
in_all = True
if in_fav and in_all:
break
iradio_items = []
button = MenuItem(_("Add to Favorites"), Icons.LIST_ADD)
button.set_sensitive(in_all)
connect_obj(button, 'activate', self.__add_fav, songs)
iradio_items.append(button)
button = MenuItem(_("Remove from Favorites"), Icons.LIST_REMOVE)
button.set_sensitive(in_fav)
connect_obj(button, 'activate', self.__remove_fav, songs)
iradio_items.append(button)
items.append(iradio_items)
menu = SongsMenu(self.__librarian, songs, playlists=False, remove=True,
queue=False, items=items)
return menu
def restore(self):
text = config.gettext("browsers", "query_text")
self.__searchbar.set_text(text)
if Query(text).is_parsable:
self.__filter_changed(self.__searchbar, text, restore=True)
keys = config.get("browsers", "radio").splitlines()
def select_func(row):
return row[self.TYPE] != self.TYPE_SEP and row[self.KEY] in keys
self.__inhibit()
view = self.view
if not view.select_by_func(select_func):
for row in view.get_model():
if row[self.TYPE] == self.TYPE_FAV:
view.set_cursor(row.path)
break
self.__uninhibit()
def __get_filter(self):
filter_ = self.__get_selection_filter()
text_filter = self.__filter or Query("")
if filter_:
filter_ &= text_filter
else:
filter_ = text_filter
return filter_
def can_filter_text(self):
return True
def filter_text(self, text):
self.__searchbar.set_text(text)
if Query(text).is_parsable:
self.__filter_changed(self.__searchbar, text)
self.activate()
def get_filter_text(self):
return self.__searchbar.get_text()
def activate(self):
filter_ = self.__get_filter()
libs = self.__get_selected_libraries()
songs = filter_.filter(itertools.chain(*libs))
self.songs_selected(songs)
def active_filter(self, song):
for lib in self.__get_selected_libraries():
if song in lib:
break
else:
return False
filter_ = self.__get_filter()
if filter_:
return filter_.search(song)
return True
def save(self):
text = self.__searchbar.get_text()
config.settext("browsers", "query_text", text)
selection = self.view.get_selection()
model, rows = selection.get_selected_rows()
names = filter(None, [model[row][self.KEY] for row in rows])
config.set("browsers", "radio", "\n".join(names))
def scroll(self, song):
# nothing we care about
if song not in self.__stations and song not in self.__fav_stations:
return
path = None
for row in self.view.get_model():
if row[self.TYPE] == self.TYPE_FILTER:
if self.filters.query(row[self.KEY]).search(song):
path = row.path
break
else:
# in case nothing matches, select all
path = (0,)
self.view.set_cursor(path)
self.view.scroll_to_cell(path, use_align=True, row_align=0.5)
def status_text(self, count: int, time: Optional[str] = None) -> str:
return numeric_phrase("%(count)d station", "%(count)d stations", count, 'count')
from quodlibet import app
if not app.player or app.player.can_play_uri("http://"):
browsers = [InternetRadio]
else:
browsers = []
|
quodlibet/quodlibet
|
quodlibet/browsers/iradio.py
|
Python
|
gpl-2.0
| 32,430
|
"""
Functions for generating several types of classic networks.
Functions:
build_star_network
build_chain_network
build_ring_network
build_random_network
build_clique_network
build_hypercube_network
build_grid_network
"""
import math
import random

import jbnetwork as jbn
def build_star_network(size):
"""Build a star network. Returns Network object."""
network = jbn.Network()
for i in range(1, size):
network.add_link(0, i)
return network
def build_chain_network(size):
"""Build a chain network. Returns Network object."""
network = jbn.Network()
for i in range(size-1):
network.add_link(i, i+1)
return network
def build_ring_network(size):
"""Build a ring network. Returns Network object."""
network = jbn.Network()
for i in range(size-1):
network.add_link(i, i+1)
network.add_link(0, size-1)
return network
def build_random_network(size, prob):
"""Build a random (Erdos-Renyi) network. Returns Network object."""
network = jbn.Network()
for i in range(size):
network.add_node(i)
for i in range(size-1):
for j in range(i+1, size):
if random.random() < prob:
network.add_link(i, j)
return network
def build_clique_network(size):
"""Build a clique network. Returns Network object."""
network = jbn.Network()
for i in range(size-1):
for j in range(i+1, size):
network.add_link(i, j)
return network
def build_hypercube_network(size):
"""Build a hypercube network. Returns Network object."""
# pylint: disable=missing-docstring
def _rec_build_hc_net(size):
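        # Recursively build a hypercube of half the size, duplicate it,
        # and link every node to its twin in the copy.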
if size == 1:
return {0:{}}
network = {}
        network1 = _rec_build_hc_net(size // 2)
        for node1 in network1:
            network[node1] = network1[node1]
            network[node1 + size // 2] = {}
            for node2 in network1[node1]:
                network[node1 + size // 2][node2 + size // 2] = 1
            network[node1][node1 + size // 2] = 1
            network[node1 + size // 2][node1] = 1
return network
# Find largest power of 2 <= size
pow2size = 2**int(math.log(size, 2))
network = _rec_build_hc_net(pow2size)
    return jbn.Network(from_dict=network)
def build_grid_network(dim):
"""Build a grid network. Returns Network object.
arguments
dim -- (x, y) tuple of dimensions
"""
network = jbn.Network()
    for node in range(dim[0] * dim[1]):
        if (node+1) % dim[0] != 0:
            network.add_link(node, node+1)
        if node < (dim[1] - 1)*dim[0]:
            network.add_link(node, node+dim[0])
return network
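
# Example usage (a minimal sketch; assumes jbnetwork.Network exposes the
# add_node/add_link API used above):
if __name__ == '__main__':
    star = build_star_network(5)       # node 0 linked to nodes 1..4
    ring = build_ring_network(6)       # a 6-cycle
    grid = build_grid_network((3, 4))  # a 3x4 lattice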
|
jbchouinard/jbnetwork
|
jbnetworkfactory.py
|
Python
|
mit
| 2,645
|
from channels import route_class
from applications.server import ApplicationWebSocket
# The channel routing defines what channels get handled by what consumers,
# including optional matching on message attributes. In this example, we route
# all WebSocket connections to the class-based ApplicationWebSocket consumer
# (the consumer class itself specifies what channels it wants to consume).
channel_routing = [
    route_class(ApplicationWebSocket, path=r'^/ws'),
]
|
jimmy201602/django-gateone
|
django_gateone/routing.py
|
Python
|
gpl-3.0
| 460
|
# Copyright (c) 2013-2014 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import unittest
import ansiblelint.utils
from ansiblelint import AnsibleLintRule
from rules import EMatcherRule, UnsetVariableMatcherRule
class TestRule(unittest.TestCase):
def test_rule_matching(self):
text = ""
filename = 'test/ematchtest.txt'
with open(filename) as f:
text = f.read()
ematcher = EMatcherRule.EMatcherRule()
matches = ematcher.matchlines(dict(path=filename, type='playbooks'), text)
self.assertEqual(len(matches), 3)
def test_rule_postmatching(self):
text = ""
filename = 'test/bracketsmatchtest.txt'
with open(filename) as f:
text = f.read()
rule = UnsetVariableMatcherRule.UnsetVariableMatcherRule()
matches = rule.matchlines(dict(path=filename, type='playbooks'), text)
self.assertEqual(len(matches), 2)
|
charleswhchan/ansible-lint
|
test/TestLintRule.py
|
Python
|
mit
| 1,975
|
from rest_framework import permissions as rest_permissions
from rest_framework import viewsets
from django.core.urlresolvers import reverse_lazy
from django.views.generic import CreateView, UpdateView
from geotrek.flatpages.serializers import FlatPageSerializer
from geotrek.flatpages import models as flatpages_models
from .forms import FlatPageForm
class FlatPageViewSet(viewsets.ModelViewSet):
"""
A viewset for viewing and editing flat pages instances.
"""
model = flatpages_models.FlatPage
serializer_class = FlatPageSerializer
permission_classes = [rest_permissions.DjangoModelPermissionsOrAnonReadOnly]
def get_queryset(self):
return flatpages_models.FlatPage.objects.filter(published=True)
class FlatPageCreate(CreateView):
model = flatpages_models.FlatPage
form_class = FlatPageForm
success_url = reverse_lazy('admin:flatpages_flatpage_changelist')
class FlatPageUpdate(UpdateView):
model = flatpages_models.FlatPage
form_class = FlatPageForm
success_url = reverse_lazy('admin:flatpages_flatpage_changelist')
|
mabhub/Geotrek
|
geotrek/flatpages/views.py
|
Python
|
bsd-2-clause
| 1,088
|
import json
import getpass
import git
import os.path
import os
import sys
import signal
import subprocess
import tempfile
import logging
logging.basicConfig(format='%(message)s', level=logging.INFO)
# Silences Traceback on Ctrl-C
signal.signal(signal.SIGINT, lambda x,y: os._exit(1))
BOLD = '\033[1m'
ITALIC = '\033[3m'
UNDERLINE = '\033[4m'
RED = '\033[31m'
GREEN = '\033[32m'
YELLOW = '\033[33m'
BLUE = '\033[34m'
MAGENTA = '\033[35m'
CYAN = '\033[36m'
RESET = '\033[0m'
def main_branch_name(repo):
"""
Returns the name of the 'main' branch.
Git defaults to 'master', but it doesn't have to be!
"""
ref = git.refs.symbolic.SymbolicReference(repo, 'refs/remotes/origin/HEAD')
name = ref.ref.name
return name[len('origin/'):]
def fatal_if_dirty(repo):
"""
Checks whether there are pending changes and exits the program if there are.
"""
info('Checking for pending changes')
if repo.is_dirty():
        warn('You have uncommitted changes; proceeding automatically would be dangerous.')
info(repo.git.status('-s'))
exit(1)
def update_main(repo, initial_branch):
"""
Switches to the main branch and pulls from origin. If an exception occurs
it switches back to the initial branch and exits.
"""
main = main_branch_name(repo)
info('Switching to %s branch' % main)
try:
repo.heads[main].checkout()
except BaseException as e:
fatal('Could not checkout %s: %s' % (main, e))
info('Pulling updates for %s branch' % main)
try:
repo.git.remote('update', '--prune')
repo.remotes.origin.pull('--no-tags')
except BaseException as e:
warn('Failed to update %s: %s' % (main, e))
initial_branch.checkout()
c = prompt_y_n('Continue anyway?')
if not c:
exit(1)
def get_branch_name(name):
"""
Returns the full, prefixed branch name.
"""
username = get_github_creds()['username']
return '%s/%s' % (username, name)
def get_auth_filename():
"""
Returns the full path to ~/.github-auth.
"""
return os.path.join(os.path.expanduser('~'), '.github-auth')
def get_github_creds():
"""
Returns a dict containing GitHub auth details. Exits with an error if the
file does not exist.
"""
fn = get_auth_filename()
if not os.path.isfile(fn):
fatal("Missing GitHub credentials. Did you run `git github-login`?")
with open(fn) as auth_file:
return json.load(auth_file)
def get_script_path():
"""
Returns the location of the current script.
"""
return os.path.dirname(os.path.realpath(sys.argv[0]))
def get_editor(repo):
"""
Returns the editor from env vars.
"""
return (repo.git.config("core.editor") or
os.environ.get("GIT_EDITOR") or
os.environ.get("VISUAL") or
os.environ.get("EDITOR", "vi"))
def edit(repo, text):
"""
Opens the user's editor with predefined text and returns the edited copy.
"""
(fd, name) = tempfile.mkstemp(prefix="git-workflow-", suffix=".txt", text=True)
try:
f = os.fdopen(fd, "w")
f.write(text)
f.close()
cmd = "%s \"%s\"" % (get_editor(repo), name)
rc = subprocess.call(cmd, shell=True)
if rc:
fatal('Edit failed (%s)' % cmd)
f = open(name)
t = f.read()
f.close()
finally:
os.unlink(name)
return t
def prompt(msg, default='', password=False):
"""
Wrapper around raw_input and getpass.getpass.
"""
suffix = ''
if default != '':
suffix = '[%s] ' % default
msg = '%s: %s' % (msg, suffix)
if password:
answer = getpass.getpass(msg)
else:
# raw_input in python2, input in python3
try:
answer = raw_input(msg)
except NameError:
answer = input(msg)
return answer or default
def prompt_y_n(msg, default=False):
"""
Prompt user with given message for a yes/no answer (returning a boolean).
If user hits 'enter' w/o supplying an answer, return 'default' value.
"""
suffix = ' [y/N]' # default answer is 'No'
if default:
suffix = ' [Y/n]' # default answer is 'Yes'
answer = prompt(msg + suffix)
if answer.lower() in ['y', 'yes']:
return True
elif answer == '':
return default
else:
return False
def fatal(msg, code=1):
"""
Prints a red error message and then exits the program.
"""
error(msg)
sys.exit(code)
def error(msg):
"""
Prints a red error message.
"""
logging.error(RED + BOLD + msg + RESET)
def info(msg):
"""
Prints an info message in blue.
"""
logging.info(BLUE + ITALIC + '> ' + msg + RESET)
def success(msg):
"""
Prints a message in green.
"""
    logging.info(GREEN + '> ' + msg + RESET)
def warn(msg):
"""
Prints a warning in yellow.
"""
logging.warning(YELLOW + '> ' + msg + RESET)
|
dpup/git-workflow
|
util.py
|
Python
|
apache-2.0
| 4,588
|
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
@testing.parameterize(*testing.product_dict(
[
{'in_shape': (10, 5), 'out_shape': (10,)},
{'in_shape': (0, 5), 'out_shape': (0,)},
{'in_shape': (1, 33), 'out_shape': (1,)},
{'in_shape': (10, 5), 'out_shape': (10,)},
{'in_shape': (10, 5), 'out_shape': (10,)},
],
[
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
],
))
class TestSelectItem(unittest.TestCase):
def setUp(self):
self.x_data = numpy.random.uniform(
-1, 1, self.in_shape).astype(self.dtype)
self.t_data = numpy.random.randint(
0, 2, self.out_shape).astype(numpy.int32)
self.gy_data = numpy.random.uniform(
-1, 1, self.out_shape).astype(self.dtype)
self.check_backward_options = {}
if self.dtype == numpy.float16:
self.check_backward_options = {'atol': 0.05, 'rtol': 0.05}
def check_forward(self, x_data, t_data):
x = chainer.Variable(x_data)
t = chainer.Variable(t_data)
y = functions.select_item(x, t)
y_exp = cuda.to_cpu(x_data)[range(t_data.size), cuda.to_cpu(t_data)]
self.assertEqual(y.data.dtype, self.dtype)
numpy.testing.assert_equal(cuda.to_cpu(y.data), y_exp)
def test_forward_cpu(self):
self.check_forward(self.x_data, self.t_data)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x_data),
cuda.to_gpu(self.t_data))
def check_backward(self, x_data, t_data, gy_data):
gradient_check.check_backward(
functions.SelectItem(),
(x_data, t_data), gy_data, eps=0.01, **self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(self.x_data, self.t_data, self.gy_data)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x_data),
cuda.to_gpu(self.t_data),
cuda.to_gpu(self.gy_data))
@testing.parameterize(
{'t_value': -1, 'valid': False},
{'t_value': 3, 'valid': False},
{'t_value': 0, 'valid': True},
)
class TestSelectItemValueCheck(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (1, 2)).astype(numpy.float32)
self.t = numpy.array([self.t_value], dtype=numpy.int32)
self.original_debug = chainer.is_debug()
chainer.set_debug(True)
def tearDown(self):
chainer.set_debug(self.original_debug)
def check_value_check(self, x_data, t_data):
x = chainer.Variable(x_data)
t = chainer.Variable(t_data)
if self.valid:
            # Check that no exception is raised
functions.select_item(x, t)
else:
with self.assertRaises(ValueError):
functions.select_item(x, t)
def test_value_check_cpu(self):
self.check_value_check(self.x, self.t)
@attr.gpu
def test_value_check_gpu(self):
self.check_value_check(self.x, self.t)
testing.run_module(__name__, __file__)
|
kiyukuta/chainer
|
tests/chainer_tests/functions_tests/array_tests/test_select_item.py
|
Python
|
mit
| 3,311
|
#!/usr/bin/env python
from copy import deepcopy
from trainingobjs import (findLengths, TrainingDay, TrainingItem, toRunItem, Week,
REST, RACE)
from typing import Dict, List
# caloric estimates
# swim ~390 kcal / 1 mile
# bike ~650 kcal / 12.5 mile
# run ~450 kcal / 3.1 mile
# friendly names for typing
TrainingPlan = List[Week]
TrainingCollection = Dict[str, TrainingPlan]
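# Hypothetical helper (the per-mile rates come from the rough estimates in the
# comments above, which are assumptions rather than measured data): a quick
# kcal estimate for a single workout.
KCAL_PER_MILE = {'swim': 390.0, 'bike': 650.0 / 12.5, 'run': 450.0 / 3.1}
def estimate_kcal(sport: str, miles: float) -> float:
    """Rough calorie estimate; sport is one of 'swim', 'bike' or 'run'."""
    return KCAL_PER_MILE[sport] * miles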
def toFiveDays(training):
adjusted = []
for week in training:
# hard code resting on Thursday if there are too many working days
restThursday = bool(len([item for item in week
if item != REST]) > 5)
thursday = week.fri
# TODO need to be smarter about this
if restThursday:
thursday = REST
week = Week(week.tue, week.wed, week.thu, thursday, week.sat,
week.sun, REST)
adjusted.append(week)
# remove the race from the last week
week = list(adjusted[-1])
week[-2] = RACE
adjusted[-1] = Week(*week)
return adjusted
# #### half ironman training
# SWIM 1.9 km
# BIKE 90 km
# RUN 21.1 km
# https://www.triathlete.com/training/super-simple-ironman-70-3-triathlon-training-plan/
# #### olympic triathlon
# SWIM 1.5 km = 0.93 mile
# BIKE 40 km = 24.8 mile
# RUN 10 km = 6.2 mile
# http://www.chicotriathlonclub.com/Documents/Olympic_Distance_Program.pdf
triathlon: TrainingCollection = {'olympic':
# week 1
[Week(REST, TrainingItem('Run 25 min', 'Moderate RPE 6-7'),
TrainingDay([TrainingItem('Swim 500 m', 'Tempo RPE 7 + strength'),
TrainingItem('Bike 30 min', 'Tempo RPE 7')]),
TrainingItem('Run 20 min', 'Tempo RPE 7'),
TrainingDay([TrainingItem('Swim 750 m', 'Long RPE 6 + strength'),
TrainingItem('Bike 45 min', 'Tempo RPE 6-7')]),
TrainingItem('Bike 60 min', 'Long RPE 6'),
TrainingItem('Run 30 min', 'Long RPE 6')),
# week 2
Week(REST, TrainingItem('Run 30 min', 'Moderate RPE 6-7'),
TrainingDay([TrainingItem('Swim 500 m', 'Tempo RPE 7 + strength'),
TrainingItem('Bike 30 min', 'Tempo RPE 7')]),
TrainingItem('Run 20 min', 'Tempo RPE 7'),
TrainingDay([TrainingItem('Swim 1000 m', 'Long RPE 6 + strength'),
TrainingItem('Bike 45 min', 'Tempo RPE 6-7')]),
TrainingItem('Bike 70 min', 'Long RPE 6'),
TrainingItem('Run 40 min', 'Long RPE 6')),
# week 3
Week(REST, TrainingItem('Run 30 min', 'Moderate RPE 6-7'),
TrainingDay([TrainingItem('Swim 750 m', 'Tempo RPE 7 + strength'),
TrainingItem('Bike 35 min', 'Tempo RPE 7')]),
TrainingItem('Run 25 min', 'Tempo RPE 7'),
TrainingDay([TrainingItem('Swim 1000 m', 'Long RPE 6 + strength'),
TrainingItem('Bike 50 min', 'Tempo RPE 6-7')]),
TrainingItem('Bike 75 min', 'Long RPE 6'),
TrainingItem('Run 40 min', 'Long RPE 6')),
# week 4
Week(REST, TrainingItem('Run 25 min', 'Moderate RPE 6-7'),
TrainingDay([TrainingItem('Swim 500 m', 'Tempo RPE 7 + strength'),
TrainingItem('Bike 25 min', 'Tempo RPE 7')]),
TrainingItem('Run 20 min', 'Tempo RPE 7'),
TrainingDay([TrainingItem('Swim 1000 m', 'Long RPE 6 + strength'),
TrainingItem('Bike 40 min', 'Tempo RPE 6-7')]),
TrainingItem('Bike 60 min', 'Long RPE 6'),
TrainingItem('Run 30 min', 'Long RPE 6')),
# week 5
Week(REST, TrainingItem('Run 30 min', 'Moderate RPE 6-7'),
TrainingDay([TrainingItem('Swim 750 m', 'Tempo RPE 7 + strength'),
TrainingItem('Bike 30 min', 'Tempo RPE 7')]),
TrainingItem('Run 20 min', 'Tempo RPE 7'),
TrainingDay([TrainingItem('Swim 1000 m', 'Long RPE 6 + strength'),
TrainingItem('Bike 45 min', 'Tempo RPE 6-7')]),
TrainingItem('Bike 70 min', 'Long RPE 6'),
TrainingItem('Run 35 min', 'Long RPE 6')),
# week 6
Week(REST, TrainingItem('Run 35 min', 'Moderate RPE 6-7'),
TrainingDay([TrainingItem('Swim 750 m', 'Tempo RPE 7 + strength'),
TrainingItem('Bike 35 min', 'Tempo RPE 7')]),
TrainingItem('Run 25 min', 'Tempo RPE 7'),
TrainingDay([TrainingItem('Swim 1250 m', 'Long RPE 6 + strength'),
TrainingItem('Bike 50 min', 'Tempo RPE 6-7')]),
TrainingItem('Bike 80 min', 'Long RPE 6'),
TrainingItem('Run 45 min', 'Long RPE 6')),
# week 7
Week(REST, TrainingItem('Run 40 min', 'Moderate RPE 6-7'),
TrainingDay([TrainingItem('Swim 750 m', 'Tempo RPE 7 + strength'),
TrainingItem('Bike 40 min', 'Tempo RPE 7')]),
TrainingItem('Run 25 min', 'Tempo RPE 7'),
TrainingDay([TrainingItem('Swim 1500 m', 'Long RPE 6 + strength'),
TrainingItem('Bike 55 min', 'Tempo RPE 6-7')]),
TrainingItem('Bike 90 min', 'Long RPE 6'),
TrainingItem('Run 50 min', 'Long RPE 6')),
# week 8
Week(REST, TrainingItem('Run 25 min', 'Moderate RPE 6-7'),
TrainingDay([TrainingItem('Swim 750 m', 'Tempo RPE 7 + strength'),
TrainingItem('Bike 25 min', 'Tempo RPE 7')]),
TrainingItem('Run 20 min', 'Tempo RPE 7'),
TrainingDay([TrainingItem('Swim 1250 m', 'Long RPE 6 + strength'),
TrainingItem('Bike 40 min', 'Tempo RPE 6-7')]),
TrainingItem('Bike 60 min', 'Long RPE 6'),
TrainingItem('Run 35 min', 'Long RPE 6')),
# week 9
Week(REST, TrainingItem('Run 35 min', 'Moderate RPE 6-7'),
TrainingDay([TrainingItem('Swim 750 m', 'Tempo RPE 7 + strength'),
TrainingItem('Bike 30 min', 'Tempo RPE 7')]),
TrainingItem('Run 25 min', 'Tempo RPE 7'),
TrainingDay([TrainingItem('Swim 1250 m', 'Long RPE 6 + strength'),
TrainingItem('Bike 50 min', 'Tempo RPE 6-7')]),
TrainingItem('Bike 85 min', 'Long RPE 6'),
TrainingItem('Run 45 min', 'Long RPE 6')),
# week 10
Week(REST, TrainingItem('Run 40 min', 'Moderate RPE 6-7'),
TrainingDay([TrainingItem('Swim 750 m', 'Tempo RPE 7 + strength'),
TrainingItem('Bike 35 min', 'Tempo RPE 7')]),
TrainingItem('Run 25 min', 'Tempo RPE 7'),
TrainingDay([TrainingItem('Swim 1500 m', 'Long RPE 6 + strength'),
TrainingItem('Bike 60 min', 'Tempo RPE 6-7')]),
TrainingItem('Bike 100 min', 'Long RPE 6'),
TrainingItem('Run 50 min', 'Long RPE 6')),
# week 11
Week(REST, TrainingItem('Run 35 min', 'Moderate RPE 6-7'),
TrainingDay([TrainingItem('Swim 750 m', 'Tempo RPE 7 + strength'),
TrainingItem('Bike 40 min', 'Tempo RPE 7')]),
TrainingItem('Run 25 min', 'Tempo RPE 7'),
TrainingDay([TrainingItem('Swim 1500 m', 'Long RPE 6 + strength'),
TrainingItem('Bike 45 min', 'Tempo RPE 6-7')]),
TrainingItem('Bike 75 min', 'Long RPE 6'),
TrainingItem('Run 40 min', 'Long RPE 6')),
# week 12
Week(REST,
TrainingDay([TrainingItem('Swim 750 m', 'Tempo RPE 7'),
TrainingItem('Run 20 min', 'Tempo RPE 7')]),
TrainingItem('Bike 30 min', 'Tempo RPE 7'),
REST, REST,
TrainingDay([TrainingItem('Bike 20 min', 'Easy RPE 6-7'),
TrainingItem('Run 15 min', 'Easy with pick-ups RPE 6')]),
RACE)],
# https://www.californiatriathlon.org/coaching/training-plans/12-week-olympic-training-plan/
'olympic2':
# week 1
[Week(REST, TrainingItem('Swim 40 min',
'40 minute easy swim, taking breaks as needed'),
TrainingItem('Bike 60 min', 'Easy 60 minute bike ride'),
TrainingItem('Run 45 min',
'WU 10 minutes (brisk walk), easy 30 minute run, 5 minute CD'),
TrainingItem('Swim 40 min', '40 minute easy swim, taking breaks as needed'),
TrainingItem('Bike 75 min',
'WU 10 minutes (easy spinning), 60 minute medium effort, '
'5 minute cool down'),
TrainingItem('Run 50 min', 'WU 10 minutes (brisk walk), 40 minute easy run')),
# week 2
Week(REST, TrainingItem('Swim 40 min',
'40 minute easy swim, taking breaks as needed'),
TrainingItem('Bike 60 min', 'Easy 60 minute bike ride'),
TrainingItem('Run 55 min', 'WU 10 minutes (easy jog), easy 40 minute run, '
'5 minute CD'),
TrainingItem('Swim 40 min', '40 minute easy swim, taking breaks as needed'),
TrainingItem('Bike 75 min',
'WU 10 minutes (easy spinning), 60 minute medium effort, '
'5 minute cool down'),
TrainingItem('Run 60 min', 'WU 10 minutes (brisk walk), 50 minute easy run')),
# week 3
Week(REST, TrainingItem('Swim 40 min', '40 minute easy swim, taking breaks '
'as needed'),
TrainingItem('Bike 60 min', 'Easy 60 minute bike ride'),
TrainingItem('Run 60 min', 'WU 10 minutes (easy jog), easy 45 minute run, '
'5 minute CD'),
TrainingItem('Swim 40 min', '40 minute easy swim, taking breaks as needed'),
TrainingItem('Bike 75 min',
'WU 10 minutes (easy spinning), 60 minute medium effort, '
'5 minute cool down'),
TrainingItem('Run 60 min', 'WU 10 minutes (brisk walk), 50 minute easy run')),
# week 4
Week(REST, TrainingItem('Swim 60 min',
'10 minute WU, swim 4x 200 at a medium-hard effort with '
'1 minute recovery between sets, CD 5 minutes easy'),
REST, TrainingItem('Bike 75 min',
'WU 10 minutes easy spinning, 60 minutes easy effort, '
'5 minute CD'),
REST, TrainingItem('Run 75 min',
'WU 10 minutes (brisk walk), 60 minutes easy to medium '
'effort, 5 minute CD'),
REST),
# week 5
Week(REST, TrainingItem('Swim 60 min',
'WU 10 minutes, swim 4x 250 at a medium effort with 1 minute'
' recovery between sets, CD 5 minutes easy'),
TrainingItem('Bike 70 min', 'Easy 70 minute bike ride'),
TrainingItem('Run 60 min',
'WU 10 minutes (easy jog), 45 minute easy run with 10 min hard in'
' the middle, 5 minute CD'),
TrainingItem('Swim 60 min', 'WU 10 minutes, 4x25 sprints, 30 minutes easy spin,'
' CD 5 minutes'),
TrainingItem('Bike 115 min',
'WU 10 minutes (easy spinning), 100 minute medium effort, '
'5 minute cool down'),
TrainingItem('Run 75 min', 'WU 10 minutes (easy jog), 60 minute easy run, '
'5 minute CD')),
# week 6
Week(REST, TrainingItem('Swim 60 min',
'WU 10 minutes, swim 4x 250 at a medium effort with 1 minute'
' recovery between sets, CD 5 minutes easy'),
TrainingItem('Bike 70 min', 'Easy 70 minute bike ride'),
TrainingItem('Run 60 min', 'WU 10 minutes (easy jog), 45 minute easy run, '
'5 minute CD'),
TrainingItem('Swim 60 min',
'WU 10 minutes, 5x25 sprints, 35 minutes easy spin, CD 5 minutes'),
TrainingItem('Bike 135 min',
'WU 10 minutes (easy spinning), 120 minute medium effort, '
'5 minute cool down'),
TrainingItem('Run 85 min',
'WU 10 minutes (easy jog), 70 minute easy run, 5 minute CD')),
# week 7
Week(REST,
TrainingItem('Swim 60 min',
'WU 10 minutes, swim 4x 250 at a medium effort with 1 minute '
'recovery between sets, CD 5 minutes easy'),
TrainingItem('Bike 70 min', 'Easy 70 minute bike ride'),
TrainingItem('Run 60 min',
'WU 10 minutes (easy jog), 45 minute easy run with 10 min hard in '
'the middle, 5 minute CD'),
TrainingItem('Swim 60 min', 'WU 10 minutes, 6x25 sprints, 40 minutes easy spin,'
' CD 5 minutes'),
TrainingItem('Bike 160 min',
'WU 10 minutes (easy spinning), 140 minutes medium with 10 minutes'
' in the middle at hard effort, 5 minutes CD'),
TrainingItem('Run 95 min', 'WU 10 minutes (easy jog), 80 minute easy run, '
'5 minute CD')),
# week 8
Week(REST, TrainingItem('Swim 60 min',
'WU 10 minutes, 30 minutes steady race effort, 10 minute '
'easy swim, CD 5 minutes '),
REST, TrainingItem('Run 60 min', 'WU 10 minutes (easy jog), 45 minute easy run,'
' 5 minute CD'),
REST,
TrainingItem('Bike 200 min',
'WU 10 minutes (easy spinning), 180 minutes sustained medium '
'effort, 10 minutes CD'),
REST),
# week 9
Week(REST, TrainingItem('Swim 60 min', 'WU 10 minutes, 4x 300 medium effort with 1 '
'minute recovery between sets, CD 5 minutes'),
TrainingItem('Bike 70 min', 'Easy 70 minute bike ride'),
TrainingItem('Run 65 min', 'WU 10 minutes (brisk jog), easy 50 minute run, '
'5 minute CD'),
TrainingItem('Swim 60 min', 'WU 10 minutes, 4x50 sprints, 35 minutes easy '
'spin, CD 5 minutes'),
TrainingItem('Brick 175 min',
'Bike 10 minutes easy spinning, 150 minutes moderated spinning, '
'5 minute CD. Immediately transition to running shoes and '
'run 10 minutes easy'),
TrainingItem('Run 40 min', 'WU 10 minutes (brisk walk), 30 minute easy run')),
# week 10
Week(REST, TrainingItem('Swim 60 min',
'WU 10 minutes, 4x 350 medium effort with 1 minute recovery '
'between sets, CD 5 minutes'),
TrainingItem('Bike 60 min', 'Easy 60 minute bike ride'),
TrainingItem('Run 65 min',
'WU 10 minutes (brisk jog), easy 50 minute run with 10 minutes '
'hard in the middle, 5 minute CD'),
TrainingItem('Swim 60 min', 'WU 10 minutes, 4x50 sprints, Spin 35 min easy'),
TrainingItem('Brick 195 min',
'Bike 10 minutes easy spinning, 160 minutes moderated spinning, '
'10 minute CD. Immediately transition to running shoes and '
'run 10 minutes easy'),
TrainingItem('Run 50 min', 'WU 10 minutes (brisk walk), 40 minute easy run')),
# week 11
Week(REST, TrainingItem('Swim 60 min',
'WU 10 minutes, 4x 300 easy/medium effort with 1 minute '
'recovery between sets, CD 5 minutes'),
TrainingItem('Bike 45 min', 'Medium effort 45 minute bike ride'),
TrainingItem('Run 55 min', 'WU 10 minutes (easy jog), easy 40 minute run, '
'5 minute CD'),
TrainingItem('Swim 60 min', 'WU 10 minutes, 5x25 sprints, 35 minutes easy spin,'
' CD 5 minutes'),
TrainingItem('Brick 130 min',
'Bike 10 minutes easy spinning, 120 minutes moderated spinning,'
' 5 minute CD. Immediately transition to running shoes and run '
'10 minutes easy'),
TrainingItem('Run 40 min',
'WU 10 minutes (brisk walk), 30 minute easy run')),
# week 12
Week(REST, TrainingItem('Run 30 min',
'10 minute WU (walk or slow jog), 15 minute easy run, '
'5 minute CD'),
TrainingItem('Bike 45 min', '45 minute easy spinning'),
TrainingItem('Swim 60 min',
'WU 5 minutes, 4x 200 at easy effort with '
'1 minute recovery between sets, 5 minute CD'),
REST,
TrainingDay([TrainingItem('Easy 10 min swim'),
TrainingItem('Easy 30 min bike'),
TrainingItem('Easy 15 min run')]), # not back to back
RACE)]}
def parseHal(raw):
program = []
for line in raw.split('\n'):
line = line.strip()
if not line:
continue
line = [toRunItem(item) for item in line.split('\t')[1:]]
week = Week(*line)
program.append(week)
return program
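def _demo_parse_hal():
    """
    Illustrative only: parse a single tab-delimited week. parseHal drops the
    leading week number via the [1:] slice and turns each remaining cell into
    a day entry with toRunItem, so seven cells follow the week number.
    """
    raw = '1\tRest\t3 mi run\t5 mi run\t3 mi run\tRest\t5 mi pace\t10 miles\n'
    return parseHal(raw)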
# all the following are tab delimited
runningraw = {'marathon': # https://www.halhigdon.com/training-programs/marathon-training/intermediate-2-marathon/
'''
1 Cross 3 mi run 5 mi run 3 mi run Rest 5 mi pace 10 miles
2 Cross 3 mi run 5 mi run 3 mi run Rest 5 mi run 11 miles
3 Cross 3 mi run 6 mi run 3 mi run Rest 6 mi pace 8 miles
4 Cross 3 mi run 6 mi run 3 mi run Rest 6 mi pace 13 miles
5 Cross 3 mi run 7 mi run 3 mi run Rest 7 mi run 14 miles
6 Cross 3 mi run 7 mi run 3 mi run Rest 7 mi pace 10 miles
7 Cross 4 mi run 8 mi run 4 mi run Rest 8 mi pace 16 miles
8 Cross 4 mi run 8 mi run 4 mi run Rest 8 mi run 17 miles
9 Cross 4 mi run 9 mi run 4 mi run Rest Rest Half Marathon
10 Cross 4 mi run 9 mi run 4 mi run Rest 9 mi pace 19 miles
11 Cross 5 mi run 10 mi run 5 mi run Rest 10 mi run 20 miles
12 Cross 5 mi run 6 mi run 5 mi run Rest 6 mi pace 12 miles
13 Cross 5 mi run 10 mi run 5 mi run Rest 10 mi pace 20 miles
14 Cross 5 mi run 6 mi run 5 mi run Rest 6 mi run 12 miles
15 Cross 5 mi run 10 mi run 5 mi run Rest 10 mi pace 20 miles
16 Cross 5 mi run 8 mi run 5 mi run Rest 4 mi pace 12 miles
17 Cross 4 mi run 6 mi run 4 mi run Rest 4 mi run 8 miles
18 Cross 3 mi run 4 mi run Rest Rest 2 mi run Marathon
''', # noqa: E128
'half': # https://www.halhigdon.com/training-programs/half-marathon-training/intermediate-1-half-marathon/
'''
1 30 min cross 3 mi run 4 mi run 3 mi run Rest 3 mi run 4 mi run
2 30 min cross 3 mi run 4 mi pace 3 mi run Rest 3 mi pace 5 mi run
3 40 min cross 3.5 mi run 5 mi run 3.5 mi run Rest Rest 6 mi run
4 40 min cross 3.5 mi run 5 mi pace 3.5 mi run Rest 3 mi run 7 mi run
5 40 min cross 4 mi run 6 mi run 4 mi run Rest 3 mi pace 8 mi run
6 50 min cross 4 mi run 6 mi pace 4 mi run Rest or easy run Rest 5-K Race
7 Rest 4.5 mi run 7 mi run 4.5 mi run Rest 4 mi pace 9 mi run
8 50 min cross 4.5 mi run 7 mi pace 4.5 mi run Rest 5 mi pace 10 mi run
9 60 min cross 5 mi run 8 mi run 5 mi run Rest or easy run Rest 10-K Race
10 Rest 5 mi run 8 mi pace 5 mi run Rest 5 mi pace 11 mi run
11 60 min cross 5 mi run 6 mi run 4 mi run Rest 3 mi pace 12 mi run
12 Rest 4 mi run 4 mi pace 2 mi run Rest Rest Half Marathon
''',
'half-n2': # https://www.halhigdon.com/training-programs/half-marathon-training/novice-2-half-marathon/
'''
1 60 min cross Rest 3 mi run 3 mi run 3 mi run Rest 4 mi run
2 60 min cross Rest 3 mi run 3 mi pace 3 mi run Rest 5 mi run
3 60 min cross Rest 3 mi run 4 mi run 3 mi run Rest 6 mi run
4 60 min cross Rest 3 mi run 4 mi pace 3 mi run Rest 7 mi run
5 60 min cross Rest 3 mi run 4 mi run 3 mi run Rest 8 mi run
6 60 min cross Rest 3 mi run 4 mi pace 3 mi run Rest 5-K Race
7 60 min cross Rest 3 mi run 5 mi run 3 mi run Rest 9 mi run
8 60 min cross Rest 3 mi run 5 mi pace 3 mi run Rest 10 mi run
9 60 min cross Rest 3 mi run 5 mi run 3 mi run Rest 10-K Race
10 60 min cross Rest 3 mi run 5 mi pace 3 mi run Rest 11 mi run
11 60 min cross Rest 3 mi run 5 mi run 3 mi run Rest 12 mi run
12 Rest Rest 3 mi run 2 mi pace 2 mi run Rest Half Marathon
''',
'ultra-hal': # https://www.halhigdon.com/training-programs/more-training/ultramarathon-50k/
'''
1 Rest 3 mi run 5 mi run 3 mi run Rest 5 mi pace 10 mi run
2 Rest 3 mi run 5 mi run 3 mi run Rest 5 mi run 1.5 hr run
3 Rest 3 mi run 6 mi run 3 mi run Rest 6 mi pace 8 mi run
4 Rest 3 mi run 6 mi run 3 mi run Rest 6 mi pace 13 mi run
5 Rest 3 mi run 7 mi run 3 mi run Rest 7 mi run 2 hr run
6 Rest 3 mi run 7 mi run 3 mi run Rest 7 mi pace 10 mi run
7 Rest 4 mi run 8 mi run 4 mi run Rest 5 mi pace 16 mi run
8 Rest 4 mi run 8 mi run 4 mi run Rest 8 mi run 2.5 hr run
9 Rest 4 mi run 9 mi run 4 mi run Rest Rest 13.1 mi
10 Rest 4 mi run 9 mi run 4 mi run Rest 9 mi pace 3 hr run
11 Rest 5 mi run 10 mi run 5 mi run Rest 10 mi run 20 mi run
12 Rest 5 mi run 6 mi run 5 mi run Rest 6 mi pace 2 hr run
13 Rest 5 mi run 10 mi run 5 mi run Rest 10 mi pace 20 mi run
14 Rest 5 mi run 6 mi run 5 mi run Rest 6 mi run 2.5 hr run
15 Rest 5 mi run 10 mi run 5 mi run Rest 10 mi pace 20 mi run
16 Rest 5 mi run 8 mi run 5 mi run Rest 10 mi pace 3 hr run
17 Rest 4 mi run 6 mi run 4 mi run Rest 4 mi pace 8 mi run
18 Rest 3 mi run 4 mi run Rest Rest 2 mi run 26.2 mi
19 Rest Rest Rest 3 mi run Rest 1.0 hr run 1.0 hr run
20 Rest 3 mi run 10 mi run 3 mi run Rest 1.0 hr pace 3.0 hr run
21 Rest 3 mi run 6 mi run 3 mi run Rest 1.5 hr run 2.0 hr run
22 Rest 3 mi run 10 mi run 3 mi run Rest 1.5 hr pace 4.0 hr run
23 Rest 4 mi run 7 mi run 4 mi run Rest 2.0 hr run 3.0 hr run
24 Rest 4 mi run 10 mi run 4 mi run Rest 2.0 hr pace 5.0 hr run
25 Rest 4 mi run 8 mi run 4 mi run Rest 1.0 hr run 1.0 hr run
26 Rest 4 mi run 4 mi run Rest Rest 2 mi run 31.1 mi
'''
}
# convert running to standard form
running: TrainingCollection = dict()
for name, training in runningraw.items():
mytraining = parseHal(training)
mytraining = toFiveDays(mytraining)
running[name] = mytraining
del mytraining
del runningraw
# metric century training program
# https://www.endurancemag.com/2014/05/cycling-8-week-metric-training-plan/
bike: TrainingCollection = {'century':
# week 1
[Week(REST, TrainingItem('Bike 60 min', 'Easy ride of 60 minutes at your own pace'),
REST, TrainingItem('Bike 60 min', 'Bike 60 min - 20 easy, 20 hard, 20 easy'),
REST, TrainingItem('Bike 20 miles'), REST),
Week(REST, TrainingItem('Bike 60 min', 'Easy ride of 60 minutes'), # week 2
REST, TrainingItem('Bike 60 min', 'Bike 60 min - 20 easy, 20 hard, 20 easy'),
REST, TrainingItem('Bike 24 miles'), REST),
Week(REST, TrainingItem('Bike 60 min', '60-minute ride with hills'), # week 3
REST, TrainingItem('Bike 60 min', 'Bike 60 min - 15 easy, 30 hard, 15 easy'), REST,
TrainingItem('Bike 30 miles'), REST),
Week(REST, TrainingItem('Bike 60 min', '60-minute ride with hills'), # week 4
REST, TrainingItem('Bike 60 min', 'Bike 60 min - 15 easy, 30 hard, 15 easy'),
REST, TrainingItem('Bike 34 miles'), REST),
# week 5
Week(REST, TrainingItem('Bike 60 min',
'60-minute ride with hills, pushing the last 20 minutes'),
REST, TrainingItem('Bike 60 min', 'Bike 60 min - 15 easy, 30 hard, 15 easy'),
REST, TrainingItem('Bike 41 miles'), REST),
# week 6
Week(REST, TrainingItem('Bike 60 min',
'60-minute ride with hills, pushing the last 20 minutes'),
REST, TrainingItem('Bike 60 min', 'Bike 60 min - 10 easy, 10 hard, 3 repetitions'),
REST, TrainingItem('Bike 46 miles'), REST),
# week 7
Week(REST, TrainingItem('Bike 60 min',
'60-minute ride with hills, pushing the last 30 minutes'),
REST, TrainingItem('Bike 60 min', 'Bike 60 min - 10 easy, 10 hard, 3 repetitions'),
REST, TrainingItem('Bike 54 miles'), REST),
# week 8
Week(REST, TrainingItem('Bike 60 min',
'60-minute ride with hills, pushing the last 30 minutes'),
REST, TrainingItem('Bike 60 min', 'Bike 60 min - 10 easy, 10 hard, 3 repetitions'),
REST, RACE,
REST)]} # Bike metric century
# #### custom plan for 2021 - raw version
rawWacky = deepcopy(triathlon['olympic'])
# pad with rest
for i in range(8):
rawWacky.append(Week(REST, REST, REST, REST, REST, REST, REST))
# merge with a marathon starting 2 weeks later
for i in range(len(running['marathon'])):
newweek = list(running['marathon'][i])
    newweek.insert(0, REST)  # shift the week so it starts with a rest day
    del newweek[-1]  # drop the trailing rest day so seven entries remain
    newweek[4], newweek[5] = newweek[5], newweek[4]  # swap two adjacent days
    rawWacky[i+2] = Week(*newweek) + rawWacky[i+2]  # overlay onto the padded triathlon plan
# function to aid making wacky
def makeWacky(left, right, sunday):
newweek = [REST]
newweek.append(left.mon)
newweek.append(right.wed)
newweek.append(left.wed)
newweek.append(right.fri)
newweek.append(right.sat)
if isinstance(sunday, str):
newweek.append(TrainingItem(sunday))
else:
newweek.append(sunday)
return Week(*newweek)
# #### custom plan for 2021 - reduced version
wacky = triathlon['olympic'][:2] # copy first weeks from triathlon
assert wacky[0].tue == triathlon['olympic'][0].tue
# overlap weeks
for i, descr in enumerate(('Run 7 miles', 'Run 7 miles', 'Run 5.5 miles', 'Run 9 miles', 'Run 9.5 miles',
'Run 6.5 miles', 'Run 10 miles', 'Run 11 miles', 'Run 8.5 miles')):
wacky.append(makeWacky(running['marathon'][i], triathlon['olympic'][i + 2], descr))
# race week
wacky.append(deepcopy(triathlon['olympic'][-1]))
# copy over the remainder of the marathon weeks
wacky.extend(deepcopy(running['marathon'][-8:-1]))
# final week is special
finalweek = deepcopy(running['marathon'][-1])
wacky.append(Week(REST, finalweek.mon, finalweek.tue, REST, finalweek.fri, REST, RACE))
# put together a single list of training
trainingplans: TrainingCollection = {**running, **bike, **triathlon} # type: ignore
trainingplans['rawwacky'] = rawWacky
trainingplans['wacky'] = wacky
# update the length of fields for printing the table
for name in trainingplans.keys():
lengths = findLengths(trainingplans[name])
for i in range(len(trainingplans[name])):
trainingplans[name][i].setTableLengths(lengths)
|
peterfpeterson/musings
|
python/trainingplans.py
|
Python
|
mit
| 35,242
|
#!/usr/bin/env python
import os
import sys
import dotenv
PROJECT_PATH = os.path.dirname(__file__)
dotenv.load_dotenv(os.path.join(PROJECT_PATH, ".env"))
dotenv.load_dotenv(os.path.join(PROJECT_PATH, ".env_defaults"))
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "eth_computation_market.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
pipermerriam/ethereum-computation-market-web
|
manage.py
|
Python
|
mit
| 441
|
__author__ = 'Audrey Roy'
__email__ = 'audreyr@gmail.com'
__version__ = '0.3.0'
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/binaryornot/__init__.py
|
Python
|
agpl-3.0
| 80
|
# -*- coding: utf-8 -*-
###############################################################################
# This file is part of Resistencia Cadiz 1812. #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
# Copyright (C) 2010, Pablo Recio Quijano, <pablo.recioquijano@alum.uca.es> #
###############################################################################
import os.path
import random
import gtk
from resistencia import configure, xdg, filenames
from resistencia.nls import gettext as _
from resistencia.tests import selection
from guadaboard import human_game_handler
class humanGameDialog:
def __init__(self, parent):
self.rules_computer = ''
self.formation_computer = ''
self.formation_player = ''
builder = gtk.Builder()
builder.add_from_file(xdg.get_data_path('glade/humanEsDialog.glade'))
def_path = configure.load_configuration()['se_path']
def_rules_path = def_path + '/rules'
def_formations_path = def_path + '/formations'
self.human_ia_dialog = builder.get_object('human_ia_dialog')
self.human_ia_dialog.set_transient_for(parent)
builder.get_object('file_chooser_es_ia').set_current_folder(def_rules_path)
builder.get_object('file_chooser_team_ia').set_current_folder(def_formations_path)
builder.get_object('file_chooser_team').set_current_folder(def_formations_path)
self.file_chooser_es_ia = builder.get_object('file_chooser_es_ia')
self.file_chooser_team_ia = builder.get_object('file_chooser_team_ia')
self.error_es_ia = builder.get_object("error_no_es_ia")
self.error_es_ia.connect('response', lambda d, r: d.hide())
self.error_es_ia.set_transient_for(self.human_ia_dialog)
self.error_team_ia = builder.get_object("error_no_team_ia")
self.error_team_ia.connect('response', lambda d, r: d.hide())
self.error_team_ia.set_transient_for(self.human_ia_dialog)
self.error_team = builder.get_object("error_no_team")
self.error_team.connect('response', lambda d, r: d.hide())
self.error_team.set_transient_for(self.human_ia_dialog)
self.dlg_bad_file = builder.get_object('dlg_bad_file')
self.dlg_bad_file.connect('response', lambda d, r: d.hide())
self.dlg_bad_file.set_transient_for(self.human_ia_dialog)
self.num_turns = 120
self.spin_turns = builder.get_object("spin_num_turns")
self.spin_turns.set_range(50,300)
self.spin_turns.set_increments(1,10)
self.spin_turns.set_value(self.num_turns)
#---------------
self.dont_save_game = False
self.human_team = 'A'
self.random_computer = False
builder.connect_signals(self)
def on_file_chooser_team_file_set(self, widget, data=None):
self.formation_player = widget.get_uri().replace('file://', '')
def on_file_chooser_es_ia_file_set(self, widget, data=None):
self.rules_computer = widget.get_uri().replace('file://', '')
def on_file_chooser_team_ia_file_set(self, widget, data=None):
self.formation_computer = widget.get_uri().replace('file://', '')
def on_radio_a_team_toggled(self, widget, data=None):
if widget.get_active():
self.human_team = 'A'
def on_radio_b_team_toggled(self, widget, data=None):
if widget.get_active():
self.human_team = 'B'
def on_check_random_team_toggled(self, widget, data=None):
self.random_computer = widget.get_active()
if self.random_computer:
self.file_chooser_es_ia.set_sensitive(False)
self.file_chooser_team_ia.set_sensitive(False)
else:
self.file_chooser_es_ia.set_sensitive(True)
self.file_chooser_team_ia.set_sensitive(True)
def on_spin_num_turns_change_value(self, widget, data=None):
self.num_turns = int(widget.get_value())
def on_spin_num_turns_value_changed(self, widget, data=None):
self.num_turns = int(widget.get_value())
def on_check_dont_save_toggled(self, widget):
self.dont_save_game = widget.get_active()
def on_human_ia_dialog_close(self, widget, data=None):
self.human_ia_dialog.hide()
def on_btn_cancel_clicked(self, widget, data=None):
self.human_ia_dialog.hide()
def on_btn_apply_clicked(self, widget, data=None):
correct = True
if len(self.formation_player) == 0:
self.error_team.run()
correct = False
if len(self.rules_computer) == 0 and not self.random_computer:
self.error_es_ia.run()
correct = False
if len(self.formation_computer) == 0 and not self.random_computer:
self.error_team_ia.run()
correct = False
if correct:
computer_team = None
if self.random_computer:
teams = selection.get_installed_teams()
                computer_team = random.choice(teams)
else:
computer_team = (self.rules_computer, self.formation_computer)
self.human_ia_dialog.destroy()
while gtk.events_pending():
gtk.main_iteration(False)
try:
human_game_handler.init_human_game(self.formation_player,
computer_team, self.human_team,
self.num_turns, self.dont_save_game)
except human_game_handler.FileError as e:
self.dlg_bad_file.format_secondary_text(e.msg)
self.dlg_bad_file.run()
self.quick_game.show()
|
pablorecio/resistencia-1812
|
resistencia/gui/human_game_dialog.py
|
Python
|
gpl-3.0
| 6,924
|
#!/bin/python3
import bisect
def is_palindrome(n):
return str(n) == str(n)[::-1]
def generate_palindromes():
return [i * j
for i in range(100, 1000)
for j in range(100, 1000)
if is_palindrome(i * j)]
def find_lt(a, x):
'Find rightmost value less than x'
i = bisect.bisect_left(a, x)
if i:
return a[i - 1]
raise ValueError
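# Sanity checks (illustrative): find_lt returns the rightmost element strictly
# less than x, so for the sorted list [1, 4, 9] both queries below yield 4.
assert find_lt([1, 4, 9], 9) == 4
assert find_lt([1, 4, 9], 5) == 4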
palindromes = sorted(generate_palindromes())
test_cases = int(input().strip())
for _ in range(test_cases):
n = int(input().strip())
print(find_lt(palindromes, n))
|
rootulp/hackerrank
|
python/euler004.py
|
Python
|
mit
| 570
|
"""Functions for statistical analysis"""
from .parametric import (
f_threshold_twoway_rm, f_threshold_mway_rm, f_twoway_rm, f_mway_rm)
from .permutations import permutation_t_test
from .cluster_level import (permutation_cluster_test,
permutation_cluster_1samp_test,
spatio_temporal_cluster_1samp_test,
spatio_temporal_cluster_test,
_st_mask_from_s_inds,
ttest_1samp_no_p,
summarize_clusters_stc)
from .multi_comp import fdr_correction, bonferroni_correction
from .regression import linear_regression
|
trachelr/mne-python
|
mne/stats/__init__.py
|
Python
|
bsd-3-clause
| 674
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# @author - Fekete Mihai <feketemihai@gmail.com>
# Copyright (C) 2011 TOTAL PC SYSTEMS (http://www.www.erpsystems.ro).
# Copyright (C) 2009 (<http://www.filsystem.ro>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Romania - Accounting",
"version" : "1.0",
"author" : "ERPsystems Solutions",
"website": "http://www.erpsystems.ro",
"category" : "Localization/Account Charts",
"depends" : ['account','account_chart','base_vat'],
"description": """
This is the module to manage the Accounting Chart, VAT structure, Fiscal Position and Tax Mapping.
It also adds the Registration Number for Romania in OpenERP.
================================================================================================================
Romanian accounting chart and localization.
""",
"demo" : [],
"data" : ['partner_view.xml',
'account_chart.xml',
'account_tax_code_template.xml',
'account_chart_template.xml',
'account_tax_template.xml',
'fiscal_position_template.xml',
'l10n_chart_ro_wizard.xml',
],
"installable": True,
}
|
OpusVL/odoo
|
addons/l10n_ro/__openerp__.py
|
Python
|
agpl-3.0
| 1,982
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#!/usr/bin/python
# coding: latin-1
smokecfg = {
'browser': 'Firefox',
# 'window position': '10, 10', # upper left coordinates
# 'window size': '2000, 1500',
'cssite': 'http://127.0.0.1:8080/client/',
# 'cssite': 'http://192.168.1.31:8080/client/',
'username': 'admin',
'password': 'password',
'badusername': 'badname',
'badpassword': 'badpassword',
'sqlinjection_1': '\' or 1=1 --\'',
'sqlinjection_2': '\' union select 1, badusername, badpassword, 1--\'',
'sqlinjection_3': '\' union select @@version,1,1,1--\'',
'sqlinjection_4': '\'; drop table user--\'',
'sqlinjection_5': '\'OR\' \'=\'',
'language': 'English',
# add a new user account
'new user account':{'username': 'JohnD',
'password': 'password',
'email': 'johndoe@aol.com',
'firstname': 'John',
'lastname': 'Doe',
'domain': 'ROOT',
'type': 'User', # either 'User' or 'Admin'
'timezone': 'US/Eastern [Eastern Standard Time]',
},
# add a new user under JohnD
'account': {'username': 'JohnD',
'domain': 'ROOT',
'type': 'User',
},
# add a new user
'new user': {'username': 'JaneD',
'password': 'password',
'email': 'janedoe@aol.com',
'firstname': 'Jane',
'lastname': 'Doe',
'timezone': 'US/Eastern [Eastern Standard Time]',
},
}
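# Illustrative usage (assumed; how the smoke tests actually consume this
# module may differ):
#   from smokecfg import smokecfg
#   base_url = smokecfg['cssite']
#   creds = (smokecfg['username'], smokecfg['password'])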
|
ikoula/cloudstack
|
test/selenium/cstests/smoketests/smokecfg.py
|
Python
|
gpl-2.0
| 2,928
|
# -*- coding: utf-8 -*-
#
# Input classes for fetching data via HTTP.
#
# Author: Just van den Broecke
#
import re
from urllib2 import Request, urlopen, URLError, HTTPError
import urllib
from stetl.component import Config
from stetl.input import Input
from stetl.util import Util
from stetl.packet import FORMAT
log = Util.get_log('httpinput')
class HttpInput(Input):
"""
Fetch data from remote services like WFS via HTTP protocol.
Base class: subclasses will do datatype-specific formatting of
the returned data.
produces=FORMAT.any
"""
# Start attribute config meta
# Applying Decorator pattern with the Config class to provide
# read-only config values from the configured properties.
@Config(ptype=str, default=None, required=True)
def url(self):
"""
The HTTP URL string.
Required: True
Default: None
"""
pass
@Config(ptype=dict, default=None, required=False)
def parameters(self):
"""
Flat JSON-like struct of the parameters to be appended to the url.
Example: (parameters require quotes)::
url = http://geodata.nationaalgeoregister.nl/natura2000/wfs
parameters = {
service : WFS,
version : 1.1.0,
request : GetFeature,
srsName : EPSG:28992,
outputFormat : text/xml; subtype=gml/2.1.2,
typename : natura2000
}
Required: False
Default: None
"""
pass
# End attribute config meta
def __init__(self, configdict, section, produces=FORMAT.any):
Input.__init__(self, configdict, section, produces)
log.info("url=%s parameters=%s" % (self.url, self.parameters))
def read_from_url(self, url, parameters=None):
"""
Read the data from the URL.
:param url: the url to fetch
:param parameters: optional dict of query parameters
:return:
"""
# log.info('Fetch data from URL: %s ...' % url)
req = Request(url)
try:
# Urlencode optional parameters
query_string = None
if parameters:
query_string = urllib.urlencode(parameters)
            # note: a non-None data argument makes urllib2 issue a POST;
            # with no parameters this remains a plain GET
            response = urlopen(req, query_string)
except HTTPError as e:
log.error('HTTPError fetching from URL %s: code=%d e=%s' % (url, e.code, e))
raise e
except URLError as e:
log.error('URLError fetching from URL %s: reason=%s e=%s' % (url, e.reason, e))
raise e
# everything is fine
return response.read()
def read(self, packet):
"""
Read the data from the URL.
:param packet:
:return:
"""
# Done with URL ?
if self.url is None:
packet.set_end_of_stream()
log.info("EOF URL reading done")
return packet
packet.data = self.format_data(self.read_from_url(self.url, self.parameters))
self.url = None
return packet
def format_data(self, data):
"""
Format response data, override in subclasses, defaults to returning original data.
        :param data:
:return:
"""
return data
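class JsonHttpInput(HttpInput):
    """
    Hypothetical subclass, shown only to illustrate the format_data hook
    mentioned in the HttpInput docstring: it parses the fetched body as JSON.
    """
    def format_data(self, data):
        import json
        return json.loads(data)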
class ApacheDirInput(HttpInput):
"""
Read file data from an Apache directory "index" HTML page.
Uses http://stackoverflow.com/questions/686147/url-tree-walker-in-python
produces=FORMAT.record. Each record contains file_name and file_data (other meta data like
date time is too fragile over different Apache servers).
"""
def __init__(self, configdict, section, produces=FORMAT.record):
HttpInput.__init__(self, configdict, section, produces)
# look for a link + a timestamp + a size ('-' for dir)
# self.parse_re = re.compile('href="([^"]*)".*(..-...-.... ..:..).*?(\d+[^\s<]*|-)')
# This appeared to be too fragile, e.g. different date formats per apache server
# default file extension to filter
self.file_ext = self.cfg.get('file_ext', 'xml')
# default regular expresion for file
self.file_reg_exp = self.cfg.get('file_reg_exp', 'href="([^"]*%s)"' % self.file_ext)
self.parse_re = re.compile(self.file_reg_exp)
self.file_list = None
self.file_index = None
if not self.url.endswith('/'):
self.url += '/'
def init(self):
"""
Read the list of files from the Apache index URL.
"""
# One time: get all files from remote Apache dir
log.info('Init: fetching file list from URL: %s ...' % self.url)
html = self.read_from_url(self.url)
self.file_list = self.parse_re.findall(html)
log.info('Found %4d file' % len(self.file_list) + 's' * (len(self.file_list) != 1))
def next_file(self):
"""
        Return the name of the next file; date/size parsing was dropped as
        too fragile (see the comment in __init__), so only the name remains.
        :return string or None:
"""
if self.file_index is None:
self.file_index = -1
# At last file tuple ?
if self.no_more_files():
return None
self.file_index += 1
return self.file_list[self.file_index]
def no_more_files(self):
"""
        Return True once the last file in the list has been handed out.
        :return Boolean:
"""
return self.file_index == len(self.file_list) - 1
def read(self, packet):
"""
Read the data from the URL.
:param packet:
:return:
"""
file_name = self.next_file()
file_name = self.filter_file(file_name)
# All files done?
if file_name is None and self.no_more_files() is True:
packet.set_end_of_stream()
log.info("EOF Apache dir files done, file_index=%d" % self.file_index)
return packet
if file_name is None:
return packet
# Process next file
url = self.url + file_name
log.info("Reading file_index=%d, file_name=%s " % (self.file_index, file_name))
# Create record from file_name and file content
packet.data = dict(file_name=file_name, file_data=self.read_from_url(url))
return packet
def filter_file(self, file_name):
"""
Filter the file_name, e.g. to suppress reading, default: return file_name.
:param file_name:
:return string or None:
"""
return file_name
|
sebastic/stetl
|
stetl/inputs/httpinput.py
|
Python
|
gpl-3.0
| 6,474
|
# -*- coding: utf-8 -*-
"""
oaepub clearcache
Clear out portions or all of OpenAccess_EPUB's cache
Usage:
clearcache [options] COMMAND
Options:
-h --help show this help message and exit
-v --version show program version and exit
Clearcache Specific Options:
-d --dry-run Will print out what it would delete, instead of actually
deleting anything. Good idea to try this once before you
trust the command (because you are cautious and wise)
Recognized commands for oaepub clearcache are:
all Delete all cached data: images, logs
images Delete only the cached image files
logs Delete only the cached log files
manual Print out the cache location then exit
Remember that you can disable any or all caching. Caching is very helpful for
development, but may not be necessary for all users. If you want to manually
alter your cache, you can use 'oaepub clearcache manual' to tell you where the
cache is located.
"""
#Standard Library modules
import os
import platform
import shutil
import subprocess
import sys
#Non-Standard Library modules
from docopt import docopt
#OpenAccess_EPUB modules
from openaccess_epub._version import __version__
import openaccess_epub.utils
def empty_it(path, dry_run):
if dry_run:
print('Deleting all contents of {0}'.format(path))
return
for root, dirs, files in os.walk(path):
for f in files:
os.remove(os.path.join(root, f))
for d in dirs:
shutil.rmtree(os.path.join(root, d))
def main(argv=None):
args = docopt(__doc__,
argv=argv,
version='OpenAccess_EPUB v.' + __version__,
options_first=True)
config = openaccess_epub.utils.load_config_module()
cache_loc = openaccess_epub.utils.cache_location()
if args['COMMAND'] == 'manual':
# We'll *try* to launch a file browser, at least print cache location
plat = platform.platform()
if plat.startswith('Windows'):
os.startfile(cache_loc)
elif plat.startswith('Darwin'):
subprocess.Popen(['open', cache_loc])
elif plat.startswith('Linux'):
try:
subprocess.Popen(['xdg-open', cache_loc])
except OSError:
pass
sys.exit('The cache is located at {0}'.format(cache_loc))
elif args['COMMAND'] == 'logs':
empty_it(os.path.join(cache_loc, 'logs'), dry_run=args['--dry-run'])
sys.exit()
elif args['COMMAND'] == 'images':
empty_it(config.image_cache, dry_run=args['--dry-run'])
sys.exit()
elif args['COMMAND'] == 'all':
empty_it(os.path.join(cache_loc, 'logs'), dry_run=args['--dry-run'])
empty_it(config.image_cache, dry_run=args['--dry-run'])
sys.exit()
if __name__ == '__main__':
main()
|
SavinaRoja/OpenAccess_EPUB
|
src/openaccess_epub/commands/clearcache.py
|
Python
|
gpl-3.0
| 2,881
|
#
# Copyright (c) 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
this rollout worker:
- restores a model from disk
- evaluates a predefined number of episodes
- contributes them to a distributed memory
- exits
"""
import time
import os
from rl_coach.base_parameters import TaskParameters, DistributedCoachSynchronizationType
from rl_coach.checkpoint import CheckpointStateFile, CheckpointStateReader
from rl_coach.data_stores.data_store import SyncFiles
from rl_coach.core_types import RunPhase
def wait_for(wait_func, data_store=None, timeout=10):
"""
    Block until wait_func() returns True, polling up to timeout times at
    10-second intervals (with one final check after the loop)
"""
for i in range(timeout):
if data_store:
data_store.load_from_store()
if wait_func():
return
time.sleep(10)
# one last time
if wait_func():
return
    # each polling attempt above sleeps 10 seconds
    raise ValueError(
        'Waited {} seconds, but condition timed out'.format(timeout * 10)
    )
def wait_for_trainer_ready(checkpoint_dir, data_store=None, timeout=10):
"""
Block until trainer is ready
"""
def wait():
return os.path.exists(os.path.join(checkpoint_dir, SyncFiles.TRAINER_READY.value))
wait_for(wait, data_store, timeout)
def rollout_worker(graph_manager, data_store, num_workers, task_parameters):
"""
    Wait for the first checkpoint, then perform rollouts using the model.
"""
if (
graph_manager.agent_params.algorithm.distributed_coach_synchronization_type
== DistributedCoachSynchronizationType.SYNC
):
timeout = float("inf")
else:
timeout = None
# this could probably be moved up into coach.py
graph_manager.create_graph(task_parameters)
data_store.load_policy(graph_manager, require_new_policy=False, timeout=60)
with graph_manager.phase_context(RunPhase.TRAIN):
# this worker should play a fraction of the total playing steps per rollout
graph_manager.reset_internal_state(force_environment_reset=True)
act_steps = (
graph_manager.agent_params.algorithm.num_consecutive_playing_steps
/ num_workers
)
for i in range(graph_manager.improve_steps / act_steps):
if data_store.end_of_policies():
break
graph_manager.act(
act_steps,
wait_for_full_episodes=graph_manager.agent_params.algorithm.act_for_full_episodes,
)
data_store.load_policy(graph_manager, require_new_policy=True, timeout=timeout)
|
NervanaSystems/coach
|
rl_coach/rollout_worker.py
|
Python
|
apache-2.0
| 3,063
|
from django.shortcuts import render
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
import import_data
from django.contrib.auth.decorators import login_required
from django.utils import timezone
import time
# Create your views here.
from .models import Environment, Service, Severity, Outage
from .forms import OutageForm
def outage_detail(request, o_id):
out = Outage.objects.get(id=o_id)
context = {'out': out}
return render(request, 'outage/out_detail.html', context)
def setup(request):
prod_env = ('RMM', 'Continuity', 'HelpDesk', 'NOC', 'SOC')
non_prod_env = ('Boston', 'Cranberry (non-HelpDesk)', 'Houston', 'Mumbai (non-NOC)', 'Pune (non-SOC)', 'London', 'Sydney')
serv = ('Internet', 'Firewall', 'VPN', 'Wifi', 'Phones', 'Conference Room', 'VMware Engineering Environment', 'Applications')
sev = (('Production Outage', 1), ('Production Degraded', 2), ('Non-Production Outage', 3), ('Non-Production Degraded', 4), ('Redundant Service Outage or Degregation', 5))
e = Environment.objects.all()
e.delete()
s = Service.objects.all()
s.delete()
s = Severity.objects.all()
s.delete()
for x in prod_env:
e = Environment(name=x, prod=True)
e.save()
for x in non_prod_env:
e = Environment(name=x, prod=False)
e.save()
for x in serv:
s = Service(name=x)
s.save()
for x in sev:
s = Severity(name=x[0], value=int(x[1]))
s.save()
return HttpResponseRedirect(reverse('outage:dash'))
def load_x(request):
de = Outage.objects.all()
de.delete()
d = import_data.d()
for r in d:
env = Environment.objects.get(name=r[1])
serv = Service.objects.get(name=r[2])
sev = Severity.objects.get(value=int(r[10]))
o = Outage(description=r[0], environ=env, service=serv, sev=sev, began=r[3], detected=r[4], end=r[5]
, tz=r[6], owner=r[14], rca=r[15], status='Resolved')
o.save()
return HttpResponseRedirect(reverse('outage:dash'))
def dash(request):
o = Outage.objects.order_by('-began')
context = {'outage': o}
return render(request, 'outage/dash.html', context)
@login_required(login_url='/users/login/')
def new_outage(request):
if request.method != 'POST':
date = time.strftime("%Y-%m-%d")
now_test = timezone.now()
u = request.user
form = OutageForm(initial = {'detected': now_test, 'auth_owner': u})
else:
form = OutageForm(data=request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('outage:dash'))
context = {'form': form}
return render(request, 'outage/new_outage.html', context)
@login_required(login_url='/users/login/')
def edit_outage(request, o_id):
if request.method != 'POST':
track = Outage.objects.get(id=o_id)
form = OutageForm(instance=track)
else:
track = Outage.objects.get(id=o_id)
form = OutageForm(request.POST, instance=track)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('outage:outage_detail', args=[o_id]))
context = {'form' : form, 'o_id' : o_id}
return render(request, 'tracker/edit_outage.html', context)
def out_csv_dump(request):
track = Outage.objects.all()
csv_dump = open('csv_dump.csv', 'w')
csv_dump.write('Description,Environment,Service,Severity,Began,Ended,TimeZone\n')
for t in track:
csv_dump.write(t.description+','
+t.environ.name+','
+t.service.name+','
+str(t.sev.value)+','
+t.began.strftime('%m/%d/%Y %H:%M:%S')+','
+t.end.strftime('%m/%d/%Y %H:%M:%S')+','
                       +t.tz
+'\n')
csv_dump.close()
csv_dump = open('csv_dump.csv', 'r')
response = HttpResponse(csv_dump, content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename="out_csv_dump.csv"'
return response
|
inspectorbean/spat
|
outage/views.py
|
Python
|
gpl-3.0
| 4,101
|
"""
Views which allow users to create and activate accounts.
"""
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME, login as auth_login
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.sites.models import get_current_site
from django.shortcuts import redirect
from django.utils.decorators import method_decorator
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
from django.utils.http import is_safe_url
from braces.views import JSONResponseMixin
from registration import signals
from registration.forms import RegistrationForm
class _RequestPassingFormView(FormView):
"""
A version of FormView which passes extra arguments to certain
methods, notably passing the HTTP request nearly everywhere, to
enable finer-grained processing.
"""
def get(self, request, *args, **kwargs):
# Pass request to get_form_class and get_form for per-request
# form control.
form_class = self.get_form_class(request)
form = self.get_form(form_class)
return self.render_to_response(self.get_context_data(form=form))
def post(self, request, *args, **kwargs):
# Pass request to get_form_class and get_form for per-request
# form control.
form_class = self.get_form_class(request)
form = self.get_form(form_class)
if form.is_valid():
# Pass request to form_valid.
return self.form_valid(request, form)
else:
return self.form_invalid(form)
def get_form_class(self, request=None):
return super(_RequestPassingFormView, self).get_form_class()
def get_form_kwargs(self, request=None, form_class=None):
return super(_RequestPassingFormView, self).get_form_kwargs()
def get_initial(self, request=None):
return super(_RequestPassingFormView, self).get_initial()
def get_success_url(self, request=None, user=None):
# We need to be able to use the request and the new user when
# constructing success_url.
return super(_RequestPassingFormView, self).get_success_url()
def form_valid(self, form, request=None):
return super(_RequestPassingFormView, self).form_valid(form)
def form_invalid(self, form, request=None):
return super(_RequestPassingFormView, self).form_invalid(form)
class RegistrationView(_RequestPassingFormView):
"""
Base class for user registration views.
"""
disallowed_url = 'registration_disallowed'
form_class = RegistrationForm
http_method_names = ['get', 'post', 'head', 'options', 'trace']
success_url = None
template_name = 'registration/registration_form.html'
def dispatch(self, request, *args, **kwargs):
"""
Check that user signup is allowed before even bothering to
dispatch or do other processing.
"""
if not self.registration_allowed(request):
return redirect(self.disallowed_url)
return super(RegistrationView, self).dispatch(request, *args, **kwargs)
def form_valid(self, request, form):
new_user = self.register(request, **form.cleaned_data)
success_url = self.get_success_url(request, new_user)
# success_url may be a simple string, or a tuple providing the
# full argument set for redirect(). Attempting to unpack it
# tells us which one it is.
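        # e.g. success_url = '/accounts/register/complete/'
        # or   success_url = ('registration_complete', (), {})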
try:
to, args, kwargs = success_url
return redirect(to, *args, **kwargs)
except ValueError:
return redirect(success_url)
def registration_allowed(self, request):
"""
Override this to enable/disable user registration, either
globally or on a per-request basis.
"""
return True
def register(self, request, **cleaned_data):
"""
Implement user-registration logic here. Access to both the
request and the full cleaned_data of the registration form is
available here.
"""
raise NotImplementedError
class ActivationView(TemplateView):
"""
Base class for user activation views.
"""
http_method_names = ['get']
template_name = 'registration/activate.html'
def get(self, request, *args, **kwargs):
activated_user = self.activate(request, *args, **kwargs)
if activated_user:
signals.user_activated.send(sender=self.__class__,
user=activated_user,
request=request)
success_url = self.get_success_url(request, activated_user)
try:
to, args, kwargs = success_url
return redirect(to, *args, **kwargs)
except ValueError:
return redirect(success_url)
else:
return self.invalid_code(request, *args, **kwargs)
def invalid_code(self, request, *args, **kwargs):
return super(ActivationView, self).get(request, *args, **kwargs)
def activate(self, request, *args, **kwargs):
"""
Implement account-activation logic here.
"""
raise NotImplementedError
def get_success_url(self, request, user):
raise NotImplementedError
class LoginView(FormView):
""" Class-based version of django.contrib.auth.login"""
form_class = AuthenticationForm
redirect_field_name = REDIRECT_FIELD_NAME
template_name = 'registration/login.html'
@method_decorator(csrf_protect)
@method_decorator(sensitive_post_parameters())
@method_decorator(never_cache)
def dispatch(self, *args, **kwargs):
return super(LoginView, self).dispatch(*args, **kwargs)
def form_valid(self, form):
"""
        The user has provided valid credentials (checked in
        AuthenticationForm.is_valid()), so check the test cookie and log
        the user in.
"""
self.check_and_delete_test_cookie()
user = form.get_user()
auth_login(self.request, form.get_user())
return super(LoginView, self).form_valid(form)
def form_invalid(self, form):
"""
        The user has provided invalid credentials (checked in
        AuthenticationForm.is_valid()), so set the test cookie again and
        re-render the form with errors.
"""
self.set_test_cookie()
return super(LoginView, self).form_invalid(form)
def get_success_url(self):
if self.success_url:
redirect_to = self.success_url
else:
redirect_to = self.request.REQUEST.get(self.redirect_field_name, '')
if not is_safe_url(url=redirect_to, host=self.request.get_host()):
redirect_to = settings.LOGIN_REDIRECT_URL
return redirect_to
def set_test_cookie(self):
self.request.session.set_test_cookie()
def check_and_delete_test_cookie(self):
if self.request.session.test_cookie_worked():
self.request.session.delete_test_cookie()
return True
return False
def get_context_data(self, **kwargs):
context = super(LoginView, self).get_context_data(**kwargs)
current_site = get_current_site(self.request)
extra_context = {
self.redirect_field_name: self.get_success_url(),
'site_name': current_site.name,
}
context.update(extra_context)
return context
def get(self, request, *args, **kwargs):
"""
Same as django.views.generic.edit.ProcessFormView.get(), but adds test cookie stuff
"""
self.set_test_cookie()
return super(LoginView, self).get(request, *args, **kwargs)
|
futurecolors/django-registration
|
registration/views.py
|
Python
|
bsd-3-clause
| 7,994
|
__author__ = 'ryanplyler'
def sayhi(config):
    # initialise the outputs so the return below cannot raise NameError
    server_output = None
    response = None
    error = None
    try:
        server_output = "Executing action 'sayhi()'"
        response = "HI THERE!"
    except Exception:
        error = 1
    return server_output, response, error
|
grplyler/netcmd
|
netcmd_actions.py
|
Python
|
gpl-2.0
| 231
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#Copyright 2014 José Manuel Abuín Mosquera <josemanuel.abuin@usc.es>
#
#This file is part of Perldoop.
#
#Perldoop is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Perldoop is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with Perldoop. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import re
from Status import *
#Automata function
def procesaLinha(linha):
if(getComentario().replace("\n","") == ignoreLine):
return ''
#Perl code followed by comment
expresionComentario = re.compile(r'^([\w\W\d ]*)([\#]+)'+comentarioLinha+'[\s]*([\w\W\d ]*)$').match(linha)
	#Variable assignment to: operations, concatenations...
expresionAsignacionVariableParentese = re.compile(r'^[\s]*[\(]{1}[\s]*(my){0,1}[\s]*[\$@]{0,1}([\w]+[\w\d\W]*)[\s]*[\)]{1}[\s ]*(={1})[\s]*([\w\W\d]+)[\s]*(;)').match(linha)
expresionAsignacionVariable = re.compile(r'^[\s]*[\(]{0,1}[\s]*(my){0,1}[\s]*[\$@]{0,1}([\w]+[\w\d\W]*)[\s]*[\)]{0,1}[\s ]*(={1})[\s ]*([\w\W\d]+)[\s]*(;){1}').match(linha)
expresionAsignacionVariableSenPuntoComa = re.compile(r'^[\s]*[\(]{0,1}[\s]*(my){0,1}[\s]*[\$@]{0,1}([\w]+[\w\d\W]*)[\s]*[\)]{0,1}[\s ]*(={1})[\s ]*([\w\W\d]+)[\s]*').match(linha)
expresionDeclaracionVariable = re.compile(r'^[\s]*[\(]{0,1}[\s]*(my){1}[\s]*[\$@]{1}([\w]+[\w\d\W]*)[\s]*[\)]{0,1}[\s]*(;){1}').match(linha)
expresionDeclaracionHash = re.compile(r'^[\s]*[\(]{0,1}[\s]*(my){1}[\s]*[%]{1}([\w]+[\w\d\W]*)[\s]*[\)]{0,1}[\s]*(;){1}').match(linha)
#Expressions of the type: if (Condition/s) {
expresionIf = re.compile(r'^[\s]*(if|elsif)[\s]*([\(]{1})[\s]*([\w\d\W ]+)[\s]*([\)]{1})[\s]*(\{){0,1}').match(linha)
#Expressions of the type: while (Condition/s)
expresionWhile = re.compile(r'^[\s]*(while)[\s]*([\(]{1})[\s]*([\w\d\W ]+)[\s]*([\)]{1})[\s]*(\{){0,1}').match(linha)
    #Expressions of the type: for (Condition/s)
expresionFor = re.compile(r'[\s]*for[\s]*([\(]{1})[\s]*([\w\d\W ]+)[\s]*([\)]{1})[\s]*(\{){0,1}[\s]*').match(linha)
    #Expressions of the type: foreach (Condition/s)
#expresionForeach = re.compile(r'[\s]*foreach[\s]*([\(]{1})[\s]*([\w\d\W ]+)[\s]*([\)]{1})[\s]*(\{){0,1}[\s]*').match(linha)
expresionForeach = re.compile(r'[\s]*foreach[\s]*[\s]*([\w\d\W ]+)[\s]*').match(linha)
    #Expressions of the type: for my $variable
    #Concatenation of variables and strings
expresionOperacionConcat = re.compile(r'^[\s]*[\$]{0,1}([\w\d\W]+)[\s]*([.]){1}[\s]*[\$]{0,1}([\w\d\W]+)[\s]*([;]{1})[\s]*').match(linha)
    #Arithmetic operation expression
expresionOperacion = re.compile(r'^[\s]*[\$]{0,1}([\w\d\W]+)[\s]*([+\-\*\/]){1}[\s]*[\$]{0,1}([\w\d\W]+)[\s]*([;]{1})[\s]*').match(linha)
    #Regular expression
expresionRegular = re.compile(r'^[\s]*[\$]*([\w\d_.\[\]\$]+)[\s]*(=[\s]*~)[\s]*([\w\d\W]]+)[\s]*').match(linha)
    #Expression for insertion or modification in a hash table
expresionHashAsignacion = re.compile(r'[\s]*[\$]{0,1}([\w]+[\w\d\W]*)[\{]([\w\d\W]+)[\}][\s]*[=]{1}[\s]*([\w\W\d]+)[\t\s]*(;)[\s]*').match(linha)
    #Same, but for a hash with a double key
expresionHashAsignacionDoble = re.compile(r'[\s]*[\$]{0,1}([\w]+[\w\d\W]*)[\{]([\w\d\W]+)[\}][\{]([\w\d\W]+)[\}][\s]*[=]{1}[\s]*([\w\W\d]+)[\t\s]*(;)[\s]*').match(linha)
    #Expression to process a function call
expresionFuncion = re.compile(r'[\s]*([\w][\w\d_]*)[\s]*\([\s]*([\w\d\W]+)\)[\s]*([;]{1})[\s\t]*').match(linha)
    #Expression that calls a function without parentheses
expresionFuncionSenParentese = re.compile(r'[\s]*(delete|undef|print)[\s]*([\w\d\W]+)[\s]*([;]{1})[\s]*').match(linha)
    #Simple symbol expression
expresionSimbolo = re.compile(r'[\s]*([\W])[\s]*$').match(linha)
    #Increment expression
expresionIncremento = re.compile(r'[\s]*[\$]{1}([\w]+[\w\d\W]*)([+]{2})[\s]*([;]{1})').match(linha)
if(expresionComentario):
comentario = expresionComentario.group(3).replace('\n','')
if(comentario == inicioProcesadoPerl):
setProcessingStatus(True)
return ''
if(comentario == finProcesadoPerl):
setProcessingStatus(False)
return ''
if(comentario == headerInit):
setReadingHeaderStatus(True)
return ''
if(comentario == headerEnd):
setReadingHeaderStatus(False)
return ''
    if(getReadingHeaderStatus()): #The header is being read
expresionOpcion = re.compile(r'[\s]*([\#]+)'+comentarioLinha+'([\w\d\W]+)[\s]*(=)[\s]*([\w\d\W]+)[\s]*').match(linha)
if(getDebugMode()):
print 'Reading header option :: '+linha
nomeOpcion = ""
valorOpcion = ""
if (expresionOpcion):
nomeOpcion = expresionOpcion.group(2)
valorOpcion = expresionOpcion.group(4)
#options[nomeOpcion] = valorOpcion
setHeaderOption(nomeOpcion, valorOpcion)
return ''
else:
print 'ERROR: Bad expression at header'
return ''
if(getProcessingStatus()):
if(getDebugMode()):
print 'procesaLinha function :: '+linha
if(expresionComentario):
if(getDebugMode()):
print 'Processing expression with comment :: '+expresionComentario.group(3)
setComentario(expresionComentario.group(3))
return procesaLinha(expresionComentario.group(1))
        #if conditional
elif(expresionIf):
if(getDebugMode()):
print 'procesaLinha function :: expresionIf :: '+expresionIf.group(0)
if(expresionIf.group(1) == 'if'):
cadeaDevolver = 'if ('+procesaInteriorIf(expresionIf.group(3))+') {'
elif(expresionIf.group(1) == 'elsif'):
cadeaDevolver = 'else if ('+procesaInteriorIf(expresionIf.group(3))+') {'
else:
cadeaDevolver = 'ERRoR'
return cadeaDevolver
elif(expresionWhile):
cadeaDevolver = ""
if(getDebugMode()):
print 'procesaLinha function :: expresionWhile :: '+expresionWhile.group(0)
if(not cadeaStdin in expresionWhile.group(3)):
cadeaDevolver = 'while ('+procesaInteriorIf(expresionWhile.group(3))+') {'
else:
if(getDebugMode()):
print 'procesaLinha function :: expresionWhile :: con STDIN'
if(getConfigOption('hadoop')):
if(getComentario()== mapStart):
partesInternas = expresionWhile.group(3).split("=")
cadeaDevolver = procesaVariable(partesInternas[0])+ " = value.toString();"
return cadeaDevolver
elif(getComentario()==reduceStart):
partesInternas = expresionWhile.group(3).split("=")
cadeaDevolver = "for (Text val : values) {\n"
cadeaDevolver = cadeaDevolver + procesaVariable(partesInternas[0])+ " = val.toString();"
return cadeaDevolver
else:
partesInternas = expresionWhile.group(3).split("=")
cadeaDevolver = 'BufferedReader br = new BufferedReader(new InputStreamReader(System.in));\n'
cadeaDevolver = cadeaDevolver + "while(("+procesaVariable(partesInternas[0])+"=br.readLine())!=null){"
return cadeaDevolver
return cadeaDevolver
elif(expresionForeach):
if(getDebugMode()):
                print 'procesaLinha function :: Processing FOREACH loop expression'
cadeaDevolver = procesaInteriorForeach(linha)
return cadeaDevolver
elif(expresionFor):
cadeaDevolver = ""
if(getDebugMode()):
                print 'procesaLinha function :: Processing FOR loop expression'
partesCondicionFor = expresionFor.group(2).split(";")
if(len(partesCondicionFor)<3):
if(".." in expresionFor.group(2)):
indices = expresionFor.group(2).split("..")
cadeaDevolver = "for (int varRangosFP = "+indices[0]+"; varRangosFP <="+indices[1]+";varRangosFP++) {"
else:
cadeaDevolver = 'for ('+procesaLinha(partesCondicionFor[0])+';'+procesaInteriorIf(partesCondicionFor[1])+';'+procesaLinha(partesCondicionFor[2])+') {'
return cadeaDevolver
elif(expresionFuncion):
if(getDebugMode()):
                print 'procesaLinha function :: Processing function call'
if(expresionFuncion.group(1) == 'lowercase'):
return procesaVariable(expresionFuncion.group(2))+'.toLowerCase()'+expresionFuncion.group(3)
elif(expresionFuncion.group(1) == 'chomp'):
return procesaVariable(expresionFuncion.group(2))+' = '+procesaVariable(expresionFuncion.group(2))+".trim()"+expresionFuncion.group(3)
            elif(expresionFuncion.group(1) == 'lc'):#TODO: replace procesaVariable with the function that processes function arguments.
return procesaVariable(expresionFuncion.group(2))+'.toLowerCase()'+expresionFuncion.group(3)
elif(expresionFuncion.group(1) == 'push'):
partesPush = expresionFuncion.group(2).split(",")
if(len(partesPush)==2):
return partesPush[0].replace("@","")+".add("+procesaOperacion(partesPush[1])+");"
else:
return expresionFuncion.group(1)+' ('+procesaArgumentosFuncion(expresionFuncion.group(2))+')'+expresionFuncion.group(3)
elif(expresionFuncion.group(1) == 'printf'):
return "System.out.format("+procesaArgumentosFuncion(expresionFuncion.group(2))+')'+expresionFuncion.group(3)
else:
return expresionFuncion.group(1)+' ('+procesaArgumentosFuncion(expresionFuncion.group(2))+')'+expresionFuncion.group(3)
#Assign to hash table with double key
elif(expresionHashAsignacionDoble):
if(getDebugMode()):
                print 'procesaLinha function :: Processing assignment to hash table with double key'
cadeaDevolta = ''
nomeVariable = expresionHashAsignacionDoble.group(1)
key1 = procesaVariable(expresionHashAsignacionDoble.group(2))
key2 = procesaVariable(expresionHashAsignacionDoble.group(3))
valor = procesaOperacion(expresionHashAsignacionDoble.group(4))
if((getComentario() !='') and (getComentario != '\n') and(getComentario().replace('\n','') == key1String)):
cadeaDevolta += 'if (!'+nomeVariable+'.containsKey(String.valueOf('+key1+'))) {\n\t'+nomeVariable+'.put(String.valueOf('+key1+'),new Hashtable<String,String>());\n}\n'
cadeaDevolta += nomeVariable+'.get(String.valueOf('+key1+')).put('+key2+','+valor+')'+expresionHashAsignacionDoble.group(5)
return cadeaDevolta
else:
cadeaDevolta += 'if (!'+nomeVariable+'.containsKey('+key1+')) {\n\t'+nomeVariable+'.put('+key1+',new Hashtable<String,String>());\n}\n'
cadeaDevolta += nomeVariable+'.get('+key1+').put('+key2+','+valor+')'+expresionHashAsignacionDoble.group(5)
return cadeaDevolta
        #Assignment in a hash table
elif(expresionHashAsignacion):
if(getDebugMode()):
                print 'procesaLinha function :: Processing assignment to a hash table'
return expresionHashAsignacion.group(1)+'.put('+procesaOperacion(expresionHashAsignacion.group(2))+','+procesaOperacion(expresionHashAsignacion.group(3))+')'+expresionHashAsignacion.group(4)
elif(('=~' in linha)or('!~' in linha)):
if(getDebugMode()):
                print 'procesaLinha function :: Processing simple regular expression'
simboloER = ''
if('=~' in linha):
simboloER = '=~'
elif('!~' in linha):
simboloER = '!~'
partes = linha.split(simboloER)
if(getDebugMode()):
print '==========================='
print partes[0]
print partes[1]
print '==========================='
if("=" in partes[0]):
partesIgualdade = partes[0].split("=")
if(len(partesIgualdade)==2):
if(getDebugMode()):
                        print 'procesaLinha function :: Processing variable assignment with regular expression'
if(partes[1][len(partes[1])-1]==';'):
partes[1] = partes[1][0:len(partes[1])-1]
elif((partes[1][len(partes[1])-2]==';')and(partes[1][len(partes[1])-1]=='\n')):
partes[1] = partes[1][0:len(partes[1])-2]
return procesaExpresionRegular(partesIgualdade[1]+" "+simboloER+" "+partes[1],partesIgualdade[0])
else:
return procesaExpresionRegular(linha)
else:
return procesaExpresionRegular(linha)
elif('+=' in linha):
partes = linha.split("+=")
nomeVariable = partes[0]
valorVariable = partes[1]
return procesaVariable(nomeVariable)+' += '+procesaOperacion(valorVariable)+';'
elif('.=' in linha):
partes = linha.split(".=")
nomeVariable = partes[0]
valorVariable = partes[1]
return procesaVariable(nomeVariable)+' += '+procesaOperacion(valorVariable)+';'
        #Variable assignment with a value (string, int or float)
elif(expresionAsignacionVariableParentese):
if(getDebugMode()):
                print 'procesaLinha function :: Processing variable assignment with parentheses :: '+expresionAsignacionVariableParentese.group(2)
if(getComentario().replace("\n","") == tipoKeyValue):
variables = expresionAsignacionVariableParentese.group(2).split(",")
variableKey = procesaVariable(variables[0])
variableValue = procesaVariable(variables[1])
linha1 = variableKey+" = key.toString();\n"
linha2 = variableValue+" = val.toString();"
return linha1+linha2
else:
nomeVariable = expresionAsignacionVariableParentese.group(2)
valorVariable = expresionAsignacionVariableParentese.group(4)
return procesaVariable(nomeVariable)+' = '+procesaOperacion(valorVariable)+';'
        #Variable assignment with a value (string, int or float)
elif(expresionAsignacionVariable):
tipoDato = ''
if(getDebugMode()):
                print 'procesaLinha function :: Processing variable assignment :: '+expresionAsignacionVariable.group(2)
if(expresionAsignacionVariable.group(1)=='my' and getComentario()!=""):
if(getComentario().replace("\n","") == tipoString):
tipoDato = 'String'
elif(getComentario().replace("\n","") == tipoInt):
tipoDato = 'int'
elif(getComentario().replace("\n","") == tipoBoolean):
tipoDato = 'boolean'
elif(getComentario().replace("\n","") == tipoArrayString):
tipoDato = 'String[]'
elif(getComentario().replace("\n","") == tipoDouble):
tipoDato = 'double'
elif(getComentario().replace("\n","") == tipoLong):
tipoDato = 'long'
else:
tipoDato = 'String'
nomeVariable = expresionAsignacionVariable.group(2)
valorVariable = expresionAsignacionVariable.group(4)
if(getComentario().replace("\n","") == castInt):
return procesaVariable(nomeVariable)+' = Integer.parseInt('+procesaOperacion(valorVariable)+')'+expresionAsignacionVariable.group(5)
elif(getComentario().replace("\n","") == castString):
return procesaVariable(nomeVariable)+' = String.valueOf('+procesaOperacion(valorVariable)+')'+expresionAsignacionVariable.group(5)
return tipoDato+' '+procesaVariable(nomeVariable)+' = '+procesaOperacion(valorVariable)+expresionAsignacionVariable.group(5)
elif(expresionAsignacionVariableSenPuntoComa):
tipoDato = ''
if(getDebugMode()):
                print 'procesaLinha function :: Processing variable assignment without semicolon :: '+expresionAsignacionVariableSenPuntoComa.group(2)
if(expresionAsignacionVariableSenPuntoComa.group(1)=='my' and getComentario()!=""):
if(getComentario().replace("\n","") == tipoString):
tipoDato = 'String'
elif(getComentario().replace("\n","") == tipoInt):
tipoDato = 'int'
elif(getComentario().replace("\n","") == tipoBoolean):
tipoDato = 'boolean'
elif(getComentario().replace("\n","") == tipoArrayString):
tipoDato = 'String[]'
elif(getComentario().replace("\n","") == tipoDouble):
tipoDato = 'double'
elif(getComentario().replace("\n","") == tipoLong):
tipoDato = 'long'
else:
tipoDato = 'String'
nomeVariable = expresionAsignacionVariableSenPuntoComa.group(2)
valorVariable = expresionAsignacionVariableSenPuntoComa.group(4)
if(getComentario().replace("\n","") == castInt):
return procesaVariable(nomeVariable)+' = Integer.parseInt('+procesaOperacion(valorVariable)+')'
return tipoDato+' '+procesaVariable(nomeVariable)+' = '+procesaOperacion(valorVariable)
elif(expresionDeclaracionVariable):
if(getDebugMode()):
                print 'procesaLinha function :: Processing variable declaration :: '+expresionDeclaracionVariable.group(0)
tipoDato = ''
valorAsignacion = ''
if(getComentario().replace("\n","") == tipoString):
tipoDato = 'String'
elif(getComentario().replace("\n","") == tipoInt):
tipoDato = 'int'
elif(getComentario().replace("\n","") == tipoBoolean):
tipoDato = 'boolean'
elif(getComentario().replace("\n","") == tipoArrayString):
tipoDato = 'String[]'
elif(getComentario().replace("\n","") == tipoStringNull):
tipoDato = 'String'
valorAsignacion = ' = null'
elif(getComentario().replace("\n","") == tipoLong):
tipoDato = 'long'
else:
if(getDebugMode()):
                    print 'procesaLinha function :: Type not found :: Default -> String'
tipoDato = 'String'
nomeVariable = expresionDeclaracionVariable.group(2)
return tipoDato+' '+procesaVariable(nomeVariable)+valorAsignacion+';'
elif(expresionDeclaracionHash):
if(getDebugMode()):
                print 'procesaLinha function :: Processing hash table declaration :: '+expresionDeclaracionHash.group(0)
tipoDato = 'Hashtable '
valorAsignacion = ''
if(getComentario().replace("\n","") == tipoHashStringLong):
tipoDato = tipoDato + '<String, Long>'
valorAsignacion = ' = new Hashtable<String, Long>()'
elif(getComentario().replace("\n","") == tipoHashStringInteger):
tipoDato = tipoDato + '<String, Integer>'
valorAsignacion = ' = new Hashtable<String, Integer>()'
else:
if(getDebugMode()):
                    print 'procesaLinha function :: Type not found :: Default -> String'
tipoDato = tipoDato + '<String, String>'
valorAsignacion = ' = new Hashtable<String, String>()'
nomeVariable = expresionDeclaracionHash.group(2)
return tipoDato+' '+procesaVariable(nomeVariable)+valorAsignacion+';'
elif(expresionSimbolo):
if(getDebugMode()):
                print 'procesaLinha function :: Processing symbol'
return expresionSimbolo.group(1)
        #Call to a function whose arguments have no parentheses
elif(expresionFuncionSenParentese):
if(getDebugMode()):
                print 'procesaLinha function :: Processing call to function without parentheses'
nomeFuncion = expresionFuncionSenParentese.group(1)
argumento = expresionFuncionSenParentese.group(2)+expresionFuncionSenParentese.group(3)
cadeaDevolta = ''
if(nomeFuncion == "delete"):
if(getDebugMode()):
                    print 'procesaLinha function :: Processing call to function without parentheses DELETE :: '+argumento
                    #Get the parts of the hash
expresionAuxPartesHash = re.compile(r'[\s]*[\$]{1}([\w][\w\d\W]*)[\{]{1}[\$]{1}([\w\d\W]+)[\}]{1}[\s]*([;]{0,1})[\s]*').match(argumento)
if(expresionAuxPartesHash):
cadeaDevolta = expresionAuxPartesHash.group(1)+'.remove('+expresionAuxPartesHash.group(2)+');'
else:
cadeaDevolta = procesaVariable(argumento).replace(";","")+'= "";'
elif(nomeFuncion == 'chomp'):
return procesaVariable(expresionFuncionSenParentese.group(2))+' = '+procesaVariable(expresionFuncionSenParentese.group(2))+".trim()"+expresionFuncionSenParentese.group(3)
elif(nomeFuncion == 'print'):
if(getConfigOption('hadoop')):
keyValue = argumento.split(".")
cadeaDevolta = 'context.write('+getDefaultKey()+procesaVariable(keyValue[0])+'),'+getDefaultValue()+procesaVariable(keyValue[1].replace(";",""))+'));'
else:
cadeaDevolta = 'System.out.print('+procesaOperacion(argumento).replace(";","").replace("\\+",". ")+');'
elif(nomeFuncion == 'undef'):
if(getDebugMode()):
                    print 'procesaLinha function :: Processing call to function without parentheses UNDEF :: '+argumento
if((getComentario()!='')and(getComentario()!='\n')and(getComentario().replace('\n','') == tipoArrayString)):
cadeaDevolta = expresionFuncionSenParentese.group(2).replace("@","").replace("\n","").replace(";","").replace(" ","")+" = new String[text.size()];"
elif((getComentario()!='')and(getComentario()!='\n')and(getComentario().replace('\n','') == tipoArrayBoolean)):
cadeaDevolta = expresionFuncionSenParentese.group(2).replace("@","").replace("\n","").replace(";","").replace(" ","")+" = new boolean[text.size()];"
else:
cadeaDevolta = expresionFuncionSenParentese.group(2).replace("@","").replace("\n","").replace(";","").replace(" ","")+'.clear();'
return cadeaDevolta
elif(expresionIncremento):
if(getDebugMode()):
print 'procesaLinha function :: ExpresionIncremento'
return procesaVariable(expresionIncremento.group(1))+expresionIncremento.group(2)+expresionIncremento.group(3)
else:
if(getDebugMode()):
                print 'procesaLinha function :: Expression not found!!'
variable = linha.replace("$","").replace(" ","")
if(not('next' in variable)):
return variable
elif('next;' in variable.replace("\n","")):
return 'continue;'
else:
return ''
else:
return ''
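# Illustrative sketch (hypothetical input; procesaLinha's helpers such as
# procesaVariable come from the surrounding package): a Perl increment like
#     procesaLinha('$count++;')
# is expected to match the expresionIncremento branch and come back as the
# Java line 'count++;'.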
|
citiususc/perldoop
|
src/Automata.py
|
Python
|
gpl-3.0
| 21,451
|
from sklearn2sql_heroku.tests.regression import generic as reg_gen
reg_gen.test_model("XGBRegressor" , "boston" , "postgresql")
|
antoinecarme/sklearn2sql_heroku
|
tests/regression/boston/ws_boston_XGBRegressor_postgresql_code_gen.py
|
Python
|
bsd-3-clause
| 130
|
import cv2
import numpy as np
import os
from math import sqrt
from constants import (
GROUND_TRUTH,
RAWDATA_FOLDER)
FONT = cv2.FONT_HERSHEY_SIMPLEX
WINDOW_NAME = "Labeling"
CTRL_PT_RADIUS = 10
class Boundary:
def __init__(self, width, height):
self.top = height
self.right = width
self.bottom = 0
self.left = 0
def contains_point(self, x, y):
return self.top >= y and self.bottom <= y and self.left <= x and self.right >= x
class ControlPoint:
def __init__(self, x, y):
self.x = x
self.y = y
self.radius = CTRL_PT_RADIUS
self.in_use = True
def draw(self, img):
cv2.circle(img, (self.x, self.y), self.radius, (255,0,0), 1)
def move_to(self, x, y):
self.x = x
self.y = y
def is_clicked(self, x, y):
dist = sqrt((self.x - x) ** 2 + (self.y - y) ** 2)
return dist <= self.radius
class Marking:
def __init__(self, point):
self.points = [point]
self.in_use = True
def add_point(self, point):
self.points.append(point)
def update(self):
self.points = [point for point in self.points if point.in_use]
if len(self.points) == 0:
self.in_use = False
def draw(self, img, debug=True):
color = (0,0,255) if debug else (255,255,255)
pts = []
for point in self.points:
if debug:
point.draw(img)
pts.append([point.x, point.y])
cv2.polylines(img, np.array([pts]), False, color)
class SceneMode:
ADD_POINT = "ADD_POINT"
ADD_MARKING = "ADD_MARKING"
EDIT_POINT = "EDIT_POINT"
DELETE_POINT = "DELETE_POINT"
class Scene:
def __init__(self, width, height, frame_idx):
self.frame_idx = frame_idx
self.mode = SceneMode.ADD_MARKING
self.points = []
self.markings = []
self.boundary = Boundary(width, height)
self.active_point = None
self.active_marking = None
self.width = width
self.height = height
self.img = np.zeros((height, width, 3), np.uint8)
def update(self):
self.img = np.zeros((self.height, self.width, 3), np.uint8)
for marking in self.markings:
marking.update()
self.points = [point for point in self.points if point.in_use]
self.markings = [
marking for marking in self.markings if marking.in_use]
def draw(self):
cv2.putText(self.img, self.mode, (50, 50), FONT, 1, (0,0,255), 2)
frame_info = "{} {}".format("Frame", self.frame_idx)
cv2.putText(self.img, frame_info, (50, 100), FONT, 1, (0,0,255), 2)
for marking in self.markings:
marking.draw(self.img)
def save_img(self, filename):
skeleton = np.zeros((self.height, self.width, 1), np.uint8)
for marking in self.markings:
marking.draw(skeleton, debug=False)
cv2.imwrite(filename, skeleton)
def change_mode(self, mode):
if len(self.markings) == 0:
self.mode = SceneMode.ADD_MARKING
else:
self.mode = mode
def mouse_handle(self, event, x, y, flags, param):
handle_functions = {
cv2.EVENT_LBUTTONDOWN: self._on_lbutton_down,
cv2.EVENT_LBUTTONUP: self._on_lbutton_up,
cv2.EVENT_MOUSEMOVE: self._on_mouse_move
}
if self.boundary.contains_point(x, y):
handle_functions.get(event, self._not_handle)(x, y)
else:
self.active_point = None
def _on_lbutton_down(self, x, y):
self._find_active_point(x, y)
if self.active_point is None:
if self.mode == SceneMode.ADD_POINT:
new_point = ControlPoint(x, y)
self.points.append(new_point)
self.active_marking.add_point(new_point)
if self.mode == SceneMode.ADD_MARKING:
new_point = ControlPoint(x, y)
new_marking = Marking(new_point)
self.points.append(new_point)
self.markings.append(new_marking)
self.active_marking = new_marking
self.mode = SceneMode.ADD_POINT
else:
if self.mode == SceneMode.DELETE_POINT:
self.active_point.in_use = False
self.active_point = None
def _find_active_point(self, x, y):
for point in self.points:
if point.is_clicked(x, y):
self.active_point = point
return
def _on_lbutton_up(self, x, y):
self.active_point = None
def _on_mouse_move(self, x, y):
if self.mode == SceneMode.EDIT_POINT and self.active_point is not None:
self.active_point.move_to(x, y)
def _not_handle(self, x, y):
pass
def merge_images(img1, img2):
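    # Composite img2 over img1: every non-black pixel of img2 becomes the
    # foreground mask; the inverted mask keeps img1 only where img2 is black.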
fg_gray = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
_, mask = cv2.threshold(fg_gray, 0, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask)
bg = cv2.bitwise_and(img1, img1, mask=mask_inv)
fg = cv2.bitwise_and(img2, img2, mask=mask)
return cv2.add(bg, fg)
def main():
cv2.namedWindow(WINDOW_NAME)
    img_names = os.listdir(RAWDATA_FOLDER)
if not os.path.exists(GROUND_TRUTH):
os.makedirs(GROUND_TRUTH)
n_frames = len(img_names)
frame_idx = 0
    while 0 <= frame_idx < n_frames:
        img = cv2.imread(RAWDATA_FOLDER + img_names[frame_idx])
height, width, _ = img.shape
scene = Scene(width, height, frame_idx)
cv2.setMouseCallback(WINDOW_NAME, scene.mouse_handle)
out_file = GROUND_TRUTH + img_names[frame_idx]
while True:
scene.update()
scene.draw()
fg = scene.img
frame = merge_images(img, fg)
cv2.imshow(WINDOW_NAME, frame)
key = 0xFF & cv2.waitKey(1)
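            # Key bindings: Esc quits; 1-4 switch annotation modes; w/q step to
            # the next/previous frame without saving; e saves the skeleton and
            # advances.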
if key == 27:
exit()
elif key == ord("1"):
scene.change_mode(SceneMode.ADD_MARKING)
elif key == ord("2"):
scene.change_mode(SceneMode.ADD_POINT)
elif key == ord("3"):
scene.change_mode(SceneMode.DELETE_POINT)
elif key == ord("4"):
scene.change_mode(SceneMode.EDIT_POINT)
elif key == ord("w"):
frame_idx += 1
break
elif key == ord("q"):
frame_idx -= 1
break
elif key == ord("e"):
frame_idx += 1
scene.save_img(out_file)
break
if __name__ == "__main__":
main()
|
trangnm58/idrec
|
localization_cnn/create_ground_truth.py
|
Python
|
mit
| 5,533
|
"""Parse metrics json encoded protobuf list.
Decode json encoded form of a List of MetricRecord protobufs
Definition of record:
https://github.com/wandb/client/blob/master/wandb/proto/wandb_internal.proto
Encoder function:
https://github.com/wandb/client/blob/master/wandb/sdk/lib/proto_util.py
Example output (for _SAMPLE_METRIC_LIST below):
    {'loss': 'global_step', 'acc': 'global_step', 'v1': 'other_step'}
"""
_SAMPLE_METRIC_LIST = [
{"1": "global_step", "6": [2]},
{"1": "loss", "5": 1, "6": [1], "7": [1, 2, 3, 4], "8": 2},
{"1": "acc", "5": 1, "6": [1], "7": [1, 2, 3, 4], "8": 2},
{"1": "other_step", "6": [2]},
{"1": "v1", "5": 4, "6": [1], "7": [1, 2, 3, 4], "8": 2},
]
def get_step_metric_dict(ml):
"""Get mapping from metric to preferred x-axis."""
nl = [m["1"] for m in ml]
md = {m["1"]: nl[m["5"] - 1] for m in ml if m.get("5")}
return md
if __name__ == "__main__":
print(get_step_metric_dict(_SAMPLE_METRIC_LIST))
|
wandb/client
|
tests/utils/parse_metrics.py
|
Python
|
mit
| 951
|
# pngsuite.py
# PngSuite Test PNGs.
"""After you import this module with "import pngsuite" use
``pngsuite.bai0g01`` to get the bytes for a particular PNG image, or
use ``pngsuite.png`` to get a dict() of them all.
"""
import binascii
import sys
def _dehex(hexbytes):
"""Liberally convert from hex string to binary string."""
    # A fully liberal approach would strip every non-hexadecimal character:
    #s = re.sub(br'[^a-fA-F\d]', b'', s)
    # but here the non-hexadecimal characters are mainly newlines, so just
    # remove those.
hexbytes = hexbytes.replace(b'\n', b'')
    # binascii.unhexlify works in Python 2 and Python 3 (unlike
# thing.decode('hex')).
return binascii.unhexlify(hexbytes)
# Copies of PngSuite test files taken
# from http://www.schaik.com/pngsuite/pngsuite_bas_png.html
# on 2009-02-19 by drj and converted to hex.
# Some of these are not actually in PngSuite (but maybe they should
# be?), they use the same naming scheme, but start with a capital
# letter.
png = {
'basi0g01': _dehex(b"""
89504e470d0a1a0a0000000d49484452000000200000002001000000012c0677
cf0000000467414d41000186a031e8965f0000009049444154789c2d8d310ec2
300c45dfc682c415187a00a42e197ab81e83b127e00c5639001363a580d8582c
65c910357c4b78b0bfbfdf4f70168c19e7acb970a3f2d1ded9695ce5bf5963df
d92aaf4c9fd927ea449e6487df5b9c36e799b91bdf082b4d4bd4014fe4014b01
ab7a17aee694d28d328a2d63837a70451e1648702d9a9ff4a11d2f7a51aa21e5
a18c7ffd0094e3511d661822f20000000049454e44ae426082
"""),
'basi0g02': _dehex(b"""
89504e470d0a1a0a0000000d49484452000000200000002002000000016ba60d
1f0000000467414d41000186a031e8965f0000005149444154789c635062e860
00e17286bb609c93c370ec189494960631366e4467b3ae675dcf10f521ea0303
90c1ca006444e11643482064114a4852c710baea3f18c31918020c30410403a6
0ac1a09239009c52804d85b6d97d0000000049454e44ae426082
"""),
'basi0g04': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000200400000001e4e6f8
bf0000000467414d41000186a031e8965f000000ae49444154789c658e5111c2
301044171c141c141c041c843a287510ea20d441c041c141c141c04191102454
03994998cecd7edcecedbb9bdbc3b2c2b6457545fbc4bac1be437347f7c66a77
3c23d60db15e88f5c5627338a5416c2e691a9b475a89cd27eda12895ae8dfdab
43d61e590764f5c83a226b40d669bec307f93247701687723abf31ff83a2284b
a5b4ae6b63ac6520ad730ca4ed7b06d20e030369bd6720ed383290360406d24e
13811f2781eba9d34d07160000000049454e44ae426082
"""),
'basi0g08': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000200800000001211615
be0000000467414d41000186a031e8965f000000b549444154789cb5905d0ac2
3010849dbac81c42c47bf843cf253e8878b0aa17110f214bdca6be240f5d21a5
94ced3e49bcd322c1624115515154998aa424822a82a5624a1aa8a8b24c58f99
999908130989a04a00d76c2c09e76cf21adcb209393a6553577da17140a2c59e
70ecbfa388dff1f03b82fb82bd07f05f7cb13f80bb07ad2fd60c011c3c588eef
f1f4e03bbec7ce832dca927aea005e431b625796345307b019c845e6bfc3bb98
769d84f9efb02ea6c00f9bb9ff45e81f9f280000000049454e44ae426082
"""),
'basi0g16': _dehex(b"""
89504e470d0a1a0a0000000d49484452000000200000002010000000017186c9
fd0000000467414d41000186a031e8965f000000e249444154789cb5913b0ec2
301044c7490aa8f85d81c3e4301c8f53a4ca0da8902c8144b3920b4043111282
23bc4956681a6bf5fc3c5a3ba0448912d91a4de2c38dd8e380231eede4c4f7a1
4677700bec7bd9b1d344689315a3418d1a6efbe5b8305ba01f8ff4808c063e26
c60d5c81edcf6c58c535e252839e93801b15c0a70d810ae0d306b205dc32b187
272b64057e4720ff0502154034831520154034c3df81400510cdf0015c86e5cc
5c79c639fddba9dcb5456b51d7980eb52d8e7d7fa620a75120d6064641a05120
b606771a05626b401a05f1f589827cf0fe44c1f0bae0055698ee8914fffffe00
00000049454e44ae426082
"""),
'basi2c08': _dehex(b"""
89504e470d0a1a0a0000000d49484452000000200000002008020000018b1fdd
350000000467414d41000186a031e8965f000000f249444154789cd59341aa04
210c44abc07b78133d59d37333bd89d76868b566d10cf4675af8596431a11662
7c5688919280e312257dd6a0a4cf1a01008ee312a5f3c69c37e6fcc3f47e6776
a07f8bdaf5b40feed2d33e025e2ff4fe2d4a63e1a16d91180b736d8bc45854c5
6d951863f4a7e0b66dcf09a900f3ffa2948d4091e53ca86c048a64390f662b50
4a999660ced906182b9a01a8be00a56404a6ede182b1223b4025e32c4de34304
63457680c93aada6c99b73865aab2fc094920d901a203f5ddfe1970d28456783
26cffbafeffcd30654f46d119be4793f827387fc0d189d5bc4d69a3c23d45a7f
db803146578337df4d0a3121fc3d330000000049454e44ae426082
"""),
'basi2c16': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000201002000001db8f01
760000000467414d41000186a031e8965f0000020a49444154789cd5962173e3
3010853fcf1838cc61a1818185a53e56787fa13fa130852e3b5878b4b0b03081
b97f7030070b53e6b057a0a8912bbb9163b9f109ececbc59bd7dcf2b45492409
d66f00eb1dd83cb5497d65456aeb8e1040913b3b2c04504c936dd5a9c7e2c6eb
b1b8f17a58e8d043da56f06f0f9f62e5217b6ba3a1b76f6c9e99e8696a2a72e2
c4fb1e4d452e92ec9652b807486d12b6669be00db38d9114b0c1961e375461a5
5f76682a85c367ad6f682ff53a9c2a353191764b78bb07d8ddc3c97c1950f391
6745c7b9852c73c2f212605a466a502705c8338069c8b9e84efab941eb393a97
d4c9fd63148314209f1c1d3434e847ead6380de291d6f26a25c1ebb5047f5f24
d85c49f0f22cc1d34282c72709cab90477bf25b89d49f0f351822297e0ea9704
f34c82bc94002448ede51866e5656aef5d7c6a385cb4d80e6a538ceba04e6df2
480e9aa84ddedb413bb5c97b3838456df2d4fec2c7a706983e7474d085fae820
a841776a83073838973ac0413fea2f1dc4a06e71108fda73109bdae48954ad60
bf867aac3ce44c7c1589a711cf8a81df9b219679d96d1cec3d8bbbeaa2012626
df8c7802eda201b2d2e0239b409868171fc104ba8b76f10b4da09f6817ffc609
c413ede267fd1fbab46880c90f80eccf0013185eb48b47ba03df2bdaadef3181
cb8976f18e13188768170f98c0f844bb78cb04c62ddac59d09fc3fa25dfc1da4
14deb3df1344f70000000049454e44ae426082
"""),
'basi3p08': _dehex(b"""
89504e470d0a1a0a0000000d494844520000002000000020080300000133a3ba
500000000467414d41000186a031e8965f00000300504c5445224400f5ffed77
ff77cbffff110a003a77002222ffff11ff110000222200ffac5566ff66ff6666
ff01ff221200dcffffccff994444ff005555220000cbcbff44440055ff55cbcb
00331a00ffecdcedffffe4ffcbffdcdc44ff446666ff330000442200ededff66
6600ffa444ffffaaeded0000cbcbfefffffdfffeffff0133ff33552a000101ff
8888ff00aaaa010100440000888800ffe4cbba5b0022ff22663200ffff99aaaa
ff550000aaaa00cb630011ff11d4ffaa773a00ff4444dc6b0066000001ff0188
4200ecffdc6bdc00ffdcba00333300ed00ed7300ffff88994a0011ffff770000
ff8301ffbabafe7b00fffeff00cb00ff999922ffff880000ffff77008888ffdc
ff1a33000000aa33ffff009900990000000001326600ffbaff44ffffffaaff00
770000fefeaa00004a9900ffff66ff22220000998bff1155ffffff0101ff88ff
005500001111fffffefffdfea4ff4466ffffff66ff003300ffff55ff77770000
88ff44ff00110077ffff006666ffffed000100fff5ed1111ffffff44ff22ffff
eded11110088ffff00007793ff2200dcdc3333fffe00febabaff99ffff333300
63cb00baba00acff55ffffdcffff337bfe00ed00ed5555ffaaffffdcdcff5555
00000066dcdc00dc00dc83ff017777fffefeffffffcbff5555777700fefe00cb
00cb0000fe010200010000122200ffff220044449bff33ffd4aa0000559999ff
999900ba00ba2a5500ffcbcbb4ff66ff9b33ffffbaaa00aa42880053aa00ffaa
aa0000ed00babaffff1100fe00000044009999990099ffcc99ba000088008800
dc00ff93220000dcfefffeaa5300770077020100cb0000000033ffedff00ba00
ff3333edffedffc488bcff7700aa00660066002222dc0000ffcbffdcffdcff8b
110000cb00010155005500880000002201ffffcbffcbed0000ff88884400445b
ba00ffbc77ff99ff006600baffba00777773ed00fe00003300330000baff77ff
004400aaffaafffefe000011220022c4ff8800eded99ff99ff55ff002200ffb4
661100110a1100ff1111dcffbabaffff88ff88010001ff33ffb98ed362000002
a249444154789c65d0695c0b001806f03711a9904a94d24dac63292949e5a810
d244588a14ca5161d1a1323973252242d62157d12ae498c8124d25ca3a11398a
16e55a3cdffab0ffe7f77d7fcff3528645349b584c3187824d9d19d4ec2e3523
9eb0ae975cf8de02f2486d502191841b42967a1ad49e5ddc4265f69a899e26b5
e9e468181baae3a71a41b95669da8df2ea3594c1b31046d7b17bfb86592e4cbe
d89b23e8db0af6304d756e60a8f4ad378bdc2552ae5948df1d35b52143141533
33bbbbababebeb3b3bc9c9c9c6c6c0c0d7b7b535323225a5aa8a02024a4bedec
0a0a2a2bcdcd7d7cf2f3a9a9c9cdcdd8b8adcdd5b5ababa828298982824a4ab2
b21212acadbdbc1414e2e24859b9a72730302f4f49292c4c57373c9c0a0b7372
8c8c1c1c3a3a92936d6dfdfd293e3e26262a4a4eaea2424b4b5fbfbc9c323278
3c0b0ba1303abaae8ecdeeed950d6669a9a7a7a141d4de9e9d5d5cdcd2229b94
c572716132f97cb1d8db9bc3110864a39795d9db6b6a26267a7a9a98d4d6a6a7
cb76090ef6f030354d4d75766e686030545464cb393a1a1ac6c68686eae8f8f9
a9aa4644c8b66d6e1689dcdd2512a994cb35330b0991ad9f9b6b659596a6addd
d8282fafae5e5323fb8f41d01f76c22fd8061be01bfc041a0323e1002c81cd30
0b9ec027a0c930014ec035580fc3e112bc069a0b53e11c0c8095f00176c163a0
e5301baec06a580677600ddc05ba0f13e120bc81a770133ec355a017300d4ec2
0c7800bbe1219c02fa08f3e13c1c85dbb00a2ec05ea0dff00a6ec15a98027360
070c047a06d7e1085c84f1b014f6c03fa0b33018b6c0211801ebe018fc00da0a
6f61113c877eb01d4ec317a085700f26c130f80efbe132bc039a0733e106fc81
f7f017f6c10aa0d1300a0ec374780943e1382c06fa0a9b60238c83473016cec0
02f80f73fefe1072afc1e50000000049454e44ae426082
"""),
'basi6a08': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000200806000001047d4a
620000000467414d41000186a031e8965f0000012049444154789cc595414ec3
3010459fa541b8bbb26641b8069b861e8b4d12c1c112c1452a710a2a65d840d5
949041fc481ec98ae27c7f3f8d27e3e4648047600fec0d1f390fbbe2633a31e2
9389e4e4ea7bfdbf3d9a6b800ab89f1bd6b553cfcbb0679e960563d72e0a9293
b7337b9f988cc67f5f0e186d20e808042f1c97054e1309da40d02d7e27f92e03
6cbfc64df0fc3117a6210a1b6ad1a00df21c1abcf2a01944c7101b0cb568a001
909c9cf9e399cf3d8d9d4660a875405d9a60d000b05e2de55e25780b7a5268e0
622118e2399aab063a815808462f1ab86890fc2e03e48bb109ded7d26ce4bf59
0db91bac0050747fec5015ce80da0e5700281be533f0ce6d5900b59bcb00ea6d
200314cf801faab200ea752803a8d7a90c503a039f824a53f4694e7342000000
0049454e44ae426082
"""),
'basn0g01': _dehex(b"""
89504e470d0a1a0a0000000d49484452000000200000002001000000005b0147
590000000467414d41000186a031e8965f0000005b49444154789c2dccb10903
300c05d1ebd204b24a200b7a346f90153c82c18d0a61450751f1e08a2faaead2
a4846ccea9255306e753345712e211b221bf4b263d1b427325255e8bdab29e6f
6aca30692e9d29616ee96f3065f0bf1f1087492fd02f14c90000000049454e44
ae426082
"""),
'basn0g02': _dehex(b"""
89504e470d0a1a0a0000000d49484452000000200000002002000000001ca13d
890000000467414d41000186a031e8965f0000001f49444154789c6360085df5
1f8cf1308850c20053868f0133091f6390b90700bd497f818b0989a900000000
49454e44ae426082
"""),
# A version of basn0g04 dithered down to 3 bits.
'Basn0g03': _dehex(b"""
89504e470d0a1a0a0000000d494844520000002000000020040000000093e1c8
2900000001734249540371d88211000000fd49444154789c6d90d18906210c84
c356f22356b2889588604301b112112b11d94a96bb495cf7fe87f32d996f2689
44741cc658e39c0b118f883e1f63cc89dafbc04c0f619d7d898396c54b875517
83f3a2e7ac09a2074430e7f497f00f1138a5444f82839c5206b1f51053cca968
63258821e7f2b5438aac16fbecc052b646e709de45cf18996b29648508728612
952ca606a73566d44612b876845e9a347084ea4868d2907ff06be4436c4b41a3
a3e1774285614c5affb40dbd931a526619d9fa18e4c2be420858de1df0e69893
a0e3e5523461be448561001042b7d4a15309ce2c57aef2ba89d1c13794a109d7
b5880aa27744fc5c4aecb5e7bcef5fe528ec6293a930690000000049454e44ae
426082
"""),
'basn0g04': _dehex(b"""
89504e470d0a1a0a0000000d494844520000002000000020040000000093e1c8
290000000467414d41000186a031e8965f0000004849444154789c6360601014
545232367671090d4d4b2b2f6720430095dbd1418e002a77e64c720450b9ab56
912380caddbd9b1c0154ee9933e408a072efde25470095fbee1d1902001f14ee
01eaff41fa0000000049454e44ae426082
"""),
'basn0g08': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000200800000000561125
280000000467414d41000186a031e8965f0000004149444154789c6364602400
1408c8b30c05058c0f0829f8f71f3f6079301c1430ca11906764a2795c0c0605
8c8ff0cafeffcff887e67131181430cae0956564040050e5fe7135e2d8590000
000049454e44ae426082
"""),
'basn0g16': _dehex(b"""
89504e470d0a1a0a0000000d49484452000000200000002010000000000681f9
6b0000000467414d41000186a031e8965f0000005e49444154789cd5d2310ac0
300c4351395bef7fc6dca093c0287b32d52a04a3d98f3f3880a7b857131363a0
3a82601d089900dd82f640ca04e816dc06422640b7a03d903201ba05b7819009
d02d680fa44c603f6f07ec4ff41938cf7f0016d84bd85fae2b9fd70000000049
454e44ae426082
"""),
'basn2c08': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000200802000000fc18ed
a30000000467414d41000186a031e8965f0000004849444154789cedd5c10900
300c024085ec91fdb772133b442bf4a1f8cee12bb40d043b800a14f81ca0ede4
7d4c784081020f4a871fc284071428f0a0743823a94081bb7077a3c00182b1f9
5e0f40cf4b0000000049454e44ae426082
"""),
'basn2c16': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000201002000000ac8831
e00000000467414d41000186a031e8965f000000e549444154789cd596c10a83
301044a7e0417fcb7eb7fdadf6961e06039286266693cc7a188645e43dd6a08f
1042003e2fe09aef6472737e183d27335fcee2f35a77b702ebce742870a23397
f3edf2705dd10160f3b2815fe8ecf2027974a6b0c03f74a6e4192843e75c6c03
35e8ec3202f5e84c0181bbe8cca967a00d9df3491bb040671f2e6087ce1c2860
8d1e05f8c7ee0f1d00b667e70df44467ef26d01fbd9bc028f42860f71d188bce
fb8d3630039dbd59601e7ab3c06cf428507f0634d039afdc80123a7bb1801e7a
b1802a7a14c89f016d74ce331bf080ce9e08f8414f04bca133bfe642fe5e07bb
c4ec0000000049454e44ae426082
"""),
'basn3p04': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000200403000000815467
c70000000467414d41000186a031e8965f000000037342495404040477f8b5a3
0000002d504c54452200ff00ffff8800ff22ff000099ffff6600dd00ff77ff00
ff000000ff99ddff00ff00bbffbb000044ff00ff44d2b049bd00000047494441
54789c63e8e8080d3d7366d5aaf27263e377ef66ce64204300952b28488e002a
d7c5851c0154eeddbbe408a07119c81140e52a29912380ca4d4b23470095bb7b
37190200e0c4ead10f82057d0000000049454e44ae426082
"""),
'basn6a08': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000200806000000737a7a
f40000000467414d41000186a031e8965f0000006f49444154789cedd6310a80
300c46e12764684fa1f73f55048f21c4ddc545781d52e85028fc1f4d28d98a01
305e7b7e9cffba33831d75054703ca06a8f90d58a0074e351e227d805c8254e3
1bb0420f5cdc2e0079208892ffe2a00136a07b4007943c1004d900195036407f
011bf00052201a9c160fb84c0000000049454e44ae426082
"""),
'cs3n3p08': _dehex(b"""
89504e470d0a1a0a0000000d494844520000002000000020080300000044a48a
c60000000467414d41000186a031e8965f0000000373424954030303a392a042
00000054504c544592ff0000ff9200ffff00ff0000dbff00ff6dffb600006dff
b6ff00ff9200dbff000049ffff2400ff000024ff0049ff0000ffdb00ff4900ff
b6ffff0000ff2400b6ffffdb000092ffff6d000024ffff49006dff00df702b17
0000004b49444154789c85cac70182000000b1b3625754b0edbfa72324ef7486
184ed0177a437b680bcdd0031c0ed00ea21f74852ed00a1c9ed0086da0057487
6ed0121cd6d004bda0013a421ff803224033e177f4ae260000000049454e44ae
426082
"""),
'f02n0g08': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000200800000000561125
280000012a49444154789c85d12f4b83511805f0c3f938168b2088200882410c
03834dd807182c588749300c5604c30b0b03c360e14d826012c162b1182c8241
100441f47dee5fc3a6f7b9efc2bdf9c7e59cf370703a3caf26d3faeae6f6fee1
f1e9f9e5f5edfde3f3ebbb31d6f910227f1a6944448c31d65aebac77de7b1f42
883146444a41b029084a41500a825210340541d1e2607f777b733d13344a7401
00c8046d127da09a4ceb5cd024010c45446a40e5a04d029827055452da247ac7
f32e80ea42a7c4a20ba0dad22e892ea0f6a06b8b3e50a9c5e85ae264d1e54fd0
e762040cb2d5e93331067af95de8b4980147adcb3128710d74dab7a54fe20ec0
ec727c313a53822109fc3ff50743122bab6b1b5b3b7b9d439d834189e5d54518
0b82b120180b82b1208882200ae217e9e497bfbfccebfd0000000049454e44ae
426082
"""),
's09n3p02': _dehex(b"""
89504e470d0a1a0a0000000d49484452000000090000000902030000009dffee
830000000467414d41000186a031e8965f000000037342495404040477f8b5a3
0000000c504c544500ff000077ffff00ffff7700ff5600640000001f49444154
789c63600002fbff0c0c56ab19182ca381581a4283f82071200000696505c36a
437f230000000049454e44ae426082
"""),
'tbgn3p08': _dehex(b"""
89504e470d0a1a0a0000000d494844520000002000000020080300000044a48a
c60000000467414d41000186a031e8965f00000207504c54457f7f7fafafafab
abab110000222200737300999999510d00444400959500959595e6e600919191
8d8d8d620d00898989666600b7b700911600000000730d007373736f6f6faaaa
006b6b6b676767c41a00cccc0000f30000ef00d51e0055555567670000dd0051
515100d1004d4d4de61e0038380000b700160d0d00ab00560d00090900009500
009100008d003333332f2f2f2f2b2f2b2b000077007c7c001a05002b27000073
002b2b2b006f00bb1600272727780d002323230055004d4d00cc1e00004d00cc
1a000d00003c09006f6f00002f003811271111110d0d0d55554d090909001100
4d0900050505000d00e2e200000900000500626200a6a6a6a2a2a29e9e9e8484
00fb00fbd5d500801100800d00ea00ea555500a6a600e600e6f7f700e200e233
0500888888d900d9848484c01a007777003c3c05c8c8008080804409007c7c7c
bb00bbaa00aaa600a61e09056262629e009e9a009af322005e5e5e05050000ee
005a5a5adddd00a616008d008d00e20016050027270088110078780000c40078
00787300736f006f44444400aa00c81e004040406600663c3c3c090000550055
1a1a00343434d91e000084004d004d007c004500453c3c00ea1e00222222113c
113300331e1e1efb22001a1a1a004400afaf00270027003c001616161e001e0d
160d2f2f00808000001e00d1d1001100110d000db7b7b7090009050005b3b3b3
6d34c4230000000174524e530040e6d86600000001624b474402660b7c640000
01f249444154789c6360c0048c8c58049100575f215ee92e6161ef109cd2a15e
4b9645ce5d2c8f433aa4c24f3cbd4c98833b2314ab74a186f094b9c2c27571d2
6a2a58e4253c5cda8559057a392363854db4d9d0641973660b0b0bb76bb16656
06970997256877a07a95c75a1804b2fbcd128c80b482a0b0300f8a824276a9a8
ec6e61612b3e57ee06fbf0009619d5fac846ac5c60ed20e754921625a2daadc6
1967e29e97d2239c8aec7e61fdeca9cecebef54eb36c848517164514af16169e
866444b2b0b7b55534c815cc2ec22d89cd1353800a8473100a4485852d924a6a
412adc74e7ad1016ceed043267238c901716f633a812022998a4072267c4af02
92127005c0f811b62830054935ce017b38bf0948cc5c09955f030a24617d9d46
63371fd940b0827931cbfdf4956076ac018b592f72d45594a9b1f307f3261b1a
084bc2ad50018b1900719ba6ba4ca325d0427d3f6161449486f981144cf3100e
2a5f2a1ce8683e4ddf1b64275240c8438d98af0c729bbe07982b8a1c94201dc2
b3174c9820bcc06201585ad81b25b64a2146384e3798290c05ad280a18c0a62e
e898260c07fca80a24c076cc864b777131a00190cdfa3069035eccbc038c30e1
3e88b46d16b6acc5380d6ac202511c392f4b789aa7b0b08718765990111606c2
9e854c38e5191878fbe471e749b0112bb18902008dc473b2b2e8e72700000000
49454e44ae426082
"""),
'Tp2n3p08': _dehex(b"""
89504e470d0a1a0a0000000d494844520000002000000020080300000044a48a
c60000000467414d41000186a031e8965f00000300504c544502ffff80ff05ff
7f0703ff7f0180ff04ff00ffff06ff000880ff05ff7f07ffff06ff000804ff00
0180ff02ffff03ff7f02ffff80ff0503ff7f0180ffff0008ff7f0704ff00ffff
06ff000802ffffff7f0704ff0003ff7fffff0680ff050180ff04ff000180ffff
0008ffff0603ff7f80ff05ff7f0702ffffff000880ff05ffff0603ff7f02ffff
ff7f070180ff04ff00ffff06ff000880ff050180ffff7f0702ffff04ff0003ff
7fff7f0704ff0003ff7f0180ffffff06ff000880ff0502ffffffff0603ff7fff
7f0702ffff04ff000180ff80ff05ff0008ff7f07ffff0680ff0504ff00ff0008
0180ff03ff7f02ffff02ffffffff0604ff0003ff7f0180ffff000880ff05ff7f
0780ff05ff00080180ff02ffffff7f0703ff7fffff0604ff00ff7f07ff0008ff
ff0680ff0504ff0002ffff0180ff03ff7fff0008ffff0680ff0504ff000180ff
02ffff03ff7fff7f070180ff02ffff04ff00ffff06ff0008ff7f0780ff0503ff
7fffff06ff0008ff7f0780ff0502ffff03ff7f0180ff04ff0002ffffff7f07ff
ff0604ff0003ff7fff00080180ff80ff05ffff0603ff7f0180ffff000804ff00
80ff0502ffffff7f0780ff05ffff0604ff000180ffff000802ffffff7f0703ff
7fff0008ff7f070180ff03ff7f02ffff80ff05ffff0604ff00ff0008ffff0602
ffff0180ff04ff0003ff7f80ff05ff7f070180ff04ff00ff7f0780ff0502ffff
ff000803ff7fffff0602ffffff7f07ffff0680ff05ff000804ff0003ff7f0180
ff02ffff0180ffff7f0703ff7fff000804ff0080ff05ffff0602ffff04ff00ff
ff0603ff7fff7f070180ff80ff05ff000803ff7f0180ffff7f0702ffffff0008
04ff00ffff0680ff0503ff7f0180ff04ff0080ff05ffff06ff000802ffffff7f
0780ff05ff0008ff7f070180ff03ff7f04ff0002ffffffff0604ff00ff7f07ff
000880ff05ffff060180ff02ffff03ff7f80ff05ffff0602ffff0180ff03ff7f
04ff00ff7f07ff00080180ffff000880ff0502ffff04ff00ff7f0703ff7fffff
06ff0008ffff0604ff00ff7f0780ff0502ffff03ff7f0180ffdeb83387000000
f874524e53000000000000000008080808080808081010101010101010181818
1818181818202020202020202029292929292929293131313131313131393939
393939393941414141414141414a4a4a4a4a4a4a4a52525252525252525a5a5a
5a5a5a5a5a62626262626262626a6a6a6a6a6a6a6a73737373737373737b7b7b
7b7b7b7b7b83838383838383838b8b8b8b8b8b8b8b94949494949494949c9c9c
9c9c9c9c9ca4a4a4a4a4a4a4a4acacacacacacacacb4b4b4b4b4b4b4b4bdbdbd
bdbdbdbdbdc5c5c5c5c5c5c5c5cdcdcdcdcdcdcdcdd5d5d5d5d5d5d5d5dedede
dededededee6e6e6e6e6e6e6e6eeeeeeeeeeeeeeeef6f6f6f6f6f6f6f6b98ac5
ca0000012c49444154789c6360e7169150d230b475f7098d4ccc28a96ced9e32
63c1da2d7b8e9fb97af3d1fb8f3f18e8a0808953544a4dd7c4c2c9233c2621bf
b4aab17fdacce5ab36ee3a72eafaad87efbefea68702362e7159652d031b07cf
c0b8a4cce28aa68e89f316aedfb4ffd0b92bf79fbcfcfe931e0a183904e55435
8decdcbcc22292b3caaadb7b27cc5db67af3be63e72fdf78fce2d31f7a2860e5
119356d037b374f10e8a4fc92eaa6fee99347fc9caad7b0f9ebd74f7c1db2fbf
e8a180995f484645dbdccad12f38363dafbcb6a573faeca5ebb6ed3e7ce2c29d
e76fbefda38702063e0149751d537b67ff80e8d4dcc29a86bea97316add9b0e3
c0e96bf79ebdfafc971e0a587885e515f58cad5d7d43a2d2720aeadaba26cf5a
bc62fbcea3272fde7efafac37f3a28000087c0fe101bc2f85f0000000049454e
44ae426082
"""),
'tbbn1g04': _dehex(b"""
89504e470d0a1a0a0000000d494844520000002000000020040000000093e1c8
290000000467414d41000186a031e8965f0000000274524e530007e8f7589b00
000002624b47440000aa8d23320000013e49444154789c55d1cd4b024118c7f1
efbe6419045b6a48a72d352808b435284f9187ae9b098627a1573a19945beba5
e8129e8222af11d81e3a4545742de8ef6af6d5762e0fbf0fc33c33f36085cb76
bc4204778771b867260683ee57e13f0c922df5c719c2b3b6c6c25b2382cea4b9
9f7d4f244370746ac71f4ca88e0f173a6496749af47de8e44ba8f3bf9bdfa98a
0faf857a7dd95c7dc8d7c67c782c99727997f41eb2e3c1e554152465bb00fe8e
b692d190b718d159f4c0a45c4435915a243c58a7a4312a7a57913f05747594c6
46169866c57101e4d4ce4d511423119c419183a3530cc63db88559ae28e7342a
1e9c8122b71139b8872d6e913153224bc1f35b60e4445bd4004e20ed6682c759
1d9873b3da0fbf50137dc5c9bde84fdb2ec8bde1189e0448b63584735993c209
7a601bd2710caceba6158797285b7f2084a2f82c57c01a0000000049454e44ae
426082
"""),
'tbrn2c08': _dehex(b"""
89504e470d0a1a0a0000000d4948445200000020000000200802000000fc18ed
a30000000467414d41000186a031e8965f0000000674524e53007f007f007f8a
33334f00000006624b474400ff0000000033277cf3000004d649444154789cad
965f68537714c73fd912d640235e692f34d0406fa0c1663481045ab060065514
56660a295831607df0a1488715167060840a1614e6431e9cb34fd2c00a762c85
f6a10f816650c13b0cf40612e1822ddc4863bd628a8924d23d6464f9d3665dd9
f7e977ce3dbff3cd3939bfdfef6bb87dfb364782dbed065ebe7cd93acc78b4ec
a228debd7bb7bfbfbfbbbbfb7f261045311a8d261209405194274f9ea4d3e916
f15f1c3eb5dd6e4fa5fecce526239184a2b0b8486f6f617171b1f5ae4311381c
8e57af5e5dbd7a351088150a78bd389d44222c2f93cdfe66b7db8f4ee07038b6
b6b6bebf766d7e7e7e60a06432313b4ba984c3c1c4049a46b95c5a58583822c1
dbb76f27272733d1b9df853c3030c0f232562b9108cf9eb1b888d7cbf030abab
31abd5fa1f08dc6ef7e7cf9f1f3f7e1c8944745d4f1400c62c001313acad21cb
b8dd2c2c603271eb1640341aad4c6d331aa7e8c48913a150a861307ecc11e964
74899919bc5e14e56fffc404f1388502f178dceff7ef4bf0a5cfe7abb533998c
e5f9ea2f1dd88c180d64cb94412df3dd57e83a6b3b3c7a84c98420100c72fd3a
636348bae726379fe69e8e8d8dbd79f3a6558b0607079796965256479b918085
7b02db12712b6181950233023f3f647494ee6e2e5ea45864cce5b8a7fe3acffc
3aebb22c2bd5d20e22d0757d7b7bbbbdbd3d94a313bed1b0aa3cd069838b163a
8d4c59585f677292d0b84d9a995bd337def3fe6bbe5e6001989b9b6bfe27ea08
36373781542ab56573248b4c5bc843ac4048c7ab21aa24ca00534c25482828a3
8c9ee67475bbaaaab22cb722c8e57240a150301a8d219de94e44534d7d90e885
87acb0e2c4f9800731629b6c5ee14a35a6b9887d2a0032994cb9cf15dbe59650
ff7b46a04c9a749e7cc5112214266cc65c31354d5b5d5d3d90209bcd5616a552
a95c2e87f2a659bd9ee01c2cd73964e438f129a6aa9e582c363838b80f81d7eb
5555b56a2a8ad2d9d7affd0409f8015c208013fea00177b873831b0282c964f2
783c1e8fa7582cee5f81a669b5e6eeeeaee58e8559b0c233d8843c7c0b963a82
34e94b5cb2396d7d7d7db22c8ba258fb0afd43f0e2c58b919191ba9de9b4d425
118329b0c3323c8709d02041b52b4ea7f39de75d2a934a2693c0a953a76a93d4
5d157ebf7f6565a5542a553df97c5e10045dd731c130b86113cc300cbd489224
08422a952a140a95788fc763b1d41558d7a2d7af5f5fb870a1d6a3aaaacd6603
18802da84c59015bd2e6897b745d9765b99a1df0f97c0daf74e36deaf7fbcd66
73ad2797cb89a2c839880188a2e8743a8bc5a22ccbba5e376466b3b9bdbdbd21
6123413a9d0e0402b51e4dd3bababa788eb022b85caeb6b6364551b6b7b76942
43f7f727007a7a7a04a1ee8065b3595fde2768423299ac1ec6669c3973e65004
c0f8f878ad69341a33994ced2969c0d0d0502412f9f8f163f3a7fd654b474787
288ad53e74757535df6215b85cae60302849d2410aecc037f9f2e5cbd5b5c160
680eb0dbede170381c0e7ff8f0a185be3b906068684892a4ca7a6f6faff69328
8ad3d3d3f7efdfdfdbdbfb57e96868a14d0d0643381c96242997cbe5f3794010
84603078fcf8f1d6496bd14a3aba5c2ea7d369341a5555b5582c8140e0fcf9f3
1b1b1b87cf4eeb0a8063c78e45a3d19e9e1ebfdfdf5a831e844655d18093274f
9e3d7bf6d3a74f3b3b3b47c80efc05ff7af28fefb70d9b0000000049454e44ae
426082
"""),
'basn6a16': _dehex(b"""
89504e470d0a1a0a0000000d494844520000002000000020100600000023eaa6
b70000000467414d41000186a031e8965f00000d2249444154789cdd995f6c1c
d775c67ff38fb34b724d2ee55a8e4b04a0ac87049100cab4dbd8c6528902cb4d
10881620592e52d4325ac0905bc98a94025e71fd622cb5065ac98a0c283050c0
728a00b6e542a1d126885cd3298928891d9a0444037e904434951d4b90b84b2f
c9dde1fcebc33977a95555348f411e16dfce9d3b77ee77eebde77ce78c95a669
0ad07c17009a13edd898b87dfb1fcb7d2b4d1bff217f33df80deb1e6267df0ff
c1e6e6dfafdf1f5a7fd30f9aef66b6d546dd355bf02c40662e3307f9725a96c6
744c3031f83782f171c148dbc3bf1774f5dad1e79d6f095a3f54d4fbec5234ef
d9a2f8d73afe4f14f57ef4f42def7b44f19060f06b45bddf1c5534d77fd922be
2973a15a82e648661c6e3240aa3612ead952b604bde57458894f29deaf133bac
13d2766f5227a4a3b8cf08da7adfd6fbd6bd8a4fe9dbb43d35e3dfa3f844fbf8
9119bf4f7144094fb56333abf8a86063ca106f94b3a3b512343765e60082097f
1bb86ba72439a653519b09f5cee1ce61c897d37eedf5553580ae60f4af8af33a
b14fd400b6a0f34535c0434afc0b3a9f07147527a5fa7ca218ff56c74d74dc3f
155cfd3325fc278acf2ae1cb4a539f5f9937c457263b0bd51234c732a300cdd1
cc1840f0aaff54db0e4874ed5a9b5d6d27d4bb36746d80de72baa877ff4b275a
d7895ed1897ea4139b5143fcbb1a62560da1ed9662aaed895ec78a91c18795b8
5e07ab4af8ba128e95e682e0728bf8f2e5ae815a091a53d902ac1920d8e05f06
589de8d8d66680789f4e454fb9d9ec66cd857af796ee2d902fa73fd5bba775a2
153580ae44705ed0d37647d15697cb8f14bfa3e3e8fdf8031d47af571503357c
f30d25acedcbbf135c9a35c49766ba07ab255859e8ec03684e66860182dff8f7
0304bff6ff1c20fc81b7afdd00a71475539a536e36bb5973a19e3b923b02bde5
e4efd4003ac170eb2d13fe274157afedbd82d6fb3a9a1e85e4551d47cf7078f8
9671fe4289ebf5f2bf08d63f37c4eb4773c55a0996efeefa0ca011671d8060ca
2f0004c7fcc300e166ef0240f825efe3361f106d57d423d0723f7acacd66376b
2ed47b7a7a7a205f4ef4ac4691e0aad9aa0d41cf13741c3580a506487574ddca
61a8c403c1863ebfbcac3475168b2de28b8b3d77544bb05ce92a02aceced3c0d
d0cc65ea371b201cf1c601c24dde1c4078cedbdeb60322f50126a019bf6edc9b
39e566b39b3517eaf97c3e0fbde5e4491d45bd74537145d155b476aa0176e868
c6abebf30dbd5e525c54ac8e18e2d56abeb756827a3d970358a97416019a6f64
f60004fdfe1580d5c98e618070cc1b05887eee7e0d209a70db7d8063029889b4
c620ead78d7b33a7dc6c76b3e6427ddddbebde867c393aa7845e5403e8ca794a
d0d6fb897af5f03525fe5782f5e7046bdaef468bf88d1debc6ab25583cd17310
6079b9ab0ba059c914018245bf076075b5a303200c3c1f209a733701444fbbaf
00c4134ebb016c5d0b23614c243701cdf875e3decce9349bddacb9505fbf7dfd
76e82d87736a00f5d2b5ffd4b7dce2719a4d25ae717ee153c1abef18e257cfad
7fa45682da48ef38c052b53b0fd06864b300c151ff08c0ea431de701a287dd5f
004497dc7b01a253ee3e80b8c7f91c20f967fb6fdb7c80ada7d8683723614c24
3701cdf875e3decc29379bddacb950ef3fd47f08f2e5a61ea4aa2a3eb757cd55
13345efcfa59c12b2f19e2578ef77fb75a82854ffbee01a83f977b11a031931d
040802df07082b5e11207cc17b1e209a770700e2df0a83e409fb7580f827c230
99b06fd901fb058d6835dacd481813c94d40337eddb83773cacd66376b2ed437
bebcf165e82d2f4e4beb7f3fa6e652c2d7ee10bc78c010bfb87fe3c95a09ae9f
bd732740bd2fb700d0f865f64180e059ff044018ca0ca28a5b04883f701e0088
bfec7c0c909cb71f0448c6ec518074b375012079d9dedf66004bcfbc51eb2dd1
aadacd481813c94d40337eddb83773cacd66376b2ed487868686205fbe7c49ef
5605a73f34c4a7a787eeab96e0da81bb4e022c15ba27019a5b339300e16bf286
a8eae601e25866907cdf3e0890acb36f00245fb57f05904e59c300e92561946e
b2e600d209ab7d07f04d458dfb46ad1bd16ab49b913026929b8066fcba716fe6
949bcd6ed65ca8ef7e7cf7e3d05b7e7c8f217ee6cdddbb6a25a856f37980e0c7
fe4e80a82623c48193014846ec7180f4acf518409aca0cd28a5504e03b32c374
de1a00608a0240faaa327a4b19fe946fb6f90054dbb5f2333d022db56eb4966a
3723614c243701cdf8f556bea8a7dc6c76b3e66bd46584ddbbcebc0990cf4b0f
ff4070520c282338a7e26700ec725202b01e4bcf0258963c6f1d4d8f0030cb20
805549c520930c03584fa522b676f11600ffc03fde3e1b3489a9c9054c9aa23b
c08856a3dd8c843191dc0434e3d78d7b33a75c36fb993761f7ae5a69f72ef97f
e6ad336fed7e1c60e8bee96980bbdebbb60da07b7069062033d9dc0ae03d296f
70ab511ec071640676252902d833c916007b3e1900b0a6d2028035968e025861
ea01581369fb11488c34d18cbc95989afccca42baad65ba2d5683723614c24d7
8066fcbab8b7e96918baaf5aaa56219f975fb50a43f7c9bde90fa73f1c1a02d8
78f2e27e803b77ca08b90519315b6fe400fc1392097a9eccc0ad444500e70199
a1331f0f00d8934901c07e5d526ceb87c2d07e2579badd005a2b31a5089391b7
1253358049535a6add8856dd0146c298482e01ede27ed878b256ba7600ee3a09
c18fc1df09fe01084ec25defc1b56db0f1a4f4bd78e0e2818d2f0334e7330300
7df7c888b917e50dd9c1c60c80efcb0cbc63e1f700bce7c31700dccbd1060027
8add9b0de06c8e2f00d84962b7d7030e2a61538331b98051f92631bd253f336a
dd8856a3dd44c25c390efddfad96ae9f853b77c25201ba27c533b8bdf28b6ad0
3d084b33d2e7fa59099e9901b8f2d29597fa0f01848f78e70082117f1ca07b76
6910209b9519f895a008d031bbba05c09d8f06005c5b18b8fba25300cea6780e
c03e911c6ccf06d507b48a4fa606634a114609de929f9934c5a87511ad57cfc1
fa476aa5854fa1ef1e3910b905686e85cc24c40138198915f133d2d6dc2a7dea
7df2ccc2a752faf2cec1d577aebeb37e3b4034eeee0008dff3be0e6b923773b4
7904c0ef9119767cb4fa1500ef1361e08e452500f71561e84cc4ed3e20fab6a2
c905f40cb76a3026bf3319b91ac2e46792a6dcd801ebc6aba5da08f48ecb81c8
bd088d5f42f6417191de93908c803d0e76199292b485af41b60e8d9c3c537f0e
8211f0c7211a077707dc18b931b2ee6d80a4d7ae024491ebc24d4a708ff70680
7f25e807e8785f1878e322d6ddaf453f0770ff2dfa769b01423dbbad72a391b6
5a7c3235985629423372494cab55c8f7d64a8b27a0e7202c55a13b0f8d19c80e
4ae9ca3f015115dc3ca467c17a4c7ee95970ab10e5a54ff0ac3cd39881ee5958
1a84f03df0be0e492fd855a8d6aa35d10b4962dbb0a604a3d3ee5e80a8eee600
a24977f8660378bf0bbf00e01d0a8fb7f980f04b8aa6ce6aca8d5a7533c52753
839152c4e222f4dc512dd5eb90cbc981e8ea12cf90cd8a8bf47d89159e2741d3
7124f65b96fcd254dae258fa84a13c13043246a32129574787e49eae2b49b86d
c3e2e78b9ff7f4002415bb08907c66df0d103b4e0c104db90500ff70700c203a
ee1e82dba4c3e16e256c0acca6ceaae9afd1f612d7eb472157ac95962bd05594
7dd1598466053245088e827f44628657942a825b84e4fb601f84b4025611aca3
901e01bb024911dc0a4445f08e41f83df02b10142173149ab71baf027611ea95
7a257704201d14cd9af4d90b00f194530088cb4e09c0df1c5c0088f7393f6833
c0aa3ac156655de3bca9b34ab9716906ba07aba5e5bba1eb3358d90b9da7c533
64f6888bf47b60f521e8380fe10be03d2feac17900927560df40f4e48f805960
50328d648bf4893f9067c217a0631656b7c898c122847bc07b03a2d3e0ee85e4
33b0ef867450c4fad2ecd26cf7168074c0ba0c904cdac300c9cfec4701924df6
1cdca61e10685c6f7d52d0caba1498972f43d740adb4b2009d7d7220b20e3473
90a943d00ffe959bb6eac3e0fe42ea49ee00c45f06e76329b1dabf127d690d80
5581b408f63c2403e0cc433c00ee658836803b0fd100747c04ab5f917704fd10
d5c1cd41ec801343d207f602a403605d86e5f9e5f9ae0d00e994556833806685
c931fb709b0f08b4e869bea5c827859549e82c544b8d29c816a0390999613920
7e610d5727a16318c2003c1fa24be0de2b32caf92224e7c17e5004b6350c4c01
05601218066b0ad28224e149019c086257ca315102de2712903bde97b8144d82
3b2c6ac52d403c054e019249b087f53d0558995a99ea946c70cc927458b3c1ff
550f30050df988d4284376b4566a8e416654cc921985e037e0df0fc131f00f4b
acf0c6211c036f14a239703741740adc7da227edd7e56b833d0ae92549b4d357
25dfb49ed2ff63908e6adf27d6d0dda7638d4154d2778daca17f58e61297c129
41f233b01f5dc3740cac51688c35c6b22580f48224fee9b83502569a66b629f1
09f3713473413e2666e7fe6f6c6efefdfafda1f56f6e06f93496d9d67cb7366a
9964b6f92e64b689196ec6c604646fd3fe4771ff1bf03f65d8ecc3addbb5f300
00000049454e44ae426082
"""),
}
for name, data in png.items():
setattr(sys.modules[__name__], name, data)
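# Usage sketch (assumed from the module docstring): each image is reachable
# either as a module attribute or through the `png` dict.
#     import pngsuite
#     data = pngsuite.png['basn0g01']   # raw PNG bytes
#     assert data == pngsuite.basn0g01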
|
Ratfink/micropython-png
|
pngsuite.py
|
Python
|
mit
| 31,193
|
#@Author: Kyle Mede, kylemede@astron.s.u-tokyo.ac.jp or kylemede@gmail.com
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import copy
import KMlogger
from six.moves import range
## import from modules in ExoSOFT ##
from .cytools import orbit, model_input_pars
from .utils import load_di_data, load_rv_data
log = KMlogger.getLogger('main.model',lvl=100,addFH=False)
class ExoSOFTmodel(object):
"""
"""
def __init__(self,sd):
####################
## member variables
####################
#resulting fit values
self.chi_squared_3d = 0
self.chi_squared_di = 0
self.chi_squared_rv = 0
self.prior = 0
self.sd = sd
        ## TODO: track the best chi squareds found so far in here?
        ## It may make more sense to rename this 'ExoSOFTresults' and name the object 'Results'.
## load in the RV and Astrometry (DI) data
(epochs_di, rapa, rapa_err, decsa, decsa_err) = load_di_data(self.sd['di_dataFile'])
(epochs_rv, rv, rv_err, rv_inst_num) = load_rv_data(self.sd['rv_dataFile'])
## prior functions??
self.Params = ExoSOFTparams(self.sd['omega_offset_di'],
self.sd['omega_offset_rv'], self.sd['vary_tc'], self.sd['tc_equal_to'],
self.sd['data_mode'], self.sd['low_ecc'], self.sd['range_maxs'], self.sd['range_mins'],
self.sd['num_offsets'])
self.Data = ExoSOFTdata(epochs_di, epochs_rv, rapa, rapa_err, decsa, decsa_err,
rv, rv_err, rv_inst_num,self.sd['data_mode'], self.sd['pasa'])
ExoSOFTpriors = self.sd['ExoSOFTpriors']
self.Priors = ExoSOFTpriors(ecc_prior=self.sd['ecc_prior'],
p_prior=self.sd['p_prior'], inc_prior=self.sd['inc_prior'],
m1_prior=self.sd['m1_prior'], m2_prior=self.sd['m2_prior'],
para_prior=self.sd['para_prior'],para_est=self.sd['para_est'],
para_err=self.sd['para_err'], m1_est=self.sd['m1_est'],
m1_err=self.sd['m1_err'], m2_est=self.sd['m2_est'],
m2_err=self.sd['m2_err'],
mins_ary=self.sd['range_mins'],maxs_ary=self.sd['range_maxs'])
class ExoSOFTparams(object):
"""
+---+--------------------+---------------+-------------------+-------+
| | Directly Varried | Model Inputs | Stored Parameters | |
+---+--------------------+---------------+-------------------+-------+
| | direct_pars | model_in_pars | stored_pars | |
+---+--------------------+---------------+-------------------+-------+
| i | Parameter | Parameter | Parameter | units |
+===+====================+===============+===================+=======+
| 0 |Mass of Primary (m1)| m1 | m1 | Msun |
+---+--------------------+---------------+-------------------+-------+
.
.
.$$ FILL THIS OUT!!!!
"""
def __init__(self, omega_offset_di, omega_offset_rv, vary_tc, tc_equal_to,
di_only, low_ecc, range_maxs, range_mins, num_offsets):
# params that effect calculating the full list of params from the directly varied one
self.omega_offset_di = omega_offset_di
self.omega_offset_rv = omega_offset_rv
self.vary_tc = vary_tc
self.tc_equal_to = tc_equal_to
self.di_only = di_only
self.low_ecc = low_ecc
## max/min ranges
self.maxs = range_maxs
self.mins = range_mins
## prep versions of all param arrays
self.num_offsets = num_offsets
# direct_pars: [m1,m2,parallax,long_an,e OR sqrt(e)*sin(arg_peri),to/tc,p,inc,arg_peri OR sqrt(e)*cos(arg_peri),v1,v2...]
self.direct_pars = np.zeros((9+num_offsets),dtype=np.dtype('d'))
# model_in_params: [m1,m2,parallax,long_an,e,to,tc,p,inc,arg_peri,arg_peri_di,arg_peri_rv,a_tot_au,K]
self.model_in_pars = np.zeros((14),dtype=np.dtype('d'))
# stored_pars: [m1,m2,parallax,long_an,e,to,tc,p,inc,arg_peri,a_tot_au,chi_sqr,K,v1,v2...]
self.stored_pars = np.zeros((13+num_offsets),dtype=np.dtype('d'))
self.offsets = np.zeros((num_offsets),dtype=np.dtype('d'))
#check_pars: [m1, m2, parallax, long_an, e, to/tc, p, inc, arg_peri]
self.check_pars = np.zeros((9+num_offsets),dtype=np.dtype('d'))
def make_model_in(self):
"""
        Convert the directly varied parameters into the comprehensive list
        of those used as inputs during model calculations.
model_in_params: [m1,m2,parallax,long_an,e,to,tc,p,inc,arg_peri,arg_peri_di,arg_peri_rv,a_tot_au,K]
"""
model_input_pars(self.direct_pars, self.low_ecc, self.tc_equal_to,
self.vary_tc, self.di_only, self.omega_offset_di,
self.omega_offset_rv, self.model_in_pars)
self.offsets = self.direct_pars[9:]
#print('self.offsets = '+repr(self.offsets))
## Wrap periodic params into allowed ranges. ie. long_an and arg_peri
m_par_ints = [3,9]
min_max_ints = [3,8]
for i in [0,1]:
if self.mins[min_max_ints[i]] > self.model_in_pars[m_par_ints[i]]:
#print('par was '+str(model_input_pars[m_par_ints[i]]))
self.model_in_pars[m_par_ints[i]]+=360.0
#print('now '+str(model_input_pars[m_par_ints[i]]))
elif self.model_in_pars[m_par_ints[i]] > self.maxs[min_max_ints[i]]:
#print('par was '+str(model_input_pars[m_par_ints[i]]))
self.model_in_pars[m_par_ints[i]]-=360.0
#print('now '+str(model_input_pars[m_par_ints[i]]))
#print(repr(self.model_in_pars))
def stored_to_direct(self,pars):
""" take a set of parameters matching 'stored_pars' and make the
directly varied versions matching 'direct_pars'.
Note:
direct_pars: [m1,m2,parallax,long_an,e OR sqrt(e)*sin(arg_peri),to/tc,p,inc,arg_peri OR sqrt(e)*cos(arg_peri),v1,v2...]
stored_pars: [m1,m2,parallax,long_an,e,to,tc,p,inc,arg_peri,a_tot_au,chi_sqr,K,v1,v2...]
"""
direct_ary = np.zeros((9+self.num_offsets),dtype=np.dtype('d'))
direct_ary[0:4] = pars[0:4]
if self.low_ecc:
direct_ary[4] = np.sqrt(pars[4])*np.sin(np.radians(pars[9]))
direct_ary[8] = np.sqrt(pars[4])*np.cos(np.radians(pars[9]))
else:
direct_ary[4] = pars[4]
direct_ary[8] = pars[9]
if self.vary_tc:
direct_ary[5] = pars[6]
else:
direct_ary[5] = pars[5]
direct_ary[6:8] = pars[7:9]
direct_ary[9:] = pars[13:]
return direct_ary
def direct_to_stored(self,pars):
""" Take a single set of parameters in 'direct' format and return
the matching set in 'stored' format.
direct_pars: [m1,m2,parallax,long_an,e OR sqrt(e)*sin(arg_peri),to/tc,p,inc,arg_peri OR sqrt(e)*cos(arg_peri),v1,v2...]
stored_pars: [m1,m2,parallax,long_an,e,to,tc,p,inc,arg_peri,a_tot_au,chi_sqr,K,v1,v2...]
"""
self.direct_pars = pars
self.make_model_in()
self.make_stored(1.0e6)
return copy.deepcopy(self.stored_pars)
def make_stored(self,chi_squared):
"""
Push values in model_in_params, offsets and the resulting
chi_squared_3d into an array to be stored on disk during ExoSOFT.
Not sure how to make this work with emcee or other tools...
"""
# model_in_params: [m1,m2,parallax,long_an,e,to,tc,p,inc,arg_peri,arg_peri_di,arg_peri_rv,a_tot_au,K]
# stored_pars: [m1,m2,parallax,long_an,e,to,tc,p,inc,arg_peri,a_tot_au,chi_sqr,K,v1,v2...]
self.stored_pars[0:10] = self.model_in_pars[0:10]
self.stored_pars[10] = self.model_in_pars[12] #a_tot_au
self.stored_pars[11] = chi_squared
self.stored_pars[12] = self.model_in_pars[13] #K
self.stored_pars[13:] = self.offsets[:]
def check_range(self):
"""Determine if all parameters in the full list are within their
allowed ranges.
        Range arrays correspond to parameters in:
[m1, m2, parallax, long_an, e, to/tc, p, inc, arg_peri, v1,v2,...]
"""
debugging = False
self.check_pars[0:5] = self.model_in_pars[0:5]
if self.vary_tc:
self.check_pars[5] = self.model_in_pars[6]
else:
self.check_pars[5] = self.model_in_pars[5]
self.check_pars[6:9] = self.model_in_pars[7:10]
self.check_pars[9:] = self.offsets[:]
        # A chained a != b != c check misses the a == b != c case, so compare all three explicitly.
        if not (len(self.check_pars) == len(self.maxs) == len(self.mins)):
            print("LENGTH OF CHECK_PARAMS IS NOT EQUAL TO LENGTH OF MINS OR MAXS!!!")
in_range = True
for i in range(len(self.check_pars)):
if (self.check_pars[i]>self.maxs[i]) or (self.check_pars[i]<self.mins[i]):
in_range = False
if debugging:
print("Param # "+str(i)+" out of range")
print(str(self.mins[i])+"!> "+str(self.check_pars[i])+" OR !< "+str(self.maxs[i]))
return in_range
class ExoSOFTdata(object):
"""
An object to contain all the necessary data arrays and parameters to
calculate matching predicted data with the model. All member variables
will remain constant throughout.
Notes:
-Except for rv_inst_num array, all other arrays must be ndarrays of double
precision floating point numbers (dtype=np.dtype('d')).
    -Arrays epochs_di, rapa, rapa_err, decsa, and decsa_err must all have the same length.
    -Arrays epochs_rv, rv, rv_err and rv_inst_num must all have the same length.
Inputs:
    rv_inst_num = ndarray of positive integers (signed or unsigned), of the
    same length as epochs_rv, rv, and rv_err.
"""
def __init__(self, epochs_di, epochs_rv, rapa, rapa_err, decsa, decsa_err,
rv, rv_err, rv_inst_num, data_mode, pasa=False):
self.epochs_di = epochs_di
self.epochs_rv = epochs_rv
# x/RA/PA
self.rapa = rapa
self.rapa_err = rapa_err
self.rapa_model = np.zeros((len(epochs_di)),dtype=np.dtype('d'))
# y/Dec/SA
self.decsa = decsa
self.decsa_err = decsa_err
self.decsa_model = np.zeros((len(epochs_di)),dtype=np.dtype('d'))
# RV
self.rv = rv
self.rv_err = rv_err
self.rv_model = np.zeros((len(epochs_rv)),dtype=np.dtype('d'))
# dataset/instrument number
self.rv_inst_num = rv_inst_num
self.data_mode = data_mode
self.pasa = pasa
def ln_posterior(pars, Model, no_range_check=False):
"""
    Calculate the likelihood for a given set of inputs, then the
    natural logarithm of the posterior probability.
-Model is of type ExoSOFTmodel. Currently just holds resulting fit values.
-Data is of type ExoSOFTdata, containing all input data and params to
produce predicted values of matching units, and arrays for predicted values.
    -Params is of type ExoSOFTparams, a class containing functions for
calculating versions of the 'pars' used as model inputs, and a version
that would be for storing to disk when ran in ExoSOFT.
    -Priors is of type ExoSOFTpriors, containing functions for each parameter's
prior, a function calculate combined prior given list of params, and any
variables necessary for those calculations.
"""
speed_test = False
if no_range_check:
speed_test = True
## convert params from raw values
Model.Params.direct_pars = pars
Model.Params.make_model_in()
    ## Range check on proposed params; ln_post stays -inf (zero posterior) if any are outside their ranges.
ln_post = -np.inf
if speed_test:
in_range=True
else:
in_range = Model.Params.check_range()
if in_range:
## Call Cython func to calculate orbit. ie. -> predicted x,y,rv values.
orbit(Model.Params.model_in_pars, Model.Params.offsets, Model.Data.pasa,
Model.Data.data_mode, Model.Data.epochs_di, Model.Data.epochs_rv,
Model.Data.rv_inst_num, Model.Data.rapa_model,
Model.Data.decsa_model, Model.Data.rv_model)
if speed_test==False:#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
chi_sqr_rv, chi_sqr_rapa, chi_sqr_decsa = 0, 0, 0
if (len(Model.Data.epochs_rv)>0) and (Model.Data.data_mode!='DI'):
#print('rv diffs\n'+repr(np.sort(Model.Data.rv-Model.Data.rv_model)))
chi_sqr_rv = np.sum((Model.Data.rv-Model.Data.rv_model)**2 / Model.Data.rv_err**2)
if (len(Model.Data.epochs_di)>0) and (Model.Data.data_mode!='RV'):
chi_sqr_rapa = np.sum((Model.Data.rapa-Model.Data.rapa_model)**2 / Model.Data.rapa_err**2)
chi_sqr_decsa = np.sum((Model.Data.decsa-Model.Data.decsa_model)**2 / Model.Data.decsa_err**2)
chi_sqr_3d = chi_sqr_rv + chi_sqr_rapa + chi_sqr_decsa
            # Remember that chi_sqr = -2*ln(likelihood), so:
ln_lik = -0.5*chi_sqr_3d
#print('ln_lik',ln_lik)
## Make version of params with chi_sqr_3d for storing during ExoSOFT
Model.Params.make_stored(chi_sqr_3d)
#print('stored_pars',Model.Params.stored_pars)
## store the chi sqr values in model object for printing in ExoSOFT.
#print('chi_sqr_3d',chi_sqr_3d)
Model.chi_squared_3d = chi_sqr_3d
Model.chi_squared_di = chi_sqr_rapa + chi_sqr_decsa
Model.chi_squared_rv = chi_sqr_rv
## Calculate priors
prior = Model.Priors.priors(Model.Params.stored_pars)
Model.prior = prior
#print('np.log(prior)',np.log(prior))
#print('prior ',prior)
## calculate lnpost
ln_post = np.log(prior) + ln_lik
#print('ln_post ',ln_post)
return ln_post
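# A minimal, untested sketch of driving ln_posterior with an MCMC sampler
# such as emcee; `sd`, the walker count, and `p0` are assumptions for
# illustration and are not defined in this module:
#   import emcee
#   Model = ExoSOFTmodel(sd)
#   ndim = len(Model.Params.direct_pars)
#   sampler = emcee.EnsembleSampler(50, ndim, ln_posterior, args=[Model])
#   sampler.run_mcmc(p0, 1000)  # p0: (50, ndim) array of starting positions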
#EOF
|
kylemede/ExoSOFT
|
ExoSOFT/tools/model.py
|
Python
|
gpl-3.0
| 13,972
|
# flake8: noqa
from settings_shared import *
from ccnmtlsettings.staging import common
locals().update(
common(
project=project,
base=base,
INSTALLED_APPS=INSTALLED_APPS,
STATIC_ROOT=STATIC_ROOT,
cloudfront="d2pl7wm2o23pxj",
))
try:
from local_settings import *
except ImportError:
pass
|
ccnmtl/diabeaters
|
diabeaters/settings_staging.py
|
Python
|
gpl-2.0
| 345
|
from pyquery import PyQuery as pq
from nose.tools import eq_
from django.forms import ModelForm
import amo.tests
from translations import forms, fields
from translations.tests.testapp.models import TranslatedModel
class TestForm(forms.TranslationFormMixin, ModelForm):
name = fields.TransField()
class Meta:
model = TranslatedModel
class TestTranslationFormMixin(amo.tests.TestCase):
def test_default_locale(self):
obj = TranslatedModel()
obj.get_fallback = lambda: 'pl'
f = TestForm(instance=obj)
eq_(f.fields['name'].default_locale, 'pl')
eq_(f.fields['name'].widget.default_locale, 'pl')
eq_(pq(f.as_p())('input:not([lang=init])').attr('lang'), 'pl')
|
robhudson/zamboni
|
apps/translations/tests/test_forms.py
|
Python
|
bsd-3-clause
| 730
|
__author__ = 'victor'
from theano import tensor as T
def norm(A, axis=0, keepdims=False):
return T.sqrt(T.sum(T.sqr(A), axis=axis, keepdims=keepdims))
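# Illustrative usage (a sketch; `A` below is an assumed matrix variable):
#   A = T.dmatrix('A')
#   f = theano.function([A], norm(A, axis=0))
# Calling f on a 2-D array returns the per-column L2 norms, matching
# np.linalg.norm(X, axis=0) on the same data.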
|
vzhong/pystacks
|
pystacks/utils/theano_ext.py
|
Python
|
mit
| 156
|
"""
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""! @package mbed-greentea-test
Unit tests for mbed-greentea test suite
"""
|
iriark01/greentea
|
test/__init__.py
|
Python
|
apache-2.0
| 658
|
"""
Unit tests for email feature flag in student dashboard. Additionally tests
that bulk email is always disabled for non-Mongo backed courses, regardless
of email feature flag, and that the view is conditionally available when
Course Auth is turned on.
"""
from django.test.utils import override_settings
from django.conf import settings
from django.core.urlresolvers import reverse, NoReverseMatch
from unittest.case import SkipTest
from courseware.tests.tests import TEST_DATA_MONGO_MODULESTORE
from student.tests.factories import UserFactory, CourseEnrollmentFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from bulk_email.models import CourseAuthorization
from mock import patch
@override_settings(MODULESTORE=TEST_DATA_MONGO_MODULESTORE)
class TestStudentDashboardEmailView(ModuleStoreTestCase):
"""
Check for email view displayed with flag
"""
def setUp(self):
self.course = CourseFactory.create()
# Create student account
student = UserFactory.create()
CourseEnrollmentFactory.create(user=student, course_id=self.course.id)
self.client.login(username=student.username, password="test")
try:
# URL for dashboard
self.url = reverse('dashboard')
except NoReverseMatch:
raise SkipTest("Skip this test if url cannot be found (ie running from CMS tests)")
# URL for email settings modal
self.email_modal_link = (
('<a href="#email-settings-modal" class="email-settings" rel="leanModal" '
'data-course-id="{0}/{1}/{2}" data-course-number="{1}" '
'data-optout="False">Email Settings</a>').format(
self.course.org,
self.course.number,
self.course.display_name.replace(' ', '_')
)
)
def tearDown(self):
"""
Undo all patches.
"""
patch.stopall()
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
def test_email_flag_true(self):
# Assert that the URL for the email view is in the response
response = self.client.get(self.url)
self.assertTrue(self.email_modal_link in response.content)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': False})
def test_email_flag_false(self):
# Assert that the URL for the email view is not in the response
response = self.client.get(self.url)
self.assertFalse(self.email_modal_link in response.content)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': True})
def test_email_unauthorized(self):
# Assert that instructor email is not enabled for this course
self.assertFalse(CourseAuthorization.instructor_email_enabled(self.course.id))
# Assert that the URL for the email view is not in the response
# if this course isn't authorized
response = self.client.get(self.url)
self.assertFalse(self.email_modal_link in response.content)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': True})
def test_email_authorized(self):
# Authorize the course to use email
cauth = CourseAuthorization(course_id=self.course.id, email_enabled=True)
cauth.save()
# Assert that instructor email is enabled for this course
self.assertTrue(CourseAuthorization.instructor_email_enabled(self.course.id))
        # Assert that the URL for the email view is in the response
        # now that this course is authorized
response = self.client.get(self.url)
self.assertTrue(self.email_modal_link in response.content)
@override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE)
class TestStudentDashboardEmailViewXMLBacked(ModuleStoreTestCase):
"""
Check for email view on student dashboard, with XML backed course.
"""
def setUp(self):
self.course_name = 'edX/toy/2012_Fall'
# Create student account
student = UserFactory.create()
CourseEnrollmentFactory.create(
user=student,
course_id=SlashSeparatedCourseKey.from_deprecated_string(self.course_name)
)
self.client.login(username=student.username, password="test")
try:
# URL for dashboard
self.url = reverse('dashboard')
except NoReverseMatch:
raise SkipTest("Skip this test if url cannot be found (ie running from CMS tests)")
# URL for email settings modal
self.email_modal_link = (
('<a href="#email-settings-modal" class="email-settings" rel="leanModal" '
'data-course-id="{0}/{1}/{2}" data-course-number="{1}" '
'data-optout="False">Email Settings</a>').format(
'edX',
'toy',
'2012_Fall'
)
)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
def test_email_flag_true_xml_store(self):
# The flag is enabled, and since REQUIRE_COURSE_EMAIL_AUTH is False, all courses should
# be authorized to use email. But the course is not Mongo-backed (should not work)
response = self.client.get(self.url)
self.assertFalse(self.email_modal_link in response.content)
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': False, 'REQUIRE_COURSE_EMAIL_AUTH': False})
def test_email_flag_false_xml_store(self):
# Email disabled, shouldn't see link.
response = self.client.get(self.url)
self.assertFalse(self.email_modal_link in response.content)
|
geekaia/edx-platform
|
common/djangoapps/student/tests/test_bulk_email_settings.py
|
Python
|
agpl-3.0
| 5,937
|
# This file is part of Firemix.
#
# Copyright 2013-2016 Jonathan Evans <jon@craftyjon.com>
#
# Firemix is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Firemix is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Firemix. If not, see <http://www.gnu.org/licenses/>.
#
from builtins import range
from builtins import object
import sys
import numpy as np
import socket
import array
import struct
import time
from copy import deepcopy
from collections import defaultdict
from lib.colors import hls_to_rgb
from lib.colors import hls_to_rgb_perceptual
from lib.buffer_utils import BufferUtils, struct_flat
USE_OPC = True
class Networking(object):
def __init__(self, app):
self.socket = None
self.context = None
self._app = app
self.running = True
self.open_socket()
# Maps client type to list of packet buffers
self._packet_cache = {}
self.port = 3020
self.opc_port = 7890
def open_socket(self):
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
def write_buffer(self, buffer):
"""
Performs a bulk strand write.
Decodes the HLS-Float data according to client settings
"""
strand_settings = self._app.scene.get_strand_settings()
clients = [client for client in self._app.settings['networking']['clients'] if client["enabled"]]
clients_by_type = defaultdict(list)
have_non_dimmed = False
for c in clients:
clients_by_type[c.get("protocol", "Legacy")].append(c)
dimmed_legacy_clients = [c for c in clients_by_type["Legacy"] if not c.get('ignore-dimming')]
undimmed_legacy_clients = [c for c in clients_by_type["Legacy"] if c.get('ignore-dimming')]
opc_clients = clients_by_type["OPC"]
if undimmed_legacy_clients:
# Protect against presets or transitions that write float data.
buffer_rgb = hls_to_rgb_perceptual(buffer)
buffer_rgb_int = np.int8(struct_flat(buffer_rgb) * 255)
self._write_legacy(buffer_rgb_int, strand_settings, undimmed_legacy_clients)
# Now that we've written to clients that don't want dimmed data, apply
# the global dimmer from the mixer and re-convert to RGB
if self._app.mixer.global_dimmer < 1.0:
buffer['light'] *= self._app.mixer.global_dimmer
if self._app.mixer.useColorCorrections:
buffer_rgb = hls_to_rgb_perceptual(buffer)
else:
buffer_rgb = hls_to_rgb(buffer)
buffer_rgb_int = np.int8(struct_flat(buffer_rgb) * 255)
if dimmed_legacy_clients:
self._write_legacy(buffer_rgb_int, strand_settings, dimmed_legacy_clients)
if opc_clients:
self._write_opc(buffer_rgb_int, strand_settings, opc_clients)
def _write_legacy(self, buf, strand_settings, clients):
packets = []
if 'legacy' not in self._packet_cache:
self._packet_cache['legacy'] = [None] * len(strand_settings)
for strand in range(len(strand_settings)):
if not strand_settings[strand]["enabled"]:
continue
start, end = BufferUtils.get_strand_extents(strand)
start *= 3
end *= 3
packet_header_size = 4
packet_size = (end-start) + packet_header_size
packet = self._packet_cache['legacy'][strand]
if packet is None:
packet = np.zeros(packet_size, dtype=np.int8)
self._packet_cache['legacy'][strand] = packet
length = packet_size - packet_header_size
packet[0] = ord('S')
packet[1] = strand
packet[2] = length & 0x00FF
packet[3] = (length & 0xFF00) >> 8
np.copyto(packet[packet_header_size:], buf[start:end])
packets.append(packet)
for client in clients:
try:
self.socket.sendto(array.array('B', [ord('B')]), (client["host"], client["port"]))
for packet in packets:
self.socket.sendto(packet, (client["host"], client["port"]))
self.socket.sendto(array.array('B', [ord('E')]), (client["host"], client["port"]))
except socket.gaierror:
print("Bad hostname: ", client["host"])
continue
except:
continue
def _write_opc(self, buf, strand_settings, clients):
packet_data_len = len(buf)
packet_size = packet_data_len + 4
if 'opc' not in self._packet_cache:
self._packet_cache['opc'] = [np.empty(packet_size, dtype=np.int8)]
packet = self._packet_cache['opc'][0]
# OPC happens to look a lot like our existing protocol.
# Byte 0 is channel (aka strand). 0 is broadcast address, indexing starts at 1.
# Byte 1 is command, always 0 for "set pixel colors"
# Bytes 2 and 3 are big-endian length of the data block.
#
# Both LEDScape and the OPC reference implementation actually seem to
# ignore the strand address and just assume that the data for all
# strands is sent in a single broadcast packet. So, we do that here.
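        # Worked example: a 300-byte payload yields the header
        # [0x00, 0x00, 0x01, 0x2C] -- broadcast channel, set-pixel command,
        # then the length 300 (0x012C) in big-endian order.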
packet[0] = 0
packet[1] = 0
packet[2] = (packet_data_len & 0xFF00) >> 8
packet[3] = (packet_data_len & 0xFF)
np.copyto(packet[4:], buf)
for client in clients:
self.socket.sendto(packet, (client["host"], client["port"]))
|
Openlights/firemix
|
core/networking.py
|
Python
|
gpl-3.0
| 6,048
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.scatter."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _AsType(v, vtype):
return v.astype(vtype) if isinstance(v, np.ndarray) else vtype(v)
def _NumpyAdd(ref, indices, updates):
# Since numpy advanced assignment does not support repeated indices,
# we run a simple loop to perform scatter_add.
for i, indx in np.ndenumerate(indices):
ref[indx] += updates[i]
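# Note: the buffered form `ref[indices] += updates` would apply each repeated
# index only once, whereas `np.add.at(ref, indices, updates)` is the
# unbuffered equivalent of the loop above; the explicit loops here act as a
# transparent reference for comparison.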
def _NumpyAddScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] += update
def _NumpySub(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] -= updates[i]
def _NumpySubScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] -= update
def _NumpyMul(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] *= updates[i]
def _NumpyMulScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] *= update
def _NumpyDiv(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] /= updates[i]
def _NumpyDivScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] /= update
def _NumpyMin(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] = np.minimum(ref[indx], updates[i])
def _NumpyMinScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] = np.minimum(ref[indx], update)
def _NumpyMax(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] = np.maximum(ref[indx], updates[i])
def _NumpyMaxScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] = np.maximum(ref[indx], update)
def _NumpyUpdate(ref, indices, updates):
for i, indx in np.ndenumerate(indices):
ref[indx] = updates[i]
def _NumpyUpdateScalar(ref, indices, update):
for _, indx in np.ndenumerate(indices):
ref[indx] = update
_TF_OPS_TO_NUMPY = {
state_ops.scatter_update: _NumpyUpdate,
state_ops.scatter_add: _NumpyAdd,
state_ops.scatter_sub: _NumpySub,
state_ops.scatter_mul: _NumpyMul,
state_ops.scatter_div: _NumpyDiv,
state_ops.scatter_min: _NumpyMin,
state_ops.scatter_max: _NumpyMax,
}
_TF_OPS_TO_NUMPY_SCALAR = {
state_ops.scatter_update: _NumpyUpdateScalar,
state_ops.scatter_add: _NumpyAddScalar,
state_ops.scatter_sub: _NumpySubScalar,
state_ops.scatter_mul: _NumpyMulScalar,
state_ops.scatter_div: _NumpyDivScalar,
state_ops.scatter_min: _NumpyMinScalar,
state_ops.scatter_max: _NumpyMaxScalar,
}
class ScatterTest(test.TestCase):
def _VariableRankTest(self,
tf_scatter,
vtype,
itype,
repeat_indices=False,
updates_are_scalar=False):
np.random.seed(8)
with self.cached_session(use_gpu=True):
for indices_shape in (), (2,), (3, 7), (3, 4, 7):
for extra_shape in (), (5,), (5, 9):
# Generate random indices with no duplicates for easy numpy comparison
size = np.prod(indices_shape, dtype=itype)
first_dim = 3 * size
indices = np.arange(first_dim)
np.random.shuffle(indices)
indices = indices[:size]
if size > 1 and repeat_indices:
# Add some random repeats.
indices = indices[:size // 2]
for _ in range(size - size // 2):
# Randomly append some repeats.
indices = np.append(indices,
indices[np.random.randint(size // 2)])
np.random.shuffle(indices)
indices = indices.reshape(indices_shape)
if updates_are_scalar:
updates = _AsType(np.random.randn(), vtype)
else:
updates = _AsType(
np.random.randn(*(indices_shape + extra_shape)), vtype)
# Clips small values to avoid division by zero.
def clip_small_values(x):
threshold = 1e-4
sign = np.sign(x)
if isinstance(x, np.int32):
threshold = 1
sign = np.random.choice([-1, 1])
return threshold * sign if np.abs(x) < threshold else x
updates = np.vectorize(clip_small_values)(updates)
old = _AsType(np.random.randn(*((first_dim,) + extra_shape)), vtype)
# Scatter via numpy
new = old.copy()
if updates_are_scalar:
np_scatter = _TF_OPS_TO_NUMPY_SCALAR[tf_scatter]
else:
np_scatter = _TF_OPS_TO_NUMPY[tf_scatter]
np_scatter(new, indices, updates)
# Scatter via tensorflow
ref = variables.Variable(old)
self.evaluate(ref.initializer)
self.evaluate(tf_scatter(ref, indices, updates))
self.assertAllClose(self.evaluate(ref), new)
def _VariableRankTests(self,
tf_scatter,
repeat_indices=False,
updates_are_scalar=False):
vtypes = [np.float32, np.float64]
if tf_scatter != state_ops.scatter_div:
vtypes.append(np.int32)
for vtype in vtypes:
for itype in (np.int32, np.int64):
self._VariableRankTest(tf_scatter, vtype, itype, repeat_indices,
updates_are_scalar)
def testVariableRankUpdate(self):
self._VariableRankTests(state_ops.scatter_update, False)
def testVariableRankAdd(self):
self._VariableRankTests(state_ops.scatter_add, False)
def testVariableRankSub(self):
self._VariableRankTests(state_ops.scatter_sub, False)
def testVariableRankMul(self):
self._VariableRankTests(state_ops.scatter_mul, False)
def testVariableRankDiv(self):
self._VariableRankTests(state_ops.scatter_div, False)
def testVariableRankMin(self):
self._VariableRankTests(state_ops.scatter_min, False)
def testVariableRankMax(self):
self._VariableRankTests(state_ops.scatter_max, False)
def testRepeatIndicesAdd(self):
self._VariableRankTests(state_ops.scatter_add, True)
def testRepeatIndicesSub(self):
self._VariableRankTests(state_ops.scatter_sub, True)
def testRepeatIndicesMul(self):
self._VariableRankTests(state_ops.scatter_mul, True)
def testRepeatIndicesDiv(self):
self._VariableRankTests(state_ops.scatter_div, True)
def testRepeatIndicesMin(self):
self._VariableRankTests(state_ops.scatter_min, True)
def testRepeatIndicesMax(self):
self._VariableRankTests(state_ops.scatter_max, True)
def testVariableRankUpdateScalar(self):
self._VariableRankTests(state_ops.scatter_update, False, True)
def testVariableRankAddScalar(self):
self._VariableRankTests(state_ops.scatter_add, False, True)
def testVariableRankSubScalar(self):
self._VariableRankTests(state_ops.scatter_sub, False, True)
def testVariableRankMulScalar(self):
self._VariableRankTests(state_ops.scatter_mul, False, True)
def testVariableRankDivScalar(self):
self._VariableRankTests(state_ops.scatter_div, False, True)
def testVariableRankMinScalar(self):
self._VariableRankTests(state_ops.scatter_min, False, True)
def testVariableRankMaxScalar(self):
self._VariableRankTests(state_ops.scatter_max, False, True)
def testRepeatIndicesAddScalar(self):
self._VariableRankTests(state_ops.scatter_add, True, True)
def testRepeatIndicesSubScalar(self):
self._VariableRankTests(state_ops.scatter_sub, True, True)
def testRepeatIndicesMulScalar(self):
self._VariableRankTests(state_ops.scatter_mul, True, True)
def testRepeatIndicesDivScalar(self):
self._VariableRankTests(state_ops.scatter_div, True, True)
def testRepeatIndicesMinScalar(self):
self._VariableRankTests(state_ops.scatter_min, True, True)
def testRepeatIndicesMaxScalar(self):
self._VariableRankTests(state_ops.scatter_max, True, True)
def testBooleanScatterUpdate(self):
if not test.is_gpu_available():
with self.session(use_gpu=False):
var = variables.Variable([True, False])
update0 = state_ops.scatter_update(var, 1, True)
update1 = state_ops.scatter_update(
var, constant_op.constant(
0, dtype=dtypes.int64), False)
self.evaluate(var.initializer)
self.evaluate([update0, update1])
self.assertAllEqual([False, True], self.evaluate(var))
def testScatterOutOfRangeCpu(self):
for op, _ in _TF_OPS_TO_NUMPY.items():
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
if not test.is_gpu_available():
with self.session(use_gpu=False):
ref = variables.Variable(params)
self.evaluate(ref.initializer)
# Indices all in range, no problem.
indices = np.array([2, 0, 5])
self.evaluate(op(ref, indices, updates))
# Test some out of range errors.
indices = np.array([-1, 0, 5])
with self.assertRaisesOpError(
r'indices\[0\] = -1 is not in \[0, 6\)'):
self.evaluate(op(ref, indices, updates))
indices = np.array([2, 0, 6])
with self.assertRaisesOpError(r'indices\[2\] = 6 is not in \[0, 6\)'):
self.evaluate(op(ref, indices, updates))
# TODO(fpmc): Re-enable this test when gpu_pip test actually runs on a GPU.
def _disabledTestScatterOutOfRangeGpu(self):
if test.is_gpu_available():
return
for op, _ in _TF_OPS_TO_NUMPY.items():
params = np.array([1, 2, 3, 4, 5, 6]).astype(np.float32)
updates = np.array([-3, -4, -5]).astype(np.float32)
# With GPU, the code ignores indices that are out of range.
# We don't test the implementation; just test there's no failures.
with test_util.force_gpu():
ref = variables.Variable(params)
self.evaluate(ref.initializer)
# Indices all in range, no problem.
indices = np.array([2, 0, 5])
self.evaluate(op(ref, indices, updates))
# Indices out of range should not fail.
indices = np.array([-1, 0, 5])
self.evaluate(op(ref, indices, updates))
indices = np.array([2, 0, 6])
self.evaluate(op(ref, indices, updates))
if __name__ == '__main__':
test.main()
|
gunan/tensorflow
|
tensorflow/python/kernel_tests/scatter_ops_test.py
|
Python
|
apache-2.0
| 11,378
|
from __future__ import division
import os
import timeit
#folder hierarchy
#path = 'C:\Users\libpub\Desktop\SForkCrowFinal'
path=os.getcwd()
def fold(path, output=None):
writePath = str(path)+'\\foldStruct.txt'
file=open(writePath, 'w')
for root, dirs, files in os.walk(path):
level = root.replace(path, '').count(os.sep)
indent = ' ' * 4 * (level)
if output == 'y':
file.write('{}{}/'.format(indent, os.path.basename(root)) + '\n')
else:
print('{}{}/'.format(indent, os.path.basename(root)))
file.close()
def foldFiles(path, output=None):
writePath = str(path)+'\\fileStruct.txt'
file=open(writePath, 'w')
for root, dirs, files in os.walk(path):
level = root.replace(path, '').count(os.sep)
indent = ' ' * 4 * (level)
#print('{}{}/'.format(indent, os.path.basename(root)))
subindent = ' ' * 4 * (level + 1)
file.write('{}{}/'.format(indent, os.path.basename(root)) + '\n')
for f in files:
if output == 'y':
file.write('{}{}'.format(subindent, f) + '\n')
else:
print('{}{}'.format(subindent, f))
file.close()
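# Illustrative layout that fold()/foldFiles() emit for a small tree:
#   project/
#       data/
#           input.txt
#       notes.txt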
def humanize_bytes(bytes, precision=1):
"""Return a humanized string representation of a number of bytes.
Assumes `from __future__ import division`.
>>> humanize_bytes(1)
'1 byte'
>>> humanize_bytes(1024)
'1.0 kB'
>>> humanize_bytes(1024*123)
'123.0 kB'
>>> humanize_bytes(1024*12342)
'12.1 MB'
>>> humanize_bytes(1024*12342,2)
'12.05 MB'
>>> humanize_bytes(1024*1234,2)
'1.21 MB'
>>> humanize_bytes(1024*1234*1111,2)
'1.31 GB'
>>> humanize_bytes(1024*1234*1111,1)
'1.3 GB'
"""
abbrevs = (
(1<<50L, 'PB'),
(1<<40L, 'TB'),
(1<<30L, 'GB'),
(1<<20L, 'MB'),
(1<<10L, 'kB'),
(1, 'bytes')
)
if bytes == 1:
return '1 byte'
for factor, suffix in abbrevs:
if bytes >= factor:
break
return '%.*f %s' % (precision, bytes / factor, suffix)
def sizeQuery(d):
    # d maps file name -> size in bytes, so the total and maximum are
    # computed numerically instead of comparing humanized strings.
    totalSize = sum(d.values())
    maxFile = max(d, key=d.get)
    maxFileSize = humanize_bytes(d[maxFile])
    return (totalSize, maxFileSize, maxFile)
def metadataQuery(path, fileTypes, output=None):
metadata=[]
metadataTypes = {
'.pdf':0,
'.xml':0,
'.htm':0,
'.html':0,
'.doc':0,
'.docx':0,
'.txt':0
}
metadataSize={}
likelyMetadata=[]
    for root, dirs, files in os.walk(path):
        for name in files:
            # Count the file if its extension matches one found in metadataTypes
            nameLow=name.lower()
            if any(nameLow.endswith(x) for x in metadataTypes):
                if nameLow not in metadata:
                    if nameLow not in ['filestruct.txt', 'foldstruct.txt', 'metadata_log.txt', 'file_desc.txt']:
                        # use the original name so the path is valid on case-sensitive filesystems
                        namePath = os.path.abspath(os.path.join(root, name))
                        size = os.path.getsize(namePath)
                        metadata.append(nameLow)
                        # store raw bytes; humanize only when displaying
                        metadataSize.update({nameLow: size})
                        if 'metadata' in nameLow:
                            likelyMetadata.append(nameLow)
                        elif 'meta' in nameLow:
                            likelyMetadata.append(nameLow)
for f in metadata:
for x in metadataTypes:
if f.endswith(x):
metadataTypes[x]+=1
totalSize, maxFileSize, maxFile = sizeQuery(metadataSize)
if output == 'y':
# WRITE TO FILE
indent = ' ' * 4
count = 0
writePath = str(path)+'\\metadata_log.txt'
file=open(writePath, 'w')
file.write('Folder contains the following metadata file types and count: \n')
for key, value in sorted(metadataTypes.items()):
if value >0:
if len(key)==4:
if count == 0:
keyval = indent + str(key)+ ' : '+ str(value)
count = 1
else:
keyval = ',' + indent + str(key)+ ' : '+ str(value)
#count+=1
#keyval = indent + str(key)+ ' : '+ str(value) + ','
file.write(keyval)
else:
if count == 0:
keyval = indent + str(key)+ ': '+ str(value)
else:
keyval = ',' + indent + str(key)+ ': '+ str(value)
count+=1
#keyval = indent + str(key)+ ': '+ str(value)+ ','
file.write(keyval)
file.write("\n\nThe following may be metadata files: \n")
count = 0
for x in sorted(metadata):
if count == 0:
file.write(indent + x)
count=1
else:
file.write(',' + indent + x)
count = 0
if len(likelyMetadata) == 0:
pass
else:
file.write("\n\nThe following are likely metadata files: \n")
for x in sorted(likelyMetadata):
if count == 0:
file.write(indent + x)
count=1
else:
file.write(',' + indent + x)
file.write('\n\nTotal metadata files: ' + str(len(metadata))+ '\n')
#file.write('\nTotal metadata file size: '+'{:,}'.format(totalSize)+'kB'+ '\n')
#file.write('\nLargest file: '+maxFile+ ' - '+ maxFileSize+ '\n')
file.close()
else:
print 'Folder contains the following metadata file types and count: '
for key, value in sorted(metadataTypes.items()):
if len(key)==4:
print key, ' : ', value
else:
print key, ': ', value
print "\nThe following may be metadata files: "
print sorted(metadata)
print '\nTotal metadata files: ',str(len(metadata))
#print metadataSize
        print '\nTotal metadata file size :', humanize_bytes(totalSize)
print '\nLargest file: ',maxFile, '-', maxFileSize
def countFilesasDict(path, output=None):
fileTypes ={}
failedTypes={}
extCount=0
fileCountTotal=0
failedTypesCount=0
fileCount = 0
for root, dirs, files in os.walk(path):
for name in files:
extension = os.path.splitext(name)[1]
extLow=extension.lower()
if extLow in fileTypes:
pass
            # Catch files with no extension (log, gdb) and add them to failedTypes
elif '.' not in name:
if name not in failedTypes:
failedTypes.update({name:fileCount})
# Add extension to list fileTypes
else:
if extLow not in ['.py']:
fileTypes.update({extLow:fileCount})
extCount+=1
for root, dirs, files in os.walk(path):
for f in fileTypes:
fileCount=0
for name in files:
# Counts file if extension matches one found in fileTypes
nameLow=name.lower()
if nameLow.endswith(f):
if nameLow not in ['filestruct.txt', 'foldstruct.txt', 'metadata_log.txt', 'file_desc.txt']:
if nameLow.endswith('.py'):
pass
else:
fileTypes[f]+=1
fileCount+=1
fileCountTotal+=fileCount
for f in failedTypes:
fileCount=0
for name in files:
# Counts files with no extension by matching to failedTypes list
if name == f:
fileCount+=1
failedTypes[f]+=1
fileCountTotal+=fileCount
if output == 'y':
# WRITE TO FILE
indent = ' ' * 4
count = 0
writePath = str(path)+'\\file_desc.txt'
file=open(writePath, 'w')
file.write('Folder contains the following file types and statistics: \n')
for key, value in sorted(fileTypes.items()):
if key not in ['.py']:
if count == 0:
keyval = indent + str(key) + ' : ' + str(value)
count = 1
else:
keyval = ','+indent + str(key)+ ' : '+ str(value)
file.write(keyval)
if len(failedTypes) == 0:
pass
else:
file.write('\n\nThe following files do not have an extension: \n')
for key, value in sorted(failedTypes.items()):
keyval = indent + str(key)+ ' : '+ str(value) + '\n'
file.write(keyval)
file.write("\nTotal Files Counted: "+str(fileCountTotal))
file.write("\nNumber of Unique Extensions: "+str(extCount))
file.close()
else:
pass
# MISC PRINT STATEMENTS FOR VALUES
#----------------------------------
#print fileTypes
#print failedTypes
#print failedTypesCount
#print "Total Files Counted: ",fileCountTotal
#print "NO EXTENSION FOR: ", failedTypes
#print "Number of Unique Extensions:",extCount
return fileTypes
tic = timeit.default_timer()
print "------------------"
print 'Current working directory = ' + str((os.getcwd()))
print "------------------"
countFilesasDict(path, 'y')
print 'File descriptions created.'
fileTypes = countFilesasDict(path)
fold(path, 'y')
print 'Folder structure created.'
foldFiles(path, 'y')
print 'File structure created.'
metadataQuery(path, fileTypes, 'y')
print 'Metadata log created.'
print "------------------"
print "COMPLETE IN: "
toc = timeit.default_timer()
timeelapsed = "{0:.2f}".format(toc-tic)
print str(timeelapsed) + ' seconds'
|
borchert/metadata-tools
|
file-analysis/mapLib_foldercheck.py
|
Python
|
mit
| 10,472
|
'''
DMRG Engine.
'''
from numpy import *
from scipy.sparse.linalg import eigsh
from scipy.linalg import eigh,svd,eigvalsh
from numpy.linalg import norm
from numpy import kron as dkron
from matplotlib.pyplot import *
import scipy.sparse as sps
import copy,time,pdb,warnings,numbers
from blockmatrix.blocklib import eigbsh,eigbh,get_blockmarker,svdb
from tba.hgen import SpinSpaceConfig,ind2c,Z4scfg
from rglib.mps import MPS,OpString,tensor,insert_Zs
from rglib.hexpand import NullEvolutor,MaskedEvolutor
from tba.hgen import kron_csr as kron
from blockmatrix import SimpleBMG,sign4bm,show_bm,trunc_bm
from disc_symm import SymmetryHandler
from superblock import SuperBlock,site_image,joint_extract_block
from pydavidson import JDh
from flib.flib import fget_subblock_dmrg
__all__=['site_image','SuperBlock','DMRGEngine','fix_tail']
ZERO_REF=1e-12
def _eliminate_zeros(A,zero_ref):
'''eliminate zeros from a sparse matrix.'''
if not isinstance(A,sps.csr_matrix): A=A.tocsr()
A.data[abs(A.data)<zero_ref]=0; A.eliminate_zeros()
return A
def _gen_hamiltonian_full(HL0,HR0,hgen_l,hgen_r,interop):
'''Get the full hamiltonian.'''
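    # Superblock hamiltonian: H = H_L (x) I_R + I_L (x) H_R + sum_i H_int_i,
    # where (x) denotes the Kronecker product and the H_int_i are the terms
    # coupling the two blocks across the boundary.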
ndiml,ndimr=HL0.shape[0],HR0.shape[0]
H1,H2=kron(HL0,sps.identity(ndimr)),kron(sps.identity(ndiml),HR0)
H=H1+H2
#get the link hamiltonians
sb=SuperBlock(hgen_l,hgen_r)
Hin=[]
for op in interop:
Hin.append(sb.get_op(op))
H=H+sum(Hin)
H=_eliminate_zeros(H,ZERO_REF)
return H
def _gen_hamiltonian_block0(HL0,HR0,hgen_l,hgen_r,interop,blockinfo):
'''Get the combined hamiltonian for specific block.'''
ndiml,ndimr=HL0.shape[0],HR0.shape[0]
bml,bmr,pml,pmr,bmg,target_block=blockinfo['bml'],blockinfo['bmr'],blockinfo['pml'],blockinfo['pmr'],blockinfo['bmg'],blockinfo['target_block']
bm_tot,pm=bmg.join_bms([bml,bmr]).compact_form()
pm=((pml*len(pmr))[:,newaxis]+pmr).ravel()[pm]
t0=time.time()
H1,H2=kron(HL0,sps.identity(ndimr)),kron(sps.identity(ndiml),HR0)
t1=time.time()
indices=pm[bm_tot.get_slice(target_block,uselabel=True)]
H1,H2=H1.tocsr()[indices][:,indices],H2.tocsr()[indices][:,indices]
Hc=H1+H2
sb=SuperBlock(hgen_l,hgen_r)
for op in interop:
Hc=Hc+(sb.get_op(op)).tocsr()[indices][:,indices]
t2=time.time()
print 'Generate Hamiltonian %s, %s'%(t1-t0,t2-t1)
return Hc,bm_tot,pm
def _gen_hamiltonian_block(HL0,HR0,hgen_l,hgen_r,interop,blockinfo):
'''Get the combined hamiltonian for specific block.'''
ndiml,ndimr=HL0.shape[0],HR0.shape[0]
bm_tot,pm=blockinfo['bmg'].join_bms([blockinfo['bml'],blockinfo['bmr']]).compact_form()
pm=((blockinfo['pml']*ndimr)[:,newaxis]+blockinfo['pmr']).ravel()[pm]
indices=pm[bm_tot.get_slice(blockinfo['target_block'],uselabel=True)]
cinds=ind2c(indices,N=[ndiml,ndimr])
t0=time.time()
H1=fget_subblock_dmrg(hl=HL0.toarray(),hr=identity(ndimr),indices=cinds,is_identity=2)
H2=fget_subblock_dmrg(hl=identity(ndiml),hr=HR0.toarray(),indices=cinds,is_identity=1)
Hc=H1+H2
t1=time.time()
sb=SuperBlock(hgen_l,hgen_r)
for op in interop:
Hc=Hc+sb.get_op(op,indices=cinds)
t2=time.time()
print 'Generate Hamiltonian %s, %s'%(t1-t0,t2-t1)
return sps.csr_matrix(Hc),bm_tot,pm
def _get_mps(hgen_l,hgen_r,phi,direction,labels):
'''Combining hgen_l and hgen_r to get the matrix product state.'''
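    # Sketch of the splitting: the two-site wavefunction phi(al,sl+1,sl+2,al+2)
    # is contracted with the neighboring block tensor and re-split by SVD into a
    # left-canonical A (or right-canonical B) plus the singular values S that
    # center the resulting MPS.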
NL,NR=hgen_l.N,hgen_r.N
phi=tensor.Tensor(phi,labels=['al','sl+1','al+2','sl+2']) #l=NL-1
if direction=='->':
A=hgen_l.evolutor.A(NL-1,dense=True) #get A[sNL](NL-1,NL)
A=tensor.Tensor(A,labels=['sl+1','al','al+1\''])
phi=tensor.contract([A,phi])
phi=phi.chorder([0,2,1]) #now we get phi(al+1,sl+2,al+2)
#decouple phi into S*B, B is column-wise othorgonal
U,S,V=svd(phi.reshape([phi.shape[0],-1]),full_matrices=False)
U=tensor.Tensor(U,labels=['al+1\'','al+1'])
A=(A*U) #get A(al,sl+1,al+1)
        B=transpose(V.reshape([S.shape[0],phi.shape[1],phi.shape[2]]),axes=(1,2,0)) #al+1,sl+2,al+2 -> sl+2,al+2,al+1, stored in column-wise orthogonal format
else:
B=hgen_r.evolutor.A(NR-1,dense=True) #get B[sNR](NL+1,NL+2)
B=tensor.Tensor(B,labels=['sl+2','al+2','al+1\'']).conj() #!the conjugate?
phi=tensor.contract([phi,B])
        #decouple phi into A*S, A is row-wise orthogonal
U,S,V=svd(phi.reshape([phi.shape[0]*phi.shape[1],-1]),full_matrices=False)
V=tensor.Tensor(V,labels=['al+1','al+1\''])
        B=(V*B).chorder([1,2,0]).conj() #al+1,sl+2,al+2 -> sl+2,al+2,al+1, since B is stored in transposed order by default.
        A=transpose(U.reshape([phi.shape[0],phi.shape[1],S.shape[0]]),axes=(1,0,2)) #al,sl+1,al+1 -> sl+1,al,al+1, stored in column-wise orthogonal format
AL=hgen_l.evolutor.get_AL(dense=True)[:-1]+[A]
BL=[B]+hgen_r.evolutor.get_AL(dense=True)[::-1][1:]
AL=[transpose(ai,axes=(1,0,2)) for ai in AL]
BL=[transpose(bi,axes=(1,0,2)).conj() for bi in BL] #transpose
mps=MPS(AL=AL,BL=BL,S=S,labels=labels,forder=range(NL)+range(NL,NL+NR)[::-1])
return mps
class DMRGEngine(object):
'''
DMRG Engine.
Attributes:
:hgen: <ExpandGenerator>, hamiltonian Generator.
:bmg: <BlockMarkerGenerator>, the block marker generator.
        :tol: float, the tolerance; when maxN and tol are both set, we keep the lower dimension.
        :reflect: bool, True to use the left<->right reflection symmetry, which can shortcut the run time.
        :eigen_solver: str,
            * 'JD', Jacobi-Davidson iteration.
            * 'LC', Lanczos algorithm.
        :iprint: int, the verbosity level of output information, 0 for none, 10 for debug.
:symm_handler: <SymmetryHandler>, the discrete symmetry handler.
:LPART/RPART: dict, the left/right sweep of hamiltonian generators.
:_tails(private): list, the last item of A matrices, which is used to construct the <MPS>.
'''
def __init__(self,hgen,tol=0,reflect=False,eigen_solver='LC',iprint=1):
self.tol=tol
self.hgen=hgen
self.eigen_solver=eigen_solver
#the symmetries
self.reflect=reflect
self.bmg=None
self._target_block=None
self.symm_handler=SymmetryHandler({},detect_scope=1)
#claim attributes with dummy values.
self._tails=None
self.LPART=None
self.RPART=None
self.iprint=iprint
#status
self.status={'isweep':0,'direction':'->','pos':0}
def _eigsh(self,H,v0,projector=None,tol=1e-10,sigma=None,lc_search_space=1,k=1):
'''
        Solve the eigenvalue problem.
'''
maxiter=5000
N=H.shape[0]
if self.iprint==10 and projector is not None and check_commute:
assert(is_commute(H,projector))
if self.eigen_solver=='LC':
k=max(lc_search_space,k)
if H.shape[0]<100:
e,v=eigh(H.toarray())
e,v=e[:k],v[:,:k]
else:
try:
e,v=eigsh(H,k=k,which='SA',maxiter=maxiter,tol=tol,v0=v0)
except:
e,v=eigsh(H,k=k+1,which='SA',maxiter=maxiter,tol=tol,v0=v0)
order=argsort(e)
e,v=e[order],v[:,order]
else:
iprint=0
maxiter=500
if projector is not None:
e,v=JDh(H,v0=v0,k=k,projector=projector,tol=tol,maxiter=maxiter,sigma=sigma,which='SA',iprint=iprint)
else:
if sigma is None:
e,v=JDh(H,v0=v0,k=max(lc_search_space,k),projector=projector,tol=tol,maxiter=maxiter,which='SA',iprint=iprint)
else:
e,v=JDh(H,v0=v0,k=k,projector=projector,tol=tol,sigma=sigma,which='SL',\
iprint=iprint,converge_bound=1e-10,maxiter=maxiter)
nstate=len(e)
if nstate==0:
raise Exception('No Converged Pair!!')
elif nstate==k or k>1:
return e,v
#filter out states meeting projector.
if projector is not None and lc_search_space!=1:
overlaps=array([abs(projector.dot(v[:,i]).conj().dot(v[:,i])) for i in xrange(nstate)])
mask0=overlaps>0.1
if not any(mask0):
raise Exception('Can not find any states meeting specific parity!')
mask=overlaps>0.9
if sum(mask)==0:
#check for degeneracy.
istate=where(mask0)[0][0]
                warnings.warn('Wrong result or degeneracy occurred!')
else:
istate=where(mask)[0][0]
v=projector.dot(v[:,istate:istate+1])
v=v/norm(v)
return e[istate:istate+1],v
else:
#get the state with maximum overlap.
v0H=v0.conj()/norm(v0)
overlaps=array([abs(v0H.dot(v[:,i])) for i in xrange(nstate)])
istate=argmax(overlaps)
if overlaps[istate]<0.7:
                warnings.warn('Did not find any state corresponding to the one from the last iteration! %s'%overlaps)
e,v=e[istate:istate+1],v[:,istate:istate+1]
return e,v
@property
def nsite(self):
'''Number of sites'''
return self.hgen.nsite
def query(self,which,length):
'''
        Query the hamiltonian generator of a specific part.
which:
`l` -> the left part.
`r` -> the right part.
length:
The length of block.
'''
assert(which=='l' or which=='r')
if which=='l' or self.reflect:
return copy.copy(self.LPART[length])
else:
return copy.copy(self.RPART[length])
def set(self,which,hgen,length=None):
'''
        Set the hamiltonian generator for a specific part.
Parameters:
:which: str,
* `l` -> the left part.
* `r` -> the right part.
:hgen: <ExpandGenerator>, the RG hamiltonian generator.
:length: int, the length of block, if set, it will do a length check.
'''
assert(length is None or length==hgen.N)
assert(hgen.truncated)
if which=='l' or self.reflect:
self.LPART[hgen.N]=hgen
else:
self.RPART[hgen.N]=hgen
def reset(self):
'''Restore this engine to initial status.'''
#we insert Zs into operator collections to cope with fermionic sign problem.
#and use site image to create a reversed ordering!
hgen_l=copy.deepcopy(self.hgen)
if not isinstance(hgen_l.spaceconfig,SpinSpaceConfig):
insert_Zs(hgen_l.evolutees['H'].opc,spaceconfig=hgen_l.spaceconfig)
self.LPART={0:hgen_l}
if not self.reflect:
hgen_r=copy.deepcopy(self.hgen)
hgen_r.evolutees['H'].opc=site_image(hgen_r.evolutees['H'].opc,NL=0,NR=hgen_r.nsite,care_sign=True)
if not isinstance(hgen_l.spaceconfig,SpinSpaceConfig):
insert_Zs(hgen_r.evolutees['H'].opc,spaceconfig=hgen_r.spaceconfig)
self.RPART={0:hgen_r}
def use_disc_symmetry(self,target_sector,detect_scope=2):
'''
Use specific discrete symmetry.
Parameters:
:target_sector: dict, {name:parity} pairs.
:detect_scope:
'''
if target_sector.has_key('C') and not self.reflect:
            raise Exception('Using C2 symmetry without reflection symmetry is unreliable, forbidden for safety!')
symm_handler=SymmetryHandler(target_sector,detect_scope=detect_scope)
if target_sector.has_key('P'): #register flip evolutee.
handler=symm_handler.handlers['P']
self.hgen.register_evolutee('P',opc=prod([handler.P(i) for i in xrange(self.hgen.nsite)]),initial_data=sps.identity(1))
if target_sector.has_key('J'): #register p-h evolutee.
handler=symm_handler.handlers['J']
self.hgen.register_evolutee('J',opc=prod([handler.J(i) for i in xrange(self.hgen.nsite)]),initial_data=sps.identity(1))
self.symm_handler=symm_handler
def use_U1_symmetry(self,qnumber,target_block):
'''
Use specific U1 symmetry.
'''
self.bmg=SimpleBMG(spaceconfig=self.hgen.spaceconfig,qstring=qnumber)
self._target_block=target_block
@property
def target_block(self):
'''Get the target block.'''
target_block=self._target_block
if hasattr(target_block,'__call__'):
n,pos=self.status['isweep'],self.status['pos']
nsite=self.nsite
if n==0 and pos<nsite/2: nsite=pos*2
target_block=target_block(nsite=nsite)
return target_block
def run_finite(self,endpoint=None,tol=0,maxN=20,nlevel=1,call_before=None,call_after=None):
'''
Run the application.
Parameters:
:endpoint: tuple, the end position tuple of (sweep, direction, size of left-block).
            :tol: float, the tolerance of energy.
            :maxN: int, maximum number of kept states and the tolerance for truncation weight.
            :nlevel: int, the number of desired energy levels.
            :call_before/call_after: function/None, the function to call back before/after each iteration, taking `DMRGEngine` as a parameter.
Return:
tuple, the ground state energy and the ground state(in <MPS> form).
'''
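        # Illustrative call (values are assumptions, not defaults):
        #   EG, mps = engine.run_finite(endpoint=(4, '<-', 0), tol=1e-8,
        #                               maxN=[10, 20, 40, 40])
        # i.e. four sweeps, keeping maxN[i] states during sweep i.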
EL=[]
#check the validity of datas.
if isinstance(self.hgen.evolutor,NullEvolutor):
raise ValueError('The evolutor must not be null!')
if not self.symm_handler==None and nlevel!=1:
            raise NotImplementedError('The symmetry handler cannot be used in multi-level calculations!')
if not self.symm_handler==None and self.bmg is None:
            raise NotImplementedError('The symmetry handler cannot be used without a block marker generator!')
self.reset()
nsite=self.hgen.nsite
if endpoint is None: endpoint=(4,'<-',0)
maxsweep,end_direction,end_site=endpoint
if ndim(maxN)==0:
maxN=[maxN]*maxsweep
assert(len(maxN)>=maxsweep and end_site<=(nsite-2 if not self.reflect else nsite/2-2))
EG_PRE=Inf
initial_state=None
if self.reflect:
iterators={'->':xrange(nsite/2),'<-':xrange(nsite/2-2,-1,-1)}
else:
iterators={'->':xrange(nsite-1),'<-':xrange(nsite-2,-1,-1)}
for n,m in enumerate(maxN):
for direction in ['->','<-']:
for i in iterators[direction]:
print 'Running %s-th sweep, iteration %s'%(n+1,i)
t0=time.time()
self.status.update({'isweep':n,'pos':i+1,'direction':direction})
if call_before is not None: call_before(self)
#setup generators and operators.
#The cases to use identical hamiltonian generator,
#1. the first half of first sweep.
#2. the reflection is used and left block is same length with right block.
hgen_l=self.query('l',i)
if (n==0 and direction=='->' and i<(nsite+1)/2) or (self.reflect and i==(nsite/2-1) and nsite%2==0):
hgen_r=hgen_l
else:
hgen_r=self.query('r',nsite-i-2)
print 'A'*hgen_l.N+'..'+'B'*hgen_r.N
nsite_true=hgen_l.N+hgen_r.N+2
#run a step
if n<=2:
e_estimate=None
else:
e_estimate=EG[0]
EG,err,phil=self.dmrg_step(hgen_l,hgen_r,tol=tol,maxN=m,
initial_state=initial_state,e_estimate=e_estimate,nlevel=nlevel)
#update LPART and RPART
print 'setting %s-site of left and %s-site of right.'%(hgen_l.N,hgen_r.N)
self.set('l',hgen_l,hgen_l.N)
print 'set L = %s, size %s'%(hgen_l.N,hgen_l.ndim)
if hgen_l is not hgen_r or (not self.reflect and n==0 and i<nsite/2):
#Note: Condition for setting up the right block,
#1. when the left and right part are not the same one.
#2. when the block has not been expanded to full length and not reflecting.
self.set('r',hgen_r,hgen_r.N)
print 'set R = %s, size %s'%(hgen_r.N,hgen_r.ndim)
if call_after is not None: call_after(self)
#do state prediction
initial_state=None #restore initial state.
phi=phil[0]
if nsite==nsite_true:
if self.reflect and nsite%2==0 and (i==nsite/2-2 and direction=='->'):
                            #Prediction cannot be used when we are about to hit the
                            #symmetry point while using reflection symmetry, because
                            #the right block is instantly replaced by another hamiltonian
                            #generator which is not directly connected to the current one.
initial_state=sum([self.state_prediction(phi,l=i+1,direction=direction) for phi in phil],axis=0).ravel()
elif direction=='->' and i==nsite-2: #for the case without reflection.
initial_state=phil[0].ravel()
elif direction=='<-' and i==0:
initial_state=phil[0].ravel()
else:
if self.reflect and direction=='->' and i==nsite/2-1:
                            direction='<-' #the turning point where reflection is used.
initial_state=sum([self.state_prediction(phi,l=i+1,direction=direction) for phi in phil],axis=0)
initial_state=initial_state.ravel()
if len(EL)>0:
diff=EG-EL[-1]
else:
diff=Inf
t1=time.time()
print 'EG = %s, dE = %s, Elapse -> %.2f, TruncError -> %s'%(EG,diff,t1-t0,err)
EL.append(EG)
if i==end_site and direction==end_direction:
diff=EG-EG_PRE
print 'MidPoint -> EG = %s, dE = %s'%(EG,diff)
if n==maxsweep-1:
print 'Breaking due to maximum sweep reached!'
return EG,self.get_mps(phi=phil[0],l=i+1,direction=direction)
else:
EG_PRE=EG
def run_infinite(self,maxiter=50,tol=0,maxN=20,nlevel=1):
'''
Run the application.
Parameters:
:maxiter: int, the maximum iteration times.
:tol: float, the rolerence of energy.
:maxN: int/list, maximum number of kept states and the tolerence for truncation weight.
Return:
tuple of EG,MPS.
'''
if isinstance(self.hgen.evolutor,NullEvolutor):
raise ValueError('The evolutor must not be null!')
if not self.symm_handler==None and nlevel!=1:
            raise NotImplementedError('The symmetry handler cannot be used in multi-level calculations!')
if not self.symm_handler==None and self.bmg is None:
            raise NotImplementedError('The symmetry handler cannot be used without a block marker generator!')
self.reset()
EL=[]
hgen=copy.deepcopy(self.hgen)
if isinstance(hgen.evolutor,NullEvolutor):
raise ValueError('The evolutor must not be null!')
if maxiter>self.hgen.nsite:
warnings.warn('Max iteration exceeded the chain length!')
for i in xrange(maxiter):
print 'Running iteration %s'%i
t0=time.time()
EG,err,phil=self.dmrg_step(hgen,hgen,tol=tol,nlevel=nlevel)
EG=EG/(2.*(i+1))
if len(EL)>0:
diff=EG-EL[-1]
else:
diff=Inf
t1=time.time()
print 'EG = %.5f, dE = %.2e, Elapse -> %.2f(D=%s), TruncError -> %.2e'%(EG,diff,t1-t0,hgen.ndim,err)
EL.append(EG)
if abs(diff)<tol:
print 'Breaking!'
break
return EG,_get_mps(hgen,hgen,phi=phil[0],direction='->',labels=['s','a'])
def dmrg_step(self,hgen_l,hgen_r,tol=0,maxN=20,e_estimate=None,nlevel=1,initial_state=None):
'''
Run a single step of DMRG iteration.
Parameters:
:hgen_l,hgen_r: <ExpandGenerator>, the hamiltonian generator for left and right blocks.
            :tol: float, the tolerance.
            :maxN: int, maximum number of kept states and the tolerance for truncation weight.
:initial_state: 1D array/None, the initial state(prediction), None for random.
Return:
tuple of (ground state energy(float), unitary matrix(2D array), kpmask(1D array of bool), truncation error(float))
'''
direction=self.status['direction']
target_block=self.target_block
t0=time.time()
intraop_l,intraop_r,interop=[],[],[]
hndim=hgen_l.hndim
ndiml0,ndimr0=hgen_l.ndim,hgen_r.ndim
NL,NR=hgen_l.N,hgen_r.N
        #filter out the operators linking the left and right blocks (those acting on sites NL and NL+1).
interop=filter(lambda op:isinstance(op,OpString) and (NL+1 in op.siteindex),hgen_l.hchain.query(NL)) #site NL and NL+1
OPL=hgen_l.expand1()
HL0=OPL['H']
        #expansion can not be done twice on the same hamiltonian generator!
if hgen_r is hgen_l:
OPR,HR0=OPL,HL0
else:
OPR=hgen_r.expand1()
HR0=OPR['H']
#blockize HL0 and HR0
NL,NR=hgen_l.N,hgen_r.N
if self.bmg is not None:
n=max(NL,NR)
if isinstance(hgen_l.evolutor,MaskedEvolutor) and n>1:
kpmask_l=hgen_l.evolutor.kpmask(NL-2) #kpmask is also related to block marker!!!
kpmask_r=hgen_r.evolutor.kpmask(NR-2)
bml,pml=self.bmg.update1(trunc_bm(hgen_l.block_marker or self.bmg.bm0,kpmask_l)).compact_form()
bmr,pmr=self.bmg.update1(trunc_bm(hgen_r.block_marker or self.bmg.bm0,kpmask_r)).compact_form()
else:
bml,pml=self.bmg.update1(hgen_l.block_marker).compact_form()
bmr,pmr=self.bmg.update1(hgen_r.block_marker).compact_form()
else:
bml,pml=None,None #get_blockmarker(HL0)
bmr,pmr=None,None #get_blockmarker(HR0)
if target_block is None:
Hc,bm_tot=_gen_hamiltonian_full(HL0,HR0,hgen_l,hgen_r,interop=interop),None
else:
if False: #efficiency cross over
Hc,bm_tot,pm_tot=_gen_hamiltonian_block0(HL0,HR0,hgen_l=hgen_l,hgen_r=hgen_r,\
blockinfo=dict(bml=bml,bmr=bmr,pml=pml,pmr=pmr,bmg=self.bmg,target_block=target_block),interop=interop)
else:
Hc,bm_tot,pm_tot=_gen_hamiltonian_block(HL0,HR0,hgen_l=hgen_l,hgen_r=hgen_r,\
blockinfo=dict(bml=bml,bmr=bmr,pml=pml,pmr=pmr,bmg=self.bmg,target_block=target_block),interop=interop)
#get the starting eigen state v00!
if initial_state is None:
initial_state=random.random(bm_tot.N)
if not self.symm_handler==None:
if hgen_l is not hgen_r:
#Note, The cases to disable C2 symmetry,
#1. NL!=NR
#2. NL==NR, reflection is not used(and not the first iteration).
self.symm_handler.update_handlers(OPL=OPL,OPR=OPR,useC=False)
else:
nl=(int32(1-sign4bm(bml,self.bmg,diag_only=True))/2)[argsort(pml)]
self.symm_handler.update_handlers(OPL=OPL,OPR=OPR,n=nl,useC=True)
v00=self.symm_handler.project_state(phi=initial_state)
            if self.iprint==10: assert(self.symm_handler.check_op(Hc)) #Hc is the superblock hamiltonian constructed above
else:
v00=initial_state
#perform diagonalization
##1. detect specific block for diagonalization, get v0 and projector
projector=self.symm_handler.get_projector() if len(self.symm_handler.symms)!=0 else None
if self.bmg is None or target_block is None:
v0=v00/norm(v00)
else:
indices=pm_tot[bm_tot.get_slice(target_block,uselabel=True)]
v0=v00[indices]
if projector is not None:
projector=projector[indices]
##2. diagonalize to get desired number of levels
detect_C2=self.symm_handler.target_sector.has_key('C')# and not symm_handler.useC
t1=time.time()
if norm(v0)==0:
warnings.warn('Empty v0')
v0=None
print 'The density of Hamiltonian -> %s'%(1.*len(Hc.data)/Hc.shape[0]**2)
e,v=self._eigsh(Hc,v0,sigma=e_estimate,projector=projector,
lc_search_space=self.symm_handler.detect_scope if detect_C2 else 1,k=nlevel,tol=1e-10)
if v0 is not None:
print 'The goodness of estimate -> %s'%(v0.conj()/norm(v0)).dot(v[:,0])
t2=time.time()
##3. permute back eigen-vectors into original representation al,sl+1,sl+2,al+2
if bm_tot is not None:
indices=pm_tot[bm_tot.get_slice(target_block,uselabel=True)]
vl=zeros([bm_tot.N,v.shape[1]],dtype=v.dtype)
vl[indices]=v; vl=vl.T
else:
vl=v.T
        #Do wavefunction analysis; a preliminary truncation is performed (up to ZERO_REF).
for v in vl:
v[abs(v)<ZERO_REF]=0
#spec1,U1,kpmask1,trunc_error=self.rdm_analysis(phis=vl,bml=bml,bmr=bmr,side='l',maxN=maxN)
U1,specs,U2,(kpmask1,kpmask2),trunc_error=self.svd_analysis(phis=vl,bml=HL0.shape[0] if bml is None else bml,\
bmr=HR0.shape[0] if bmr is None else bmr,pml=pml,pmr=pmr,maxN=maxN)
print '%s states kept.'%sum(kpmask1)
hgen_l.trunc(U=U1,kpmask=kpmask1) #kpmask is also important for setting up the sign
if hgen_l is not hgen_r:
#spec2,U2,kpmask2,trunc_error=self.rdm_analysis(phis=vl,bml=bml,bmr=bmr,side='r',maxN=maxN)
hgen_r.trunc(U=U2,kpmask=kpmask2)
phil=[phi.reshape([ndiml0,hndim,ndimr0,hndim]) for phi in vl]
t3=time.time()
        print 'Elapse -> prepare:%.2f, eigen:%.2f, trunc: %.2f'%(t1-t0,t2-t1,t3-t2)
return e,trunc_error,phil
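    # Sketch of how a single step is driven within a sweep (illustrative;
    # `engine`, `hgen_l` and `hgen_r` stand for an instance of this class and
    # the left/right <ExpandGenerator> objects it manages):
    #
    #     e, err, phil = engine.dmrg_step(hgen_l, hgen_r, tol=1e-10, maxN=40)
    #
    # e holds the lowest nlevel eigen-energies, err the truncation error and
    # phil the superblock eigenstates later fed to the state prediction.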
def svd_analysis(self,phis,bml,bmr,pml,pmr,maxN):
'''
The direct analysis of state(svd).
Parameters:
:phis: list of 1D array, the kept eigen states of current iteration.
:bml/bmr: <BlockMarker>/int, the block marker for left and right blocks/or the dimensions.
:maxN: int, the maximum kept values.
Return:
            tuple of (U, (spec_l, spec_r), U2, kpmasks, trunc_error):
            the left/right unitaries, the singular spectra, the keep-masks and the truncation error.
'''
if isinstance(bml,numbers.Number):
use_bm=False
ndiml,ndimr=bml,bmr
else:
ndiml,ndimr=bml.N,bmr.N
use_bm=True
phi=sum(phis,axis=0).reshape([ndiml,ndimr])/sqrt(len(phis)) #construct wave function of equal distribution of all states.
phi[abs(phi)<ZERO_REF]=0
if use_bm:
phi=phi[pml]
phi=phi[:,pmr]
def mapping_rule(bli):
res=self.bmg.bcast_sub([self.target_block],[bli])[0]
return tuple(res)
U,S,V,S2=svdb(phi,bm=bml,bm2=bmr,mapping_rule=mapping_rule,full_matrices=True)
else:
U,S,V=svd(phi,full_matrices=True);U2=V.T.conj()
if ndimr>=ndiml:
S2=append(S,zeros(ndimr-ndiml))
else:
S2=append(S,zeros(ndiml-ndimr))
S,S2=S2,S
S,S2=sps.diags(S,0),sps.diags(S2,0)
spec_l=S.dot(S.T.conj()).diagonal().real
spec_r=S2.T.conj().dot(S2).diagonal().real
if use_bm:
if self.iprint==10 and not (bml.check_blockdiag(U.dot(sps.diags(spec_l,0)).dot(U.T.conj())) and\
bmr.check_blockdiag((V.T.conj().dot(sps.diags(spec_r,0))).dot(V))):
                raise Exception('''Density matrix is not block diagonal, which is not expected,
                    1. make sure you are using additive good quantum numbers.
                    2. avoid ground state degeneracy.''')
#permute U and V
U,V=U.tocsr()[argsort(pml)],V.tocsc()[:,argsort(pmr)]
U2=V.T.conj()
kpmasks=[]
for Ui,spec in zip([U,U2],[spec_l,spec_r]):
kpmask=zeros(Ui.shape[1],dtype='bool')
spec_cut=sort(spec)[max(0,Ui.shape[0]-maxN)]
kpmask[(spec>=spec_cut)&(spec>ZERO_REF)]=True
trunc_error=sum(spec[~kpmask])
kpmasks.append(kpmask)
U,U2=_eliminate_zeros(U,ZERO_REF),_eliminate_zeros(U2,ZERO_REF)
return U,(spec_l,spec_r),U2,kpmasks,trunc_error
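    # Truncation rule used above, in brief: at most maxN states are kept per
    # side, ranked by the squared singular values (the reduced density matrix
    # spectrum); the discarded weight sum(spec[~kpmask]) is reported as the
    # truncation error.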
def rdm_analysis(self,phis,bml,bmr,side,maxN):
'''
The analysis of reduced density matrix.
Parameters:
:phis: list of 1D array, the kept eigen states of current iteration.
:bml/bmr: <BlockMarker>/int, the block marker for left and right blocks/or the dimensions.
:side: 'l'/'r', view the left or right side as the system.
:maxN: the maximum kept values.
Return:
            tuple of (spec, U, kpmask, trunc_error): the spectrum and unitary matrix
            from the density matrix, the keep-mask and the truncation error.
'''
assert(side=='l' or side=='r')
ndiml,ndimr=(bml,bmr) if isinstance(bml,numbers.Number) else (bml.N,bmr.N)
phis=[phi.reshape([ndiml,ndimr]) for phi in phis]
rho=0
phil=[]
if side=='l':
for phi in phis:
phi=sps.csr_matrix(phi)
rho=rho+phi.dot(phi.T.conj())
phil.append(phi)
bm=bml
else:
for phi in phis:
phi=sps.csc_matrix(phi)
rho=rho+phi.T.dot(phi.conj())
phil.append(phi)
bm=bmr
if bm is not None:
rho=bm.blockize(rho)
if self.iprint==10 and not bm.check_blockdiag(rho,tol=1e-5):
ion()
pcolor(exp(abs(rho.toarray().real)))
show_bm(bm)
pdb.set_trace()
                raise Exception('''Density matrix is not block diagonal, which is not expected,
                    1. make sure you are using additive good quantum numbers.
                    2. avoid ground state degeneracy.''')
spec,U=eigbh(rho,bm=bm)
kpmask=zeros(U.shape[1],dtype='bool')
spec_cut=sort(spec)[max(0,U.shape[0]-maxN)]
kpmask[(spec>=spec_cut)&(spec>ZERO_REF)]=True
trunc_error=sum(spec[~kpmask])
print 'With %s(%s) blocks.'%(bm.nblock,bm.nblock)
return spec,U,kpmask,trunc_error
def state_prediction(self,phi,l,direction):
'''
Predict the state for the next iteration.
Parameters:
:phi: ndarray, the state from the last iteration, [llink, site1, rlink, site2]
:l: int, the current division point, the size of left block.
:direction: '->'/'<-', the moving direction.
Return:
ndarray, the new state in the basis |al+1,sl+2,sl+3,al+3>.
reference -> PRL 77. 3633
'''
assert(direction=='<-' or direction=='->')
nsite=self.hgen.nsite
NL,NR=l,nsite-l
phi=tensor.Tensor(phi,labels=['a_%s'%(NL-1),'s_%s'%(NL),'b_%s'%(NR-1),'t_%s'%NR]) #l=NL-1
if self.reflect and nsite%2==0 and l==nsite/2-1 and direction=='->': #hard prediction!
return self._state_prediction_hard(phi)
hgen_l,hgen_r=self.query('l',NL),self.query('r',NR)
lr=NR-2 if direction=='->' else NR-1
ll=NL-1 if direction=='->' else NL-2
A=hgen_l.evolutor.A(ll,dense=True) #get A[sNL](NL-1,NL)
B=hgen_r.evolutor.A(lr,dense=True) #get B[sNR](NL+1,NL+2)
if direction=='->':
A=tensor.Tensor(A,labels=['s_%s'%NL,'a_%s'%(NL-1),'a_%s'%NL]).conj()
            B=tensor.Tensor(B,labels=['t_%s'%(NR-1),'b_%s'%(NR-2),'b_%s'%(NR-1)])#.conj() #!the conjugate? the right side shrinks, so B(al,al+1) is not conjugated.
phi=tensor.contract([A,phi,B])
phi=phi.chorder([0,1,3,2])
if hgen_r.use_zstring: #cope with the sign problem
n1=(1-Z4scfg(hgen_l.spaceconfig).diagonal())/2
nr=(1-hgen_r.zstring(lr).diagonal())/2
n_tot=n1[:,newaxis,newaxis]*(nr[:,newaxis]+n1)
phi=phi*(1-2*(n_tot%2))
else:
A=tensor.Tensor(A,labels=['s_%s'%(NL-1),'a_%s'%(NL-2),'a_%s'%(NL-1)])#.conj()
B=tensor.Tensor(B,labels=['t_%s'%NR,'b_%s'%(NR-1),'b_%s'%NR]).conj() #!the conjugate?
phi=tensor.contract([A,phi,B])
phi=phi.chorder([1,0,3,2])
if hgen_r.use_zstring: #cope with the sign problem
n1=(1-Z4scfg(hgen_l.spaceconfig).diagonal())/2
nr=(1-hgen_r.zstring(lr+1).diagonal())/2
n_tot=n1*(nr[:,newaxis])
phi=phi*(1-2*(n_tot%2))
return phi
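    # Note on the prediction above (descriptive only): it follows White's
    # wavefunction transformation (PRL 77, 3633). Moving '->' by one site, the
    # new guess absorbs one A-matrix on the left and peels one B-matrix off
    # the right (and vice versa for '<-'); when Z-strings are in use an extra
    # fermionic sign (1-2*(n_tot%2)) is attached, as coded above.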
def _state_prediction_hard(self,phi):
'''
        The hardest prediction, at the reflection point: phi(al,sl+1,sl+2,al+2) -> phi(al-1,sl,sl+1,al+1')
'''
nsite=self.hgen.nsite
l=nsite/2
hgen_l,hgen_r0,hgen_r=self.query('l',l-1),self.query('r',l+2),self.query('r',l-1)
#do regular evolution to phi(al,sl+1,sl+2,al+2) -> phi(al-1,sl,sl+1,al+1)
A=hgen_l.evolutor.A(l-2,dense=True) #get A[sNL](NL-1,NL)
B=hgen_r0.evolutor.A(l-1,dense=True) #get B[sNR](NL+1,NL+2)
A=tensor.Tensor(A,labels=['s_%s'%(l-1),'a_%s'%(l-2),'a_%s'%(l-1)])
B=tensor.Tensor(B,labels=['t_%s'%l,'b_%s'%(l-1),'b_%s'%(l)]).conj()
phi=tensor.contract([A,phi,B])
if hgen_r.use_zstring: #cope with the sign problem
n1=(1-Z4scfg(hgen_l.spaceconfig).diagonal())/2
nr=(1-hgen_r0.zstring(l-1).diagonal())/2
n_tot=n1[:,newaxis,newaxis]*(nr+n1[:,newaxis])
phi=phi*(1-2*(n_tot%2))
#do the evolution from phi(al-1,sl,sl+1,al+1) -> phi(al-1,sl,sl+1,al+1')
        #first calculate tensor R(al+1',al+1), with the right index incremented and the left decremented.
BL0=hgen_r0.evolutor.get_AL(dense=True)[:l-1]
BL=hgen_r.evolutor.get_AL(dense=True)
BL0=[tensor.Tensor(bi,labels=['t_%s'%(i+1),'b_%s'%i,'b_%s'%(i+1)]) for i,bi in enumerate(BL0)]
BL=[tensor.Tensor(bi,labels=['t_%s'%(i+1),'b_%s'%i+('\'' if i!=0 else ''),'b_%s\''%(i+1)]).conj() for i,bi in enumerate(BL)]
R=BL[0]*BL0[0]
for i in xrange(1,l-1):
R=tensor.contract([R,BL0[i],BL[i]])
#second, calculate phi*R
phi=phi*R
phi=phi.chorder([0,1,3,2])
return phi
def get_mps(self,phi,l,labels=['s','a'],direction=None):
'''
Get the MPS from run-time phi, and evolution matrices.
Parameters:
:phi: ndarray, the eigen-function of current step.
:l: int, the size of left block.
:direction: '->'/'<-'/None, if None, the direction is provided by the truncation information.
Return:
            <MPS>, the desired MPS; its canonicality is decided by the current position.
'''
#get the direction
assert(direction=='<-' or direction=='->')
nsite=self.hgen.nsite
NL,NR=l,nsite-l
hgen_l,hgen_r=self.query('l',NL),self.query('r',NR)
return _get_mps(hgen_l,hgen_r,phi,direction,labels)
def fix_tail(mps,spaceconfig,parity,head2tail=True):
'''
    Fix the ordering to normal order (reverse).
Parameters:
:mps: <MPS>, the matrix product state.
:spaceconfig: <SpaceConfig>,
:parity: int, 1 for odd parity, 0 for even parity.
:head2tail: bool, move head to tail if True, else move tail to head.
Return:
<MPS>, the new MPS.
'''
nsite=mps.nsite
assert(allclose(mps.forder,[0]+range(1,nsite)[::-1]))
n1=(1-Z4scfg(spaceconfig).diagonal())/2
site_axis=mps.site_axis
if head2tail:
j=list(mps.forder).index(0)
norder=array(mps.forder)-1
norder[j]=nsite-1
else:
j=list(mps.forder).index(nsite-1)
norder=array(mps.forder)+1
norder[j]=0
mps.forder=norder
if parity==1:
return mps
if j<mps.l:
mps.AL[j]=mps.AL[j]*(1-2*(n1%2))[tuple([slice(None)]+[newaxis]*(2-site_axis))]
else:
mps.BL[j-mps.l]=mps.BL[j-mps.l]*(1-2*(n1%2))[tuple([slice(None)]+[newaxis]*(2-site_axis))]
return mps
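# Illustrative use of fix_tail (a hedged sketch; `mps` stands for an <MPS>
# whose forder is [0, nsite-1, ..., 1] and `scfg` for a <SpaceConfig>):
#
#     mps2 = fix_tail(mps, scfg, parity=1, head2tail=True)
#
# which relabels site 0 to the tail and, for even parity (as coded above),
# attaches the corresponding fermionic sign to the affected matrix.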
|
GiggleLiu/dmrg
|
dmrg.py
|
Python
|
gpl-2.0
| 37,043
|
#!/usr/bin/env python
import os
import sys
import yaml
import pprint
import moa.script
args = moa.script.getArgs()
def errex(message):
print message
sys.exit(-1)
statfiles = args['stats_files']
data = {}
for sf in statfiles:
with open(sf) as F:
d = yaml.load(F)
bn = os.path.basename(d['fasta'])
bn = bn.replace('.fasta', '')
bn = bn.replace('.fa', '')
bn = bn.replace('.fna', '')
bn = bn.replace('.seq', '')
data[bn] = d
kys = data.keys()
kys.sort()
with open('report.md', 'w') as F:
F.write("#fasta stats\n\n")
F.write("## Some basic length stats\n\n")
F.write("!! Max sequence (contig) length\n")
F.write("%20s max\n" % "")
for k in kys:
if not data[k]['data']:
mx = 0
else:
mx = data[k]['data']['len']['max']
F.write("%-20s %10d\n" % (k, mx))
F.write("!# chs=1000x200\n\n")
F.write("!! No sequences (contigs)\n")
F.write("%20s No\n" % "")
for k in kys:
if not data[k]['data']:
mx = 0
else:
mx = data[k]['data']['len']['n']
F.write("%-20s %10d\n" % (k, mx))
F.write("!# chs=1000x200\n\n")
F.write("!! Total sequence length\n")
F.write("%20s Total\n" % "")
for k in kys:
if not data[k]['data']:
mx = 0
else:
mx = data[k]['data']['len']['sum']
F.write("%-20s %10d\n" % (k, mx))
F.write("!# chs=1000x200\n\n")
F.write("!! N50\n")
F.write("%20s n50\n" % "")
for k in kys:
if not data[k]['data']:
mx = 0
else:
mx = data[k]['data']['len']['n50']
F.write("%-20s %10d\n" % (k, mx))
F.write("!# chs=1000x200\n\n")
F.write("!! No N's\n")
F.write("%20s NoN\n" % "")
for k in kys:
if not data[k]['data']:
mx = 0
else:
mx = data[k]['data']['non']['sum']
F.write("%-20s %10d\n" % (k, mx))
F.write("!# chs=1000x200\n\n")
#write most important stats to a table
F.write('| %15s ' % 'Id')
F.write('| Tot.Seq.len | Contigs | Longest.Cnt | N50 |')
F.write('Seqlen-Ns | Tot no Ns |\n')
F.write('|%s|-------------|---------|-------------|' % ('-' * 17))
F.write('-----------|------------|------------|\n')
for k in kys:
F.write("| %15s |%12.3g |%8g |%12.3g |%10.3g | %10.3g | %10.3g |\n" % (
k[:15],
data[k]['data']['len']['sum'],
data[k]['data']['len']['n'],
data[k]['data']['len']['max'],
data[k]['data']['len']['n50'],
data[k]['data']['len']['sum'] - data[k]['data']['non']['sum'],
data[k]['data']['non']['sum'],
))
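# For reference, a hedged sketch of the per-fasta stats-file layout implied by
# the keys accessed above (the real files may well carry additional fields):
#
#     fasta: /path/to/contigs.fasta
#     data:
#       len: {n: 120, sum: 350000, max: 15000, n50: 4200}
#       non: {sum: 1200}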
|
mfiers/Moa
|
moa/data/templates/fastainfo.finish.py
|
Python
|
gpl-3.0
| 2,769
|
#!/usr/bin/python3
# Copyright 2016-2018 Francisco Pina Martins <f.pinamartins@gmail.com>
# and Joao Baptista <baptista.joao33@gmail.com>
# This file is part of pyRona.
# pyRona is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pyRona is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pyRona. If not, see <http://www.gnu.org/licenses/>.
import pickle
from collections import defaultdict
import numpy as np
import pyRona.pyRona as pr
import pytest
assert pytest # Hacky solution to stop the linters from complaining
# Initialize the Rona class
with open("../tests/data/jar/file_parser.popnames_parser.pickle", "rb") as f:
pr.RonaClass.POP_NAMES = pickle.load(f)
RONAS = {}
RONA = pr.RonaClass("5")
RONA_DICT = {'558': [0.0014018084856003402, 0.00044007391611439851,
5.4391158171442511e-05, 0.00076889319051446367,
0.0012089671066288572, 0.0012905538438860258,
0.00044007391611439851, 0.00024723253714292048,
0.00016564579988575674, 0.00022003695805716098,
0.0003560148534858055, 0.001649041022743251,
0.0023907386341719839, 0.00076889319051452103,
0.0014561996437717636, 0.000192841378971478],
'917': [0.034081182741242728, 0.010699207280319563,
0.0013223739335225977, 0.018693558787524652,
0.029392766067844217, 0.031376326968128254,
0.010699207280319563, 0.0060107906069210527,
0.0040272297066371275, 0.0053496036401597816,
0.00865553847396636, 0.040091973348163668,
0.058124345168927158, 0.018693558787524652,
0.035403556674765267, 0.004688416673398455],
'928': [0.0083601552048518327, 0.015616386870309709,
0.0019301152311618597, 0.027284810767788412,
0.042901197638098118, 0.045796370484840906,
0.015616386870309709, 0.0087732510507357846,
0.0058780782039929945, 0.0078081934351548546,
0.012633481513059503, 0.058517584508407702,
0.084837337660615311, 0.027284810767788412,
0.051674448688833904, 0.0068431358195739245],
'1061': [0.010428942606976906, 0.0032739890371108844,
0.00040465033042942443, 0.0057202842165252595,
0.0089942732536361439, 0.0096012487492803129,
0.0032739890371108844, 0.0018393196837702072,
0.0012323441881260389, 0.0016369945185554632,
0.0026486203446290349, 0.012268262290747071,
0.017786221342057564, 0.0057202842165252179,
0.010833592937406331, 0.0014346693533407616]}
RONA.pop_ronas = defaultdict(list)
for k, v in RONA_DICT.items():
RONA.pop_ronas[k] = v
RONA.corr_coef = {'558': 0.00030210072963844631,
'917': 0.19358706622191832,
'928': 0.27211499170048115,
'1061': 0.017094197215003121}
RONA.avg_ronas = [0.018735931355473877, 0.013199755733777314,
0.0016314304839499785, 0.023062494568566048,
0.036262250302343361, 0.038709396028268381,
0.013199755733777314, 0.0074155931088636358,
0.0049684473829386556, 0.0065998778668886569,
0.010678454076763636, 0.049462006036120554,
0.071708785362711747, 0.023062494568566048,
0.043677843411207, 0.0057841626249136571]
RONA.stderr_ronas = [0.0061531608283148442, 0.0029980427796045179,
0.00037054461320954265, 0.0052381533958258866,
0.008236196175430404, 0.0087920130952447249,
0.0029980427796045179, 0.001684293696407024,
0.0011284767765927107, 0.0014990213898022629,
0.0024253829228261183, 0.01123423895503489,
0.016287120044256045, 0.0052381533958258822,
0.0099204898718374332, 0.0013137490831974829]
RONAS['5'] = RONA
RONA = pr.RonaClass("11")
RONA_DICT = {'109': [0.0088246354874106526, 0.027885848140217651,
0.006249038485830795, 0.0075278627810403476,
0.0058167809170407035, 0.0081934496949213342,
0.0050195723213135417, 0.0068144134373971212,
0.0065646313820755019, 0.005800328206809916,
0.0050360250315443457, 0.0075622639024319196,
0.0083265670776975064, 0.0034468915738891386,
0.0065152732513832018, 0.0067964650262362734],
'145': [0.025307793158957186, 0.44069278775643461,
0.098756407811970787, 0.11896625191901598,
0.091925244131343786, 0.074627513461562239,
0.07932659277572375, 0.10769128625929614,
0.10374386641616549, 0.0916652344410777,
0.07958660246598985, 0.11950990854411785,
0.13158854051920571, 0.099796446573035186,
0.10296383734536738, 0.10740763932446029],
'214': [0.12627765123670265, 0.31446760951342106,
0.089421699468973492, 0.10772125740242791,
0.083236234857548552, 0.11724558872451812,
0.071828440262775239, 0.097512030344816494,
0.054387314268601793, 0.083000801948463154,
0.028458930171942256, 0.10821352621233365,
0.033109743383802417, 0.09036343110531507,
0.093231431997809602, 0.097255194443996038],
'257': [0.014856241243968688, 0.046945722330941131,
0.010520233206322257, 0.012673129183202533,
0.0097925291860668125, 0.013793642293976364,
0.0084504314601286518, 0.011472039848732461,
0.011051532681318412, 0.0097648311091713038,
0.0084781295370241899, 0.012731043343984024,
0.014017744916131154, 0.010631025513904347,
0.010968438450631807, 0.011441823764846403],
'415': [0.050225214764677686, 0.036530970201609642,
0.035566262252003926, 0.042844662018749632,
0.033106078003361288, 0.046632834996763448,
0.028568783178009906, 0.038784081096249381,
0.037362452135961025, 0.033012437772444059,
0.028662423408927069, 0.043040455228849209,
0.047390469592366265, 0.035940823175672799,
0.037081531443209532, 0.038681928117066992],
'710': [0.089674027645291116, 0.28336992735911998,
0.063501370763055329, 0.076496505277754243,
0.059108863307209714, 0.083260054820492299,
0.051007802843660542, 0.069246588127448522,
0.066708357175454674, 0.058941674442108294,
0.051174991708761851, 0.076846081995693538,
0.050483450975609093, 0.064170126223460897,
0.06620679058015054, 0.06906420027461066],
'887': [0.057053046346716159, 0.18028762645562316,
0.040401292819759363, 0.048669149536105509,
0.037606660549555769, 0.01551759777805072,
0.032452546362640604, 0.044056555789091338,
0.042441664477243589, 0.0375002904631467,
0.032558916449049735, 0.048891559716779172,
0.053832933730876102, 0.040826773165395923,
0.042122554218016202, 0.043940515694826814],
'1010': [0.075382945098012438, 0.22746625088558872,
0.023886087322913935, 0.064305485199711271,
0.049688859912910249, 0.069991148007951184,
0.042878841313377922, 0.058210965740092289,
0.056077245090707849, 0.049548315438998652,
0.043019385787289428, 0.064599350917890014,
0.071128280569599203, 0.053943524441323464,
0.055655611668973239, 0.058057644495825199]}
RONA.pop_ronas = defaultdict(list)
for k, v in RONA_DICT.items():
RONA.pop_ronas[k] = v
RONA.corr_coef = {'109': 0.0006640909658998809,
'145': 0.14430216359296372,
'214': 0.10406403494450837,
'257': 0.0012449897480285869,
'415': 0.014196158980969362,
'710': 0.055988694433971149,
'887': 0.024541999123225522,
'1010': 0.031127284539578529}
RONA.avg_ronas = [0.069916590853416033, 0.33042827432762917,
0.078081886442357085, 0.097001282569124797,
0.074952908386911654, 0.08208789223743565,
0.064680370415653249, 0.087808035641750992,
0.073646989317191325, 0.074740904789005747,
0.05282815857307914, 0.097444562819291675,
0.07840782467814407, 0.081365771844823354,
0.083953424770734683, 0.087576758989489953]
RONA.stderr_ronas = [0.013313151175645535, 0.049792720537769092,
0.011485830558008609, 0.013381880064626782,
0.010340181118882092, 0.012982193864458787,
0.0089230259282510414, 0.012113619227983244,
0.01029680158551243, 0.010310934013634544,
0.0079085804585694048, 0.013443033102871656,
0.01276549989475941, 0.011404533167324531,
0.011581853678027876, 0.012081713294985915]
RONAS['11'] = RONA
RONA = pr.RonaClass("12")
RONA_DICT = {'649': [0.086850728432886104, 0.080731124560793566,
0.064680192750292742, 0.076100475402105461,
0.059230618499232099, 0.068328131798133768,
0.048807935505400434, 0.072467425901398486,
0.06881948685355728, 0.06386126765792019,
0.050624460255753921, 0.058164563645681781,
0.084215278590160134, 0.076264260420579988,
0.065514007389799275, 0.070814686169519414],
'707': [0.013144357178726815, 0.012218190403404194,
0.0097889743844315116, 0.011517368342271985,
0.008964212730494624, 0.010341068934198124,
0.0073867997311617374, 0.010967527239647418,
0.010415432689880877, 0.0096650347916268503,
0.0076617202824740211, 0.011868906096409067,
0.0086073572837947112, 0.01154215626083294,
0.0099151674243781809, 0.010717394606896052],
'784': [0.023412187537834438, 0.10163030427622605,
0.081424205417913259, 0.095800900987788859,
0.074563878718337695, 0.086016500612984326,
0.061443035303848932, 0.0047645632706270645,
0.081945602066328824, 0.080393282006774883,
0.063729810870374076, 0.093075869060004354,
0.10601641478906937, 0.09600708567001659,
0.082473872891072414, 0.089146758970440956]}
RONA.pop_ronas = defaultdict(list)
for k, v in RONA_DICT.items():
RONA.pop_ronas[k] = v
RONA.corr_coef = {'649': 0.016349693873427815,
'707': 0.00036288134317020955,
'784': 0.043514409885779509}
RONA.avg_ronas = [0.040571849732149917, 0.095418119191833364,
0.076447124634696412, 0.089945040057074904,
0.070006137614369746, 0.08075871430677295,
0.057687309979865299, 0.023181092279660542,
0.077951297370389791, 0.075479216749128192,
0.059834305653307468, 0.083109279382294843,
0.09951119404950319, 0.090138621634188593,
0.07743263084545679, 0.083697634613861913]
RONA.stderr_ronas = [0.018819322196863317, 0.022048199697093925,
0.017664584928841015, 0.020783539035751947,
0.016176270452792248, 0.018660861012534825,
0.013329767356524136, 0.017642952538880076,
0.017950439563746097, 0.017440931114134219,
0.013825872181873717, 0.019203257813331393,
0.024098848741442403, 0.020828269798693309,
0.017892305176542465, 0.019339955322644532]
RONAS['12'] = RONA
RONA = pr.RonaClass("14")
RONA_DICT = {'250': [0.024094829252647242, 0.0080535780488881388,
0.00036906740649573545, 0.0092266851623922785,
0.027535064720339297, 0.0066629847851275648,
0.00036906740649569203, 0.0014630886471793769,
0.01654871888769073, 0.0068804709353839323,
0.004830828731452518, 0.018163388791109365,
0.0013905932637605517, 0.0016080794140169408,
0.023066712905980696, 0.0013905932637605517],
'278': [0.0083707381067368858, 0.0027978779995712125,
0.00012821699507037482, 0.0032054248767591949,
0.0095659036679285273, 0.0023147746788596672,
0.00012821699507037482, 0.00050828880188611369,
0.0057491584753874029, 0.0023903311223832922,
0.0016782688819032136, 0.0063101078288202578,
0.00048310332071156324, 0.0005586597642352147,
0.0080135621918979976, 0.00048310332071158085],
'349': [0.030083050613182703, 0.01005511155615673,
0.00046079070961111329, 0.011519767740277832,
0.034378278299200551, 0.0083189179895863264,
0.00046079070961111329, 0.001826706027386894,
0.020661526282741132, 0.0085904553720357307,
0.0027722095426111044, 0.02267748563728977,
0.0017361935665704713, 0.0020077309490198411,
0.028799419350694581, 0.0017361935665704713],
'421': [0.11892109729753635, 0.039748791273957675,
0.0018215485362861219, 0.045538713407152855,
0.056334051677992228, 0.032885456610451169,
0.0018215485362861219, 0.0072211388402771189,
0.081676935260972056, 0.033958869140762564,
0.023842769233887891, 0.089646210107223734,
0.0068633346635065757, 0.0079367471938180424,
0.11384678351788213, 0.0068633346635066416],
'829': [0.054535665742069542, 0.018228277772650153,
0.0008353384249332285, 0.020883460623330775,
0.062322213203054304, 0.015080841921562444,
0.0008353384249332285, 0.0033115201845567216,
0.037455978303702592, 0.015573094921969528,
0.010933983312072472, 0.041110583912785416,
0.0031474358510877082, 0.0036396888514948359,
0.052208651558326963, 0.0031474358510877082]}
RONA.pop_ronas = defaultdict(list)
for k, v in RONA_DICT.items():
RONA.pop_ronas[k] = v
RONA.corr_coef = {'250': 0.0040724658723727027,
'278': 0.00054093126158548193,
'349': 0.0064761181412646201,
'421': 0.1676523858378404,
'829': 0.028274333835238769}
RONA.avg_ronas = [0.10519387557463623, 0.035160534997867976,
0.0016112847462198174, 0.040282118655495261,
0.055776326583905605, 0.02908944425764699,
0.0016112847462198163, 0.0063875931010856932,
0.072248857102820474, 0.030038951340240753,
0.020988608029702958, 0.079298227867532073,
0.0060710907402210454, 0.0070205978228148717,
0.10070529663873815, 0.0060710907402210983]
RONA.stderr_ronas = [0.017357739754244265, 0.0058017390535247491,
0.00026587347544794097, 0.0066468368861985682,
0.0086228380619286314, 0.0047999657799619802,
0.0002658734754479433, 0.0010539984205257748,
0.011921576729460438, 0.0049566412208509351,
0.0036531944886092765, 0.013084773184545175,
0.0010017732735627778, 0.0011584487144517435,
0.016617092215496416, 0.0010017732735627871]
RONAS['14'] = RONA
RONA = pr.RonaClass("15")
RONA_DICT = {'63': [0.0041548828836814834, 0.028923691147344867,
0.0049573280758087529, 0.0051534813449954668, 0.0,
0.038838347298962475, 0.0011947517305006977,
0.012286327497238353, 0.005349634614182113,
0.0009985984613139836, 0.0019793648072474017,
0.0001961532691867142, 0.0037625763453081229,
0.011894020958864984, 0.0001961532691867142,
0.0073289994214295997],
'121': [0.0064442414620758228, 0.044860771036424676,
0.0076888374526054692, 0.0024476349908443538, 0.0,
0.060238445941635611, 0.0018530651414552621,
0.019056147499443035, 0.0082973066035310853,
0.0015488305659924679, 0.0030700034433064939,
0.0003042345754628079, 0.0058357723111501798,
0.018447678348517418, 0.0003042345754628079,
0.011367310046837551],
'128': [0.004270226164716436, 0.029726638794721393,
0.0050949479561853035, 0.0052965466163221306, 0.0,
0.039916534707092111, 0.0012279191117424892,
0.012627406984934081, 0.0054981452764589689,
0.0010263204516056837, 0.0020343137522898205,
0.00020159866013683841, 0.003867028844442847,
0.012224209664660427, 0.00020159866013681638,
0.0075324590287487786],
'130': [0.018687379720911685, 0.13008982792840659,
0.022296530310787308, 0.023178767121645769, 0.0,
0.0095787024816304939, 0.0053736242115926484,
0.017000520619699467, 0.024061003932504297,
0.0044913874007341528, 0.0089025714550265943,
0.00088223681085849493, 0.016922906099194692,
0.053495632076601227, 0.00088223681085849493,
0.032963575387530926],
'198': [0.0057012573938117449, 0.039688581514002827,
0.0068023586072088671, 0.0070715166815948213, 0.0,
0.053293298728420604, 0.0016394173621690632,
0.0168590830229026, 0.0073406747559807556,
0.0013702592877830764, 0.0027160496597128953,
0.0002691580743859866, 0.0051629412450398243,
0.01583812468218869, 0.0002691580743859866,
0.010056724415693765],
'351': [0.0049797404699712198, 0.034665832799541885,
0.0059414929212531558, 0.0061765879648999333, 0.0,
0.028778903072305961, 0.0014319425385753899,
0.014725498642962012, 0.0064116830085465598,
0.0011968474949286881, 0.0023723227131622468,
0.00023509504364672672, 0.0045095503826777664,
0.014255308555668584, 0.0002350950436467142,
0.0087840057217088569],
'398': [0.0038362676770137554, 0.026705691725821105,
0.0045771777433897222, 0.0047582890929483987, 0.0,
0.035860047212600735, 0.0011031327654932273,
0.011344156349624391, 0.0049394004425069825,
0.00092202141593462808, 0.0018275781637275934,
0.00018111134955858379, 0.0034740449778965878,
0.010981933650507193, 0.00018111134955859929,
0.0067669786062345763],
'438': [0.004821987652786127, 0.019876817610221306,
0.0057532728217791554, 0.0059809203075329893, 0.0,
0.029567370645590094, 0.0013865801405007266,
0.014259010698582073, 0.0062085677932868389,
0.0011589326547468775, 0.002297170083516124,
0.00022764748575384932, 0.0043666926812784062,
0.013803715727074374, 0.00022764748575381972,
0.0085057378768029178],
'526': [0.00090930830604254997, 0.0063300346454979719,
0.0010849257900422003, 0.0011278545083532093, 0.0,
0.0084998862255823548, 0.00026147492062167718,
0.0026888988105721674, 0.0011707832266642534,
0.0002185462023106681, 0.0004331897938657661,
4.2928718311022215e-05, 0.00082345086942052308,
0.002603041373950149, 4.2928718311026592e-05,
0.001603973020530002],
'548': [0.0020518720143468442, 0.010509210891373473,
0.0024481563089631736, 0.0025450258032027216, 0.0,
0.019180159859430893, 0.00059002328309545437,
0.0060675528664590915, 0.00264189529744227,
0.00049315378885593454, 0.00097750126005364786,
9.6869494239548387e-05, 0.0018581330258677332,
0.0058738138779799659, 9.6869494239548387e-05,
0.0036193965574959465],
'610': [0.010028705106904252, 0.069813560872956329,
0.011965579483774259, 0.012439037664786918, 0.0,
0.093744719840504878, 0.0028837907388952343,
0.02965569879251968, 0.012912495845799567,
0.0024103325578825902, 0.0047776234629458387,
0.00047345818101265957, 0.0090817887448789939,
0.028708782430494392, 0.00047345818101262899,
0.017690119308745419],
'632': [0.011795739709021936, 0.082114548532332896,
0.014073886863124859, 0.014630767278572267, 0.0,
0.11026232225858267, 0.0033919079849977227,
0.034880964203931809, 0.015187647694019625,
0.0028350275695503326, 0.0056194296467872631,
0.00055688041544738347, 0.010681978878127116,
0.033767203373037048, 0.00055688041544738997,
0.020807077340806952],
'769': [0.019041403888345056, 0.13255432234719178,
0.022718928244463216, 0.023617878642625462, 0.0,
0.17799217883611829, 0.0054754251524425776,
0.056306984030342261, 0.024516829040787634,
0.0045764747542803462, 0.0090712267450914277,
0.00089895039816221623, 0.017243503092020655,
0.054509083234017847, 0.00089895039816220127,
0.033588055785879062],
'788': [0.010184919771864401, 0.070901029484824407,
0.012151964362997045, 0.012632797485273874, 0.0,
0.095204958210818494, 0.0029287108356863029,
0.030117638295341573, 0.013113630607550754,
0.0024478777134094858, 0.0048520433247937729,
0.00048083312227686751, 0.0092232535273106906,
0.029155972050787848, 0.00048083312227686751,
0.017965673932344526],
'897': [0.0071110449064785532, 0.049502638791022499,
0.0084844226781160981, 0.0088201372445163382, 0.0,
0.066471484147254609, 0.0020448069044380433,
0.021027939659071876, 0.009155851810916606,
0.0017090923380377791, 0.0033876651700391496,
0.0003357145664002741, 0.0064396157736780254,
0.020356510526271288, 0.0003357145664002741,
0.012543516980955764],
'938': [0.0068591233483873346, 0.047748918760018293,
0.0081838467418526833, 0.0085076680158108816, 0.0,
0.033426205119968494, 0.0019723659413817644,
0.020282987068836401, 0.0088314892897691285,
0.0016485446674235941, 0.0032676510372145677,
0.00032382127395819935, 0.0062114808004709301,
0.019635344520919969, 0.00032382127395819935,
0.012099140326983672],
'948': [0.015863629990114328, 0.086408060633542103,
0.01892742118992178, 0.019676347927652518, 0.0,
0.14828749407068251, 0.0045616446752688939,
0.046910047481496887, 0.02042527466538329,
0.0038127179375382235, 0.0075573516261918132,
0.00074218253240479579, 0.014365776514652904,
0.045412194006035474, 0.00074892673773073839,
0.027982626291575104],
'1000': [0.00072496317100067837, 0.0050467393277390162,
0.00086497751733137604, 0.00089920324643437053, 0.0,
0.0067766943624017126, 0.00020846580453668675,
0.0021437752138176273, 0.00093342897553745359,
0.00017424007543364797, 0.00034536872094884189,
3.4225729103038786e-05, 0.00065651171279464506,
0.0020753237556115495, 3.4225729103038786e-05,
0.0012787976964862954],
'1161': [0.003529009310349059, 0.024566751508094911,
0.0042105776320902656, 0.0043771832218492618, 0.0,
0.032987906772275442, 0.0010147795012591557,
0.010435568303993406, 0.0045437888116081886,
0.00084817391150020238, 0.0016812018602950531,
0.00016660558975895322, 0.0031957981308310683,
0.010102357124475537, 0.00016660558975895322,
0.0062249906719032255]}
RONA.pop_ronas = defaultdict(list)
for k, v in RONA_DICT.items():
RONA.pop_ronas[k] = v
RONA.corr_coef = {'63': 0.0077456870585954412,
'121': 0.039301225034401907,
'128': 0.016111926538904166,
'130': 0.18481379924616759,
'198': 0.023750233063372327,
'351': 0.017735489340372099,
'398': 0.011325573184287981,
'438': 0.018683796231953969,
'526': 0.00049854828620604728,
'548': 0.0031221991840612805,
'610': 0.078319942162650966,
'632': 0.11355218364094871,
'769': 0.22867817097379825,
'788': 0.083453021616355649,
'897': 0.04690162704365064,
'938': 0.038263261893703132,
'948': 0.12921058644023001,
'1000': 0.00031920701110584829,
'1161': 0.0052277858268105553}
RONA.avg_ronas = [0.013529574810934185, 0.090964013692765217,
0.016142582821629635, 0.0165731619528456, 0.0,
0.095626902254738605, 0.003890478593702105,
0.033254662531292369, 0.017420053404636298,
0.00325174330219878, 0.0064454197597154329,
0.00063790299842444922, 0.012252104227927531,
0.038719637257432278, 0.00063873529150333078,
0.023865473164351742]
RONA.stderr_ronas = [0.0012326560237869524, 0.0083588763235082613,
0.0014707226378230585, 0.0015670042881442531, 0.0,
0.010661619342990508, 0.00035445473645375874,
0.0031727094664367557, 0.0015871107602407116,
0.00029626067524493279, 0.00058723098128906303,
5.8066989771126626e-05, 0.0011162679013693004,
0.0035306274108324173, 5.8194061208826052e-05,
0.0021743417415297753]
RONAS['15'] = RONA
RONA = pr.RonaClass("16")
RONA_DICT = {'589': [0.019747076205946928, 0.051413558770438496,
0.086015417527705862, 0.035580317488192736,
0.00097845873092532458, 0.099802790554380544,
0.08992925245140708, 0.053788472527463597,
0.049456641308587894, 0.042518479398390312,
0.097845873092530025, 0.013876323820395157,
0.076141879424732495, 0.09882433182345525, 0.0,
0.022527513945124502]}
RONA.pop_ronas = defaultdict(list)
for k, v in RONA_DICT.items():
RONA.pop_ronas[k] = v
RONA.corr_coef = {'589': 0.17658228323645278}
RONA.avg_ronas = [0.019747076205946928, 0.051413558770438496,
0.086015417527705862, 0.035580317488192736,
0.00097845873092532458, 0.099802790554380544,
0.08992925245140708, 0.053788472527463597,
0.049456641308587894, 0.042518479398390312,
0.097845873092530025, 0.013876323820395157,
0.076141879424732495, 0.09882433182345525, 0.0,
0.022527513945124502]
RONA.stderr_ronas = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0]
RONAS['16'] = RONA
for r in RONAS.values():
r.pop_names = ['Algeria', 'Catalonia', 'Corsica', 'Haza_de_Lino',
'Kenitra', 'Landes', 'Monchique', 'Puglia', 'Sardinia',
'Sicilia', 'Sintra', 'Taza', 'Toledo', 'Tuscany', 'Tunisia',
'Var']
# Test functions
def test_basic_stats():
"""
Test the function basic_stats of pyRona.py.
"""
test_rona = pr.RonaClass('5')
test_rona.pop_ronas = RONAS['5'].pop_ronas
test_rona.corr_coef = RONAS['5'].corr_coef
test_rona.basic_stats(True)
assert test_rona.avg_ronas == RONAS['5'].avg_ronas
assert test_rona.stderr_ronas == RONAS['5'].stderr_ronas
def test_count_markers():
"""
Test the function count_markers of pyRona.py.
"""
test_nr = RONAS['5'].count_markers()
control_nr = 4
assert test_nr == control_nr
def test_calculate_rona(tmpdir):
"""
Test the function calculate_rona of pyRona.py.
"""
covar = "558"
test_rona = pr.RonaClass('5')
present_cov = np.array([114, 71, 59, 117, 107, 86, 93, 87, 80, 76, 71, 122,
118, 88, 98, 83])
future_cov = np.array([119.67, 72.78, 59.22, 120.11, 111.89, 91.22, 94.78,
88, 80.67, 76.89, 72.44, 128.67, 127.67, 91.11,
103.89, 83.78])
al_freq = np.array([0.1957648, -0.49158111, 0.3691684, 0.27138479,
-0.00115721, 0.29038402, -0.31815359, 0.23805522,
0.15150419, -0.31698508, 0.4346372, 0.13334157,
-0.20163875, 0.8946341, -0.38784942, -0.55615943])
plot = tmpdir.mkdir("ind_plots")
outlier = 0
rtype = "absdiff"
pr.calculate_rona(covar, test_rona, present_cov, future_cov, al_freq, plot,
outlier, rtype)
control_pop_rona_dict = {'558': [0.0014018084856003402,
0.00044007391611439851,
5.4391158171442511e-05,
0.00076889319051446367,
0.0012089671066288572,
0.0012905538438860258,
0.00044007391611439851,
0.00024723253714292048,
0.00016564579988575674,
0.00022003695805716098,
0.0003560148534858055,
0.001649041022743251,
0.0023907386341719839,
0.00076889319051452103,
0.0014561996437717636,
0.000192841378971478]}
# Dirty hack to adjust precision due to failing tests on Travis-CI:
cpr = {x: [round(item, 10) for item in y] for x, y in
control_pop_rona_dict.items()}
trpr = {x: [round(item, 10) for item in y] for x, y in
test_rona.pop_ronas.items()}
assert trpr == cpr
def test_results_summary():
"""
Test the function results_summary of pyRona.py.
"""
top_ronas = [RONAS['15'], RONAS['11'], RONAS['14']]
test_res_summary = pr.results_summary(top_ronas, True)
with open("../tests/data/jar/pyRona.results_summary.pickle", "rb") as fle:
control_results_summary = pickle.load(fle)
assert test_res_summary == control_results_summary
def test_ronas_filterer():
"""
Test the function ronas_filterer of pyRona.py.
"""
test_ronas_filtered = pr.ronas_filterer(RONAS, True, 3)
assert test_ronas_filtered[0] == RONAS['15']
assert test_ronas_filtered[1] == RONAS['11']
assert test_ronas_filtered[2] == RONAS['14']
def test_argument_parser():
"""
Test the function argument_parser of pyRona.py.
"""
args = ['baypass', '-pc', '..tests/data/ENVFILE', '-fc',
'..tests/data/ENVFILE_rpc85',
'-pop', '..tests/data/popnames_single_GEO.txt', '-beta',
'..tests/data/Qsuber_GBS_mcmc_aux_summary_betai.out', '-pij',
'..tests/data/Qsuber_GBS_mcmc_aux_summary_pij.out', '-out',
'/home/baptista/Music/LOL', '-bf', '20', '-remove-outliers',
'-draw-ind-plots', '/tmp/indplots']
test_arguments = pr.argument_parser(args)
control_arguments = (
"Namespace(bayes_factor=20.0, "
"baypass_pij_file='..tests/data/Qsuber_GBS_mcmc_aux_summary_pij.out',"
" baypass_summary_betai_file="
"'..tests/data/Qsuber_GBS_mcmc_aux_summary_betai.out',"
" future_covars_file='..tests/data/ENVFILE_rpc85',"
" immutables=[], map_filename=None, num_covars=3,"
" outfile='/home/baptista/Music/LOL',"
" outliers=True, plots='/tmp/indplots',"
" popnames_file='..tests/data/popnames_single_GEO.txt',"
" present_covars_file='..tests/data/ENVFILE', rtype='absdiff',"
" upstream='baypass', use_weights=True)")
assert str(test_arguments) == control_arguments
|
StuntsPT/pyRona
|
tests/test_pyRona.py
|
Python
|
gpl-3.0
| 35,454
|
class AssignmentsBook:
def __init__(self, chapters):
self.chapters = chapters
# def __repr__(self):
# return self.__str__(self)
def get_chapters(self):
return self.chapters
# def __str__(self):
# rep = ""
# for chapter in self.chapters:
# rep += str(chapter)
# return rep
class Chapter:
def __init__(self, name, assignments):
self.name = name
self.assignments = assignments
def get_name(self):
return self.name
def get_assignments(self):
return self.assignments
# def __repr__(self):
# return self.__str__(self)
# def __str__(self):
# rep = "Chapter: " + str(self.name) + ": \n"
# for assingment in self.assignments:
# rep += "\t" + str(assingment)
# return rep
class Assignment:
def __init__(self, name, id, code=None, description=None):
self.name = name
self.id = id
self.code = code
self.description = description
def get_name(self):
return self.name
def get_id(self):
return self.id
def get_description(self):
return self.description
def get_code(self):
return self.code
# def __repr__(self):
# return self.__str__(self)
def set_code(self, code):
self.code = code
# def __str__(self):
# return "{ Assignment: " + self.name + ": , id:" + self.id + "}"
|
varun-verma11/CodeDrill
|
djangoSRV/Views/exercise_data_structure.py
|
Python
|
bsd-2-clause
| 1,232
|
# Module containing non-deprecated functions borrowed from Numeric.
__docformat__ = "restructuredtext en"
# functions that are now methods
__all__ = ['take', 'reshape', 'choose', 'repeat', 'put',
'swapaxes', 'transpose', 'sort', 'argsort', 'argmax', 'argmin',
'searchsorted', 'alen',
'resize', 'diagonal', 'trace', 'ravel', 'nonzero', 'shape',
'compress', 'clip', 'sum', 'product', 'prod', 'sometrue', 'alltrue',
'any', 'all', 'cumsum', 'cumproduct', 'cumprod', 'ptp', 'ndim',
'rank', 'size', 'around', 'round_', 'mean', 'std', 'var', 'squeeze',
'amax', 'amin',
]
import multiarray as mu
import umath as um
import numerictypes as nt
from numeric import asarray, array, asanyarray, concatenate
_dt_ = nt.sctype2char
import types
try:
_gentype = types.GeneratorType
except AttributeError:
_gentype = types.NoneType
# save away Python sum
_sum_ = sum
# functions that are now methods
def _wrapit(obj, method, *args, **kwds):
try:
wrap = obj.__array_wrap__
except AttributeError:
wrap = None
result = getattr(asarray(obj),method)(*args, **kwds)
if wrap:
if not isinstance(result, mu.ndarray):
result = asarray(result)
result = wrap(result)
return result
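# Note (descriptive): _wrapit mirrors ndarray method dispatch for array_like
# inputs -- the object is converted with asarray, the named method is called,
# and the result is passed back through the original object's __array_wrap__
# (when present) so that ndarray subclasses keep their type.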
def take(a, indices, axis=None, out=None, mode='raise'):
"""
Take elements from an array along an axis.
This function does the same thing as "fancy" indexing (indexing arrays
using arrays); however, it can be easier to use if you need elements
along a given axis.
Parameters
----------
a : array_like
The source array.
indices : array_like
The indices of the values to extract.
axis : int, optional
The axis over which to select values. By default, the flattened
input array is used.
out : ndarray, optional
If provided, the result will be placed in this array. It should
be of the appropriate shape and dtype.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
Returns
-------
subarray : ndarray
The returned array has the same type as `a`.
See Also
--------
ndarray.take : equivalent method
Examples
--------
>>> a = [4, 3, 5, 7, 6, 8]
>>> indices = [0, 1, 4]
>>> np.take(a, indices)
array([4, 3, 6])
In this example if `a` is an ndarray, "fancy" indexing can be used.
>>> a = np.array(a)
>>> a[indices]
array([4, 3, 6])
"""
try:
take = a.take
except AttributeError:
return _wrapit(a, 'take', indices, axis, out, mode)
return take(indices, axis, out, mode)
# not deprecated --- copy if necessary, view otherwise
def reshape(a, newshape, order='C'):
"""
Gives a new shape to an array without changing its data.
Parameters
----------
a : array_like
Array to be reshaped.
newshape : int or tuple of ints
The new shape should be compatible with the original shape. If
an integer, then the result will be a 1-D array of that length.
One shape dimension can be -1. In this case, the value is inferred
from the length of the array and remaining dimensions.
order : {'C', 'F'}, optional
Determines whether the array data should be viewed as in C
(row-major) order or FORTRAN (column-major) order.
Returns
-------
reshaped_array : ndarray
This will be a new view object if possible; otherwise, it will
be a copy.
See Also
--------
ndarray.reshape : Equivalent method.
Notes
-----
It is not always possible to change the shape of an array without
    copying the data. If you want an error to be raised if the data is copied,
you should assign the new shape to the shape attribute of the array::
>>> a = np.zeros((10, 2))
    # A transpose makes the array non-contiguous
    >>> b = a.T
    # Taking a view makes it possible to modify the shape without modifying the
# initial object.
>>> c = b.view()
>>> c.shape = (20)
AttributeError: incompatible shape for a non-contiguous array
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> np.reshape(a, 6)
array([1, 2, 3, 4, 5, 6])
>>> np.reshape(a, 6, order='F')
array([1, 4, 2, 5, 3, 6])
>>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2
array([[1, 2],
[3, 4],
[5, 6]])
"""
try:
reshape = a.reshape
except AttributeError:
return _wrapit(a, 'reshape', newshape, order=order)
return reshape(newshape, order=order)
def choose(a, choices, out=None, mode='raise'):
"""
Construct an array from an index array and a set of arrays to choose from.
First of all, if confused or uncertain, definitely look at the Examples -
in its full generality, this function is less simple than it might
seem from the following code description (below ndi =
`numpy.lib.index_tricks`):
``np.choose(a,c) == np.array([c[a[I]][I] for I in ndi.ndindex(a.shape)])``.
But this omits some subtleties. Here is a fully general summary:
Given an "index" array (`a`) of integers and a sequence of `n` arrays
(`choices`), `a` and each choice array are first broadcast, as necessary,
to arrays of a common shape; calling these *Ba* and *Bchoices[i], i =
0,...,n-1* we have that, necessarily, ``Ba.shape == Bchoices[i].shape``
for each `i`. Then, a new array with shape ``Ba.shape`` is created as
follows:
* if ``mode=raise`` (the default), then, first of all, each element of
`a` (and thus `Ba`) must be in the range `[0, n-1]`; now, suppose that
`i` (in that range) is the value at the `(j0, j1, ..., jm)` position
in `Ba` - then the value at the same position in the new array is the
value in `Bchoices[i]` at that same position;
* if ``mode=wrap``, values in `a` (and thus `Ba`) may be any (signed)
integer; modular arithmetic is used to map integers outside the range
`[0, n-1]` back into that range; and then the new array is constructed
as above;
* if ``mode=clip``, values in `a` (and thus `Ba`) may be any (signed)
integer; negative integers are mapped to 0; values greater than `n-1`
are mapped to `n-1`; and then the new array is constructed as above.
Parameters
----------
a : int array
This array must contain integers in `[0, n-1]`, where `n` is the number
of choices, unless ``mode=wrap`` or ``mode=clip``, in which cases any
integers are permissible.
choices : sequence of arrays
Choice arrays. `a` and all of the choices must be broadcastable to the
same shape. If `choices` is itself an array (not recommended), then
its outermost dimension (i.e., the one corresponding to
``choices.shape[0]``) is taken as defining the "sequence".
out : array, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
mode : {'raise' (default), 'wrap', 'clip'}, optional
Specifies how indices outside `[0, n-1]` will be treated:
* 'raise' : an exception is raised
* 'wrap' : value becomes value mod `n`
* 'clip' : values < 0 are mapped to 0, values > n-1 are mapped to n-1
Returns
-------
merged_array : array
The merged result.
Raises
------
ValueError: shape mismatch
If `a` and each choice array are not all broadcastable to the same
shape.
See Also
--------
ndarray.choose : equivalent method
Notes
-----
To reduce the chance of misinterpretation, even though the following
"abuse" is nominally supported, `choices` should neither be, nor be
thought of as, a single array, i.e., the outermost sequence-like container
should be either a list or a tuple.
Examples
--------
>>> choices = [[0, 1, 2, 3], [10, 11, 12, 13],
... [20, 21, 22, 23], [30, 31, 32, 33]]
>>> np.choose([2, 3, 1, 0], choices
... # the first element of the result will be the first element of the
... # third (2+1) "array" in choices, namely, 20; the second element
... # will be the second element of the fourth (3+1) choice array, i.e.,
... # 31, etc.
... )
array([20, 31, 12, 3])
>>> np.choose([2, 4, 1, 0], choices, mode='clip') # 4 goes to 3 (4-1)
array([20, 31, 12, 3])
>>> # because there are 4 choice arrays
>>> np.choose([2, 4, 1, 0], choices, mode='wrap') # 4 goes to (4 mod 4)
array([20, 1, 12, 3])
>>> # i.e., 0
A couple examples illustrating how choose broadcasts:
>>> a = [[1, 0, 1], [0, 1, 0], [1, 0, 1]]
>>> choices = [-10, 10]
>>> np.choose(a, choices)
array([[ 10, -10, 10],
[-10, 10, -10],
[ 10, -10, 10]])
>>> # With thanks to Anne Archibald
>>> a = np.array([0, 1]).reshape((2,1,1))
>>> c1 = np.array([1, 2, 3]).reshape((1,3,1))
>>> c2 = np.array([-1, -2, -3, -4, -5]).reshape((1,1,5))
>>> np.choose(a, (c1, c2)) # result is 2x3x5, res[0,:,:]=c1, res[1,:,:]=c2
array([[[ 1, 1, 1, 1, 1],
[ 2, 2, 2, 2, 2],
[ 3, 3, 3, 3, 3]],
[[-1, -2, -3, -4, -5],
[-1, -2, -3, -4, -5],
[-1, -2, -3, -4, -5]]])
"""
try:
choose = a.choose
except AttributeError:
return _wrapit(a, 'choose', choices, out=out, mode=mode)
return choose(choices, out=out, mode=mode)
def repeat(a, repeats, axis=None):
"""
Repeat elements of an array.
Parameters
----------
a : array_like
Input array.
repeats : {int, array of ints}
The number of repetitions for each element. `repeats` is broadcasted
to fit the shape of the given axis.
axis : int, optional
The axis along which to repeat values. By default, use the
flattened input array, and return a flat output array.
Returns
-------
repeated_array : ndarray
Output array which has the same shape as `a`, except along
the given axis.
See Also
--------
tile : Tile an array.
Examples
--------
>>> x = np.array([[1,2],[3,4]])
>>> np.repeat(x, 2)
array([1, 1, 2, 2, 3, 3, 4, 4])
>>> np.repeat(x, 3, axis=1)
array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
>>> np.repeat(x, [1, 2], axis=0)
array([[1, 2],
[3, 4],
[3, 4]])
"""
try:
repeat = a.repeat
except AttributeError:
return _wrapit(a, 'repeat', repeats, axis)
return repeat(repeats, axis)
def put(a, ind, v, mode='raise'):
"""
Replaces specified elements of an array with given values.
The indexing works on the flattened target array. `put` is roughly
equivalent to:
::
a.flat[ind] = v
Parameters
----------
a : ndarray
Target array.
ind : array_like
Target indices, interpreted as integers.
v : array_like
Values to place in `a` at target indices. If `v` is shorter than
`ind` it will be repeated as necessary.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
* 'raise' -- raise an error (default)
* 'wrap' -- wrap around
* 'clip' -- clip to the range
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
See Also
--------
putmask, place
Examples
--------
>>> a = np.arange(5)
>>> np.put(a, [0, 2], [-44, -55])
>>> a
array([-44, 1, -55, 3, 4])
>>> a = np.arange(5)
>>> np.put(a, 22, -5, mode='clip')
>>> a
array([ 0, 1, 2, 3, -5])
"""
return a.put(ind, v, mode)
def swapaxes(a, axis1, axis2):
"""
Interchange two axes of an array.
Parameters
----------
a : array_like
Input array.
axis1 : int
First axis.
axis2 : int
Second axis.
Returns
-------
a_swapped : ndarray
If `a` is an ndarray, then a view of `a` is returned; otherwise
a new array is created.
Examples
--------
>>> x = np.array([[1,2,3]])
>>> np.swapaxes(x,0,1)
array([[1],
[2],
[3]])
>>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.swapaxes(x,0,2)
array([[[0, 4],
[2, 6]],
[[1, 5],
[3, 7]]])
"""
try:
swapaxes = a.swapaxes
except AttributeError:
return _wrapit(a, 'swapaxes', axis1, axis2)
return swapaxes(axis1, axis2)
def transpose(a, axes=None):
"""
Permute the dimensions of an array.
Parameters
----------
a : array_like
Input array.
axes : list of ints, optional
By default, reverse the dimensions, otherwise permute the axes
according to the values given.
Returns
-------
p : ndarray
`a` with its axes permuted. A view is returned whenever
possible.
See Also
--------
rollaxis
Examples
--------
>>> x = np.arange(4).reshape((2,2))
>>> x
array([[0, 1],
[2, 3]])
>>> np.transpose(x)
array([[0, 2],
[1, 3]])
>>> x = np.ones((1, 2, 3))
>>> np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
"""
try:
transpose = a.transpose
except AttributeError:
return _wrapit(a, 'transpose', axes)
return transpose(axes)
def sort(a, axis=-1, kind='quicksort', order=None):
"""
Return a sorted copy of an array.
Parameters
----------
a : array_like
Array to be sorted.
axis : int or None, optional
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm. Default is 'quicksort'.
order : list, optional
When `a` is a structured array, this argument specifies which fields
to compare first, second, and so on. This list does not need to
include all of the fields.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
See Also
--------
ndarray.sort : Method to sort an array in-place.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in a sorted array.
Notes
-----
The various sorting algorithms are characterized by their average speed,
worst case performance, work space size, and whether they are stable. A
stable sort keeps items with the same key in the same relative
order. The three available algorithms have the following
properties:
=========== ======= ============= ============ =======
kind speed worst case work space stable
=========== ======= ============= ============ =======
'quicksort' 1 O(n^2) 0 no
'mergesort' 2 O(n*log(n)) ~n/2 yes
'heapsort' 3 O(n*log(n)) 0 no
=========== ======= ============= ============ =======
All the sort algorithms make temporary copies of the data when
sorting along any but the last axis. Consequently, sorting along
the last axis is faster and uses less space than sorting along
any other axis.
The sort order for complex numbers is lexicographic. If both the real
and imaginary parts are non-nan then the order is determined by the
real parts except when they are equal, in which case the order is
determined by the imaginary parts.
Previous to numpy 1.4.0 sorting real and complex arrays containing nan
values led to undefined behaviour. In numpy versions >= 1.4.0 nan
values are sorted to the end. The extended sort order is:
* Real: [R, nan]
* Complex: [R + Rj, R + nanj, nan + Rj, nan + nanj]
where R is a non-nan real value. Complex values with the same nan
placements are sorted according to the non-nan part if it exists.
Non-nan values are sorted as before.
Examples
--------
>>> a = np.array([[1,4],[3,1]])
>>> np.sort(a) # sort along the last axis
array([[1, 4],
[1, 3]])
>>> np.sort(a, axis=None) # sort the flattened array
array([1, 1, 3, 4])
>>> np.sort(a, axis=0) # sort along the first axis
array([[1, 1],
[3, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured array:
>>> dtype = [('name', 'S10'), ('height', float), ('age', int)]
>>> values = [('Arthur', 1.8, 41), ('Lancelot', 1.9, 38),
... ('Galahad', 1.7, 38)]
>>> a = np.array(values, dtype=dtype) # create a structured array
>>> np.sort(a, order='height') # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Arthur', 1.8, 41),
('Lancelot', 1.8999999999999999, 38)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
Sort by age, then height if ages are equal:
>>> np.sort(a, order=['age', 'height']) # doctest: +SKIP
array([('Galahad', 1.7, 38), ('Lancelot', 1.8999999999999999, 38),
('Arthur', 1.8, 41)],
dtype=[('name', '|S10'), ('height', '<f8'), ('age', '<i4')])
"""
if axis is None:
a = asanyarray(a).flatten()
axis = 0
else:
a = asanyarray(a).copy()
a.sort(axis, kind, order)
return a
def argsort(a, axis=-1, kind='quicksort', order=None):
"""
Returns the indices that would sort an array.
Perform an indirect sort along the given axis using the algorithm specified
by the `kind` keyword. It returns an array of indices of the same shape as
`a` that index data along the given axis in sorted order.
Parameters
----------
a : array_like
Array to sort.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : {'quicksort', 'mergesort', 'heapsort'}, optional
Sorting algorithm.
order : list, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
specified.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified axis.
In other words, ``a[index_array]`` yields a sorted `a`.
See Also
--------
sort : Describes sorting algorithms used.
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
Notes
-----
See `sort` for notes on the different sorting algorithms.
As of NumPy 1.4.0 `argsort` works with real/complex arrays containing
nan values. The enhanced sort order is documented in `sort`.
Examples
--------
One dimensional array:
>>> x = np.array([3, 1, 2])
>>> np.argsort(x)
array([1, 2, 0])
Two-dimensional array:
>>> x = np.array([[0, 3], [2, 2]])
>>> x
array([[0, 3],
[2, 2]])
>>> np.argsort(x, axis=0)
array([[0, 1],
[1, 0]])
>>> np.argsort(x, axis=1)
array([[0, 1],
[0, 1]])
Sorting with keys:
>>> x = np.array([(1, 0), (0, 1)], dtype=[('x', '<i4'), ('y', '<i4')])
>>> x
array([(1, 0), (0, 1)],
dtype=[('x', '<i4'), ('y', '<i4')])
>>> np.argsort(x, order=('x','y'))
array([1, 0])
>>> np.argsort(x, order=('y','x'))
array([0, 1])
"""
try:
argsort = a.argsort
except AttributeError:
return _wrapit(a, 'argsort', axis, kind, order)
return argsort(axis, kind, order)
def argmax(a, axis=None):
"""
Indices of the maximum values along an axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
Returns
-------
index_array : ndarray of ints
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
See Also
--------
ndarray.argmax, argmin
amax : The maximum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
Notes
-----
In case of multiple occurrences of the maximum values, the indices
corresponding to the first occurrence are returned.
Examples
--------
>>> a = np.arange(6).reshape(2,3)
>>> a
array([[0, 1, 2],
[3, 4, 5]])
>>> np.argmax(a)
5
>>> np.argmax(a, axis=0)
array([1, 1, 1])
>>> np.argmax(a, axis=1)
array([2, 2])
>>> b = np.arange(6)
>>> b[1] = 5
>>> b
array([0, 5, 2, 3, 4, 5])
>>> np.argmax(b) # Only the first occurrence is returned.
1
"""
try:
argmax = a.argmax
except AttributeError:
return _wrapit(a, 'argmax', axis)
return argmax(axis)
def argmin(a, axis=None):
"""
Return the indices of the minimum values along an axis.
See Also
--------
argmax : Similar function. Please refer to `numpy.argmax` for detailed
documentation.
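    Examples
    --------
    A short illustration, mirroring the `argmax` examples above (assumes
    the conventional ``import numpy as np``):

    >>> a = np.arange(6).reshape(2,3)
    >>> np.argmin(a)
    0
    >>> np.argmin(a, axis=0)
    array([0, 0, 0])
    >>> np.argmin(a, axis=1)
    array([0, 0])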
"""
try:
argmin = a.argmin
except AttributeError:
return _wrapit(a, 'argmin', axis)
return argmin(axis)
def searchsorted(a, v, side='left'):
"""
Find indices where elements should be inserted to maintain order.
Find the indices into a sorted array `a` such that, if the corresponding
elements in `v` were inserted before the indices, the order of `a` would
be preserved.
Parameters
----------
a : 1-D array_like
Input array, sorted in ascending order.
v : array_like
Values to insert into `a`.
side : {'left', 'right'}, optional
If 'left', the index of the first suitable location found is given. If
'right', return the last such index. If there is no suitable
index, return either 0 or N (where N is the length of `a`).
Returns
-------
indices : array of ints
Array of insertion points with the same shape as `v`.
See Also
--------
sort : Return a sorted copy of an array.
histogram : Produce histogram from 1-D data.
Notes
-----
Binary search is used to find the required insertion points.
As of Numpy 1.4.0 `searchsorted` works with real/complex arrays containing
`nan` values. The enhanced sort order is documented in `sort`.
Examples
--------
>>> np.searchsorted([1,2,3,4,5], 3)
2
>>> np.searchsorted([1,2,3,4,5], 3, side='right')
3
>>> np.searchsorted([1,2,3,4,5], [-10, 10, 2, 3])
array([0, 5, 1, 2])
"""
try:
searchsorted = a.searchsorted
except AttributeError:
return _wrapit(a, 'searchsorted', v, side)
return searchsorted(v, side)
def resize(a, new_shape):
"""
Return a new array with the specified shape.
If the new array is larger than the original array, then the new array
    is filled with repeated copies of `a`. Note that this behavior is different
from a.resize(new_shape) which fills with zeros instead of repeated
copies of `a`.
Parameters
----------
a : array_like
Array to be resized.
new_shape : {tuple, int}
Shape of resized array.
Returns
-------
reshaped_array : ndarray
The new array is formed from the data in the old array, repeated
if necessary to fill out the required number of elements. The data
are repeated in the order that the data are stored in memory.
See Also
--------
ndarray.resize : resize an array in-place.
Examples
--------
>>> a=np.array([[0,1],[2,3]])
>>> np.resize(a,(1,4))
array([[0, 1, 2, 3]])
>>> np.resize(a,(2,4))
array([[0, 1, 2, 3],
[0, 1, 2, 3]])
"""
if isinstance(new_shape, (int, nt.integer)):
new_shape = (new_shape,)
a = ravel(a)
Na = len(a)
if not Na: return mu.zeros(new_shape, a.dtype.char)
total_size = um.multiply.reduce(new_shape)
n_copies = int(total_size / Na)
extra = total_size % Na
if total_size == 0:
return a[:0]
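    # When total_size is not an exact multiple of Na, one extra full copy is
    # concatenated and `extra` becomes the count of surplus trailing elements
    # trimmed off below.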
if extra != 0:
n_copies = n_copies+1
extra = Na-extra
a = concatenate( (a,)*n_copies)
if extra > 0:
a = a[:-extra]
return reshape(a, new_shape)
def squeeze(a):
"""
Remove single-dimensional entries from the shape of an array.
Parameters
----------
a : array_like
Input data.
Returns
-------
squeezed : ndarray
        The input array, but with all dimensions of length 1
removed. Whenever possible, a view on `a` is returned.
Examples
--------
>>> x = np.array([[[0], [1], [2]]])
>>> x.shape
(1, 3, 1)
>>> np.squeeze(x).shape
(3,)
"""
try:
squeeze = a.squeeze
except AttributeError:
return _wrapit(a, 'squeeze')
return squeeze()
def diagonal(a, offset=0, axis1=0, axis2=1):
"""
Return specified diagonals.
If `a` is 2-D, returns the diagonal of `a` with the given offset,
i.e., the collection of elements of the form `a[i,i+offset]`.
If `a` has more than two dimensions, then the axes specified
by `axis1` and `axis2` are used to determine the 2-D subarray
whose diagonal is returned. The shape of the resulting array
can be determined by removing `axis1` and `axis2` and appending
an index to the right equal to the size of the resulting diagonals.
Parameters
----------
a : array_like
Array from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be both positive
and negative. Defaults to main diagonal (0).
axis1 : int, optional
Axis to be used as the first axis of the 2-D subarrays from which
the diagonals should be taken. Defaults to first axis (0).
axis2 : int, optional
Axis to be used as the second axis of the 2-D subarrays from which
the diagonals should be taken. Defaults to second axis (1).
Returns
-------
array_of_diagonals : ndarray
If `a` is 2-D, a 1-D array containing the diagonal is
returned. If `a` has larger dimensions, then an array of
diagonals is returned.
Raises
------
ValueError
If the dimension of `a` is less than 2.
See Also
--------
    diag : MATLAB work-alike for 1-D and 2-D arrays.
diagflat : Create diagonal arrays.
trace : Sum along diagonals.
Examples
--------
>>> a = np.arange(4).reshape(2,2)
>>> a
array([[0, 1],
[2, 3]])
>>> a.diagonal()
array([0, 3])
>>> a.diagonal(1)
array([1])
>>> a = np.arange(8).reshape(2,2,2)
>>> a
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> a.diagonal(0,-2,-1)
array([[0, 3],
[4, 7]])
"""
return asarray(a).diagonal(offset, axis1, axis2)
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
"""
Return the sum along diagonals of the array.
If `a` is 2-D, the sum along its diagonal with the given offset
is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
If `a` has more than two dimensions, then the axes specified by axis1 and
axis2 are used to determine the 2-D sub-arrays whose traces are returned.
The shape of the resulting array is the same as that of `a` with `axis1`
and `axis2` removed.
Parameters
----------
a : array_like
Input array, from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be both positive
and negative. Defaults to 0.
axis1, axis2 : int, optional
Axes to be used as the first and second axis of the 2-D sub-arrays
from which the diagonals should be taken. Defaults are the first two
axes of `a`.
dtype : dtype, optional
Determines the data-type of the returned array and of the accumulator
where the elements are summed. If dtype has the value None and `a` is
of integer type of precision less than the default integer
precision, then the default integer precision is used. Otherwise,
the precision is the same as that of `a`.
out : ndarray, optional
Array into which the output is placed. Its type is preserved and
it must be of the right shape to hold the output.
Returns
-------
sum_along_diagonals : ndarray
If `a` is 2-D, the sum along the diagonal is returned. If `a` has
larger dimensions, then an array of sums along diagonals is returned.
See Also
--------
diag, diagonal, diagflat
Examples
--------
>>> np.trace(np.eye(3))
3.0
>>> a = np.arange(8).reshape((2,2,2))
>>> np.trace(a)
array([6, 8])
>>> a = np.arange(24).reshape((2,2,2,3))
>>> np.trace(a).shape
(2, 3)
"""
return asarray(a).trace(offset, axis1, axis2, dtype, out)
def ravel(a, order='C'):
"""
Return a flattened array.
A 1-D array, containing the elements of the input, is returned. A copy is
made only if needed.
Parameters
----------
a : array_like
Input array. The elements in `a` are read in the order specified by
`order`, and packed as a 1-D array.
order : {'C','F'}, optional
The elements of `a` are read in this order. It can be either
        'C' for row-major order, or 'F' for column-major order.
By default, row-major order is used.
Returns
-------
1d_array : ndarray
        Output of the same dtype as `a`, and of shape ``(a.size,)``.
See Also
--------
ndarray.flat : 1-D iterator over an array.
ndarray.flatten : 1-D array copy of the elements of an array
in row-major order.
Notes
-----
In row-major order, the row index varies the slowest, and the column
index the quickest. This can be generalized to multiple dimensions,
where row-major order implies that the index along the first axis
varies slowest, and the index along the last quickest. The opposite holds
for Fortran-, or column-major, mode.
Examples
--------
If an array is in C-order (default), then `ravel` is equivalent
to ``reshape(-1)``:
>>> x = np.array([[1, 2, 3], [4, 5, 6]])
>>> print x.reshape(-1)
[1 2 3 4 5 6]
>>> print np.ravel(x)
[1 2 3 4 5 6]
When flattening using Fortran-order, however, we see
>>> print np.ravel(x, order='F')
[1 4 2 5 3 6]
"""
return asarray(a).ravel(order)
def nonzero(a):
"""
Return the indices of the elements that are non-zero.
Returns a tuple of arrays, one for each dimension of `a`, containing
the indices of the non-zero elements in that dimension. The
corresponding non-zero values can be obtained with::
a[nonzero(a)]
To group the indices by element, rather than dimension, use::
transpose(nonzero(a))
The result of this is always a 2-D array, with a row for
each non-zero element.
Parameters
----------
a : array_like
Input array.
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
flatnonzero :
Return indices that are non-zero in the flattened version of the input
array.
ndarray.nonzero :
Equivalent ndarray method.
Examples
--------
>>> x = np.eye(3)
>>> x
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
>>> np.nonzero(x)
(array([0, 1, 2]), array([0, 1, 2]))
>>> x[np.nonzero(x)]
array([ 1., 1., 1.])
>>> np.transpose(np.nonzero(x))
array([[0, 0],
[1, 1],
[2, 2]])
A common use for ``nonzero`` is to find the indices of an array, where
a condition is True. Given an array `a`, the condition `a` > 3 is a
boolean array and since False is interpreted as 0, np.nonzero(a > 3)
    yields the indices of `a` where the condition is true.
>>> a = np.array([[1,2,3],[4,5,6],[7,8,9]])
>>> a > 3
array([[False, False, False],
[ True, True, True],
[ True, True, True]], dtype=bool)
>>> np.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
The ``nonzero`` method of the boolean array can also be called.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
"""
try:
nonzero = a.nonzero
except AttributeError:
res = _wrapit(a, 'nonzero')
else:
res = nonzero()
return res
def shape(a):
"""
Return the shape of an array.
Parameters
----------
a : array_like
Input array.
Returns
-------
shape : tuple of ints
The elements of the shape tuple give the lengths of the
corresponding array dimensions.
See Also
--------
alen
ndarray.shape : Equivalent array method.
Examples
--------
>>> np.shape(np.eye(3))
(3, 3)
>>> np.shape([[1, 2]])
(1, 2)
>>> np.shape([0])
(1,)
>>> np.shape(0)
()
>>> a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
>>> np.shape(a)
(2,)
>>> a.shape
(2,)
"""
try:
result = a.shape
except AttributeError:
result = asarray(a).shape
return result
def compress(condition, a, axis=None, out=None):
"""
Return selected slices of an array along given axis.
When working along a given axis, a slice along that axis is returned in
`output` for each index where `condition` evaluates to True. When
working on a 1-D array, `compress` is equivalent to `extract`.
Parameters
----------
condition : 1-D array of bools
Array that selects which entries to return. If len(condition)
is less than the size of `a` along the given axis, then output is
truncated to the length of the condition array.
a : array_like
Array from which to extract a part.
axis : int, optional
Axis along which to take slices. If None (default), work on the
flattened array.
out : ndarray, optional
Output array. Its type is preserved and it must be of the right
shape to hold the output.
Returns
-------
compressed_array : ndarray
A copy of `a` without the slices along axis for which `condition`
is false.
See Also
--------
take, choose, diag, diagonal, select
ndarray.compress : Equivalent method.
numpy.doc.ufuncs : Section "Output arguments"
Examples
--------
>>> a = np.array([[1, 2], [3, 4], [5, 6]])
>>> a
array([[1, 2],
[3, 4],
[5, 6]])
>>> np.compress([0, 1], a, axis=0)
array([[3, 4]])
>>> np.compress([False, True, True], a, axis=0)
array([[3, 4],
[5, 6]])
>>> np.compress([False, True], a, axis=1)
array([[2],
[4],
[6]])
Working on the flattened array does not return slices along an axis but
selects elements.
>>> np.compress([False, True], a)
array([2])
"""
try:
compress = a.compress
except AttributeError:
return _wrapit(a, 'compress', condition, axis, out)
return compress(condition, axis, out)
def clip(a, a_min, a_max, out=None):
"""
Clip (limit) the values in an array.
Given an interval, values outside the interval are clipped to
the interval edges. For example, if an interval of ``[0, 1]``
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
Parameters
----------
a : array_like
Array containing elements to clip.
a_min : scalar or array_like
Minimum value.
a_max : scalar or array_like
Maximum value. If `a_min` or `a_max` are array_like, then they will
be broadcasted to the shape of `a`.
out : ndarray, optional
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
to hold the output. Its type is preserved.
Returns
-------
clipped_array : ndarray
An array with the elements of `a`, but where values
< `a_min` are replaced with `a_min`, and those > `a_max`
with `a_max`.
See Also
--------
numpy.doc.ufuncs : Section "Output arguments"
Examples
--------
>>> a = np.arange(10)
>>> np.clip(a, 1, 8)
array([1, 1, 2, 3, 4, 5, 6, 7, 8, 8])
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.clip(a, 3, 6, out=a)
array([3, 3, 3, 3, 4, 5, 6, 6, 6, 6])
>>> a = np.arange(10)
>>> a
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.clip(a, [3,4,1,1,1,4,4,4,4,4], 8)
array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8])
"""
try:
clip = a.clip
except AttributeError:
return _wrapit(a, 'clip', a_min, a_max, out)
return clip(a_min, a_max, out)
def sum(a, axis=None, dtype=None, out=None):
"""
Sum of array elements over a given axis.
Parameters
----------
a : array_like
Elements to sum.
axis : integer, optional
Axis over which the sum is taken. By default `axis` is None,
and all elements are summed.
dtype : dtype, optional
The type of the returned array and of the accumulator in which
the elements are summed. By default, the dtype of `a` is used.
An exception is when `a` has an integer type with less precision
than the default platform integer. In that case, the default
platform integer is used instead.
out : ndarray, optional
Array into which the output is placed. By default, a new array is
created. If `out` is given, it must be of the appropriate shape
(the shape of `a` with `axis` removed, i.e.,
``numpy.delete(a.shape, axis)``). Its type is preserved. See
`doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
sum_along_axis : ndarray
An array with the same shape as `a`, with the specified
axis removed. If `a` is a 0-d array, or if `axis` is None, a scalar
is returned. If an output array is specified, a reference to
`out` is returned.
See Also
--------
ndarray.sum : Equivalent method.
cumsum : Cumulative sum of array elements.
trapz : Integration of array values using the composite trapezoidal rule.
mean, average
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> np.sum([0.5, 1.5])
2.0
>>> np.sum([0.5, 0.7, 0.2, 1.5], dtype=np.int32)
1
>>> np.sum([[0, 1], [0, 5]])
6
>>> np.sum([[0, 1], [0, 5]], axis=0)
array([0, 6])
>>> np.sum([[0, 1], [0, 5]], axis=1)
array([1, 5])
If the accumulator is too small, overflow occurs:
>>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)
-128
"""
if isinstance(a, _gentype):
res = _sum_(a)
if out is not None:
out[...] = res
return out
return res
try:
sum = a.sum
except AttributeError:
return _wrapit(a, 'sum', axis, dtype, out)
return sum(axis, dtype, out)
def product (a, axis=None, dtype=None, out=None):
"""
Return the product of array elements over a given axis.
See Also
--------
prod : equivalent function; see for details.
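    Examples
    --------
    A minimal doctest (``product`` simply forwards to ``prod``):

    >>> np.product([2., 3.])
    6.0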
"""
try:
prod = a.prod
except AttributeError:
return _wrapit(a, 'prod', axis, dtype, out)
return prod(axis, dtype, out)
def sometrue(a, axis=None, out=None):
"""
Check whether some values are true.
Refer to `any` for full documentation.
See Also
--------
any : equivalent function
"""
try:
any = a.any
except AttributeError:
return _wrapit(a, 'any', axis, out)
return any(axis, out)
def alltrue (a, axis=None, out=None):
"""
Check if all elements of input array are true.
See Also
--------
numpy.all : Equivalent function; see for details.
"""
try:
all = a.all
except AttributeError:
return _wrapit(a, 'all', axis, out)
return all(axis, out)
def any(a,axis=None, out=None):
"""
Test whether any array element along a given axis evaluates to True.
    Returns a single boolean unless `axis` is not ``None``.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int, optional
Axis along which a logical OR is performed.
The default (`axis` = `None`) is to perform a logical OR
over a flattened input array. `axis` may be negative, in which
case it counts from the last to the first axis.
out : ndarray, optional
Alternative output array in which to place the result.
It must have the same shape as the expected output and
the type is preserved. See `doc.ufuncs` (Section
"Output arguments") for details.
Returns
-------
any : bool, ndarray
A new boolean or `ndarray` is returned unless `out` is
specified, in which case a reference to `out` is returned.
See Also
--------
ndarray.any : equivalent method
all : Test whether all array elements along a given axis evaluate
to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity
evaluate to `True` because these are not equal to zero.
Examples
--------
>>> np.any([[True, False], [True, True]])
True
>>> np.any([[True, False], [False, False]], axis=0)
array([ True, False], dtype=bool)
>>> np.any([-1, 0, 5])
True
>>> np.any(np.nan)
True
>>> o=np.array([False])
>>> z=np.any([-1, 4, 5], out=o)
>>> z, o
(array([ True], dtype=bool), array([ True], dtype=bool))
>>> # Check now that z is a reference to o
>>> z is o
True
>>> id(z), id(o) # identity of z and o # doctest: +SKIP
(191614240, 191614240)
"""
try:
any = a.any
except AttributeError:
return _wrapit(a, 'any', axis, out)
return any(axis, out)
def all(a,axis=None, out=None):
"""
Test whether all array elements along a given axis evaluate to True.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int, optional
Axis along which a logical AND is performed.
The default (`axis` = `None`) is to perform a logical AND
over a flattened input array. `axis` may be negative, in which
case it counts from the last to the first axis.
out : ndarray, optional
Alternative output array in which to place the result.
It must have the same shape as the expected output and
the type is preserved. See `doc.ufuncs` (Section "Output
arguments") for more details.
Returns
-------
all : ndarray, bool
A new boolean or array is returned unless `out` is
specified, in which case a reference to `out` is returned.
See Also
--------
ndarray.all : equivalent method
any : Test whether any array element along a given axis evaluates to True.
Notes
-----
Not a Number (NaN), positive infinity and negative infinity
evaluate to `True` because these are not equal to zero.
Examples
--------
>>> np.all([[True,False],[True,True]])
False
>>> np.all([[True,False],[True,True]], axis=0)
array([ True, False], dtype=bool)
>>> np.all([-1, 4, 5])
True
>>> np.all([1.0, np.nan])
True
>>> o=np.array([False])
>>> z=np.all([-1, 4, 5], out=o)
>>> id(z), id(o), z # doctest: +SKIP
(28293632, 28293632, array([ True], dtype=bool))
"""
try:
all = a.all
except AttributeError:
return _wrapit(a, 'all', axis, out)
return all(axis, out)
def cumsum (a, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative sum is computed. The default
(None) is to compute the cumsum over the flattened array.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary. See `doc.ufuncs`
(Section "Output arguments") for more details.
Returns
-------
cumsum_along_axis : ndarray.
A new array holding the result is returned unless `out` is
specified, in which case a reference to `out` is returned. The
result has the same size as `a`, and the same shape as `a` if
`axis` is not None or `a` is a 1-d array.
See Also
--------
sum : Sum array elements.
trapz : Integration of array values using the composite trapezoidal rule.
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.cumsum(a)
array([ 1, 3, 6, 10, 15, 21])
>>> np.cumsum(a, dtype=float) # specifies type of output value(s)
array([ 1., 3., 6., 10., 15., 21.])
>>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns
array([[1, 2, 3],
[5, 7, 9]])
>>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows
array([[ 1, 3, 6],
[ 4, 9, 15]])
"""
try:
cumsum = a.cumsum
except AttributeError:
return _wrapit(a, 'cumsum', axis, dtype, out)
return cumsum(axis, dtype, out)
def cumproduct(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product over the given axis.
See Also
--------
cumprod : equivalent function; see for details.
"""
try:
cumprod = a.cumprod
except AttributeError:
return _wrapit(a, 'cumprod', axis, dtype, out)
return cumprod(axis, dtype, out)
def ptp(a, axis=None, out=None):
"""
Range of values (maximum - minimum) along an axis.
The name of the function comes from the acronym for 'peak to peak'.
Parameters
----------
a : array_like
Input values.
axis : int, optional
Axis along which to find the peaks. By default, flatten the
array.
out : array_like
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type of the output values will be cast if necessary.
Returns
-------
ptp : ndarray
A new array holding the result, unless `out` was
specified, in which case a reference to `out` is returned.
Examples
--------
>>> x = np.arange(4).reshape((2,2))
>>> x
array([[0, 1],
[2, 3]])
>>> np.ptp(x, axis=0)
array([2, 2])
>>> np.ptp(x, axis=1)
array([1, 1])
"""
try:
ptp = a.ptp
except AttributeError:
return _wrapit(a, 'ptp', axis, out)
return ptp(axis, out)
def amax(a, axis=None, out=None):
"""
Return the maximum of an array or maximum along an axis.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default flattened input is used.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
amax : ndarray
A new array or a scalar array with the result.
See Also
--------
nanmax : nan values are ignored instead of being propagated
fmax : same behavior as the C99 fmax function
argmax : Indices of the maximum values.
Notes
-----
    NaN values are propagated; that is, if at least one item is NaN, the
    corresponding max value will be NaN as well. To ignore NaN values (MATLAB
    behavior), please use nanmax.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0, 1],
[2, 3]])
>>> np.amax(a)
3
>>> np.amax(a, axis=0)
array([2, 3])
>>> np.amax(a, axis=1)
array([1, 3])
>>> b = np.arange(5, dtype=np.float)
>>> b[2] = np.NaN
>>> np.amax(b)
nan
>>> np.nanmax(b)
4.0
"""
try:
amax = a.max
except AttributeError:
return _wrapit(a, 'max', axis, out)
return amax(axis, out)
def amin(a, axis=None, out=None):
"""
Return the minimum of an array or minimum along an axis.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which to operate. By default a flattened input is used.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
Returns
-------
amin : ndarray
A new array or a scalar array with the result.
See Also
--------
nanmin: nan values are ignored instead of being propagated
fmin: same behavior as the C99 fmin function
argmin: Return the indices of the minimum values.
amax, nanmax, fmax
Notes
-----
    NaN values are propagated; that is, if at least one item is NaN, the
    corresponding min value will be NaN as well. To ignore NaN values (MATLAB
    behavior), please use nanmin.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0, 1],
[2, 3]])
>>> np.amin(a) # Minimum of the flattened array
0
>>> np.amin(a, axis=0) # Minima along the first axis
array([0, 1])
>>> np.amin(a, axis=1) # Minima along the second axis
array([0, 2])
>>> b = np.arange(5, dtype=np.float)
>>> b[2] = np.NaN
>>> np.amin(b)
nan
>>> np.nanmin(b)
0.0
"""
try:
amin = a.min
except AttributeError:
return _wrapit(a, 'min', axis, out)
return amin(axis, out)
def alen(a):
"""
Return the length of the first dimension of the input array.
Parameters
----------
a : array_like
Input array.
Returns
-------
l : int
Length of the first dimension of `a`.
See Also
--------
shape, size
Examples
--------
>>> a = np.zeros((7,4,5))
>>> a.shape[0]
7
>>> np.alen(a)
7
"""
try:
return len(a)
except TypeError:
return len(array(a,ndmin=1))
def prod(a, axis=None, dtype=None, out=None):
"""
Return the product of array elements over a given axis.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis over which the product is taken. By default, the product
of all elements is calculated.
dtype : data-type, optional
The data-type of the returned array, as well as of the accumulator
in which the elements are multiplied. By default, if `a` is of
integer type, `dtype` is the default platform integer. (Note: if
the type of `a` is unsigned, then so is `dtype`.) Otherwise,
the dtype is the same as that of `a`.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the
output values will be cast if necessary.
Returns
-------
product_along_axis : ndarray, see `dtype` parameter above.
An array shaped as `a` but with the specified axis removed.
Returns a reference to `out` if specified.
See Also
--------
ndarray.prod : equivalent method
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow. That means that, on a 32-bit platform:
>>> x = np.array([536870910, 536870910, 536870910, 536870910])
>>> np.prod(x) #random
16
Examples
--------
By default, calculate the product of all elements:
>>> np.prod([1.,2.])
2.0
Even when the input array is two-dimensional:
>>> np.prod([[1.,2.],[3.,4.]])
24.0
But we can also specify the axis over which to multiply:
>>> np.prod([[1.,2.],[3.,4.]], axis=1)
array([ 2., 12.])
If the type of `x` is unsigned, then the output type is
the unsigned platform integer:
>>> x = np.array([1, 2, 3], dtype=np.uint8)
>>> np.prod(x).dtype == np.uint
True
If `x` is of a signed integer type, then the output type
is the default platform integer:
>>> x = np.array([1, 2, 3], dtype=np.int8)
>>> np.prod(x).dtype == np.int
True
"""
try:
prod = a.prod
except AttributeError:
return _wrapit(a, 'prod', axis, dtype, out)
return prod(axis, dtype, out)
def cumprod(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product of elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative product is computed. By default the
input is flattened.
dtype : dtype, optional
Type of the returned array, as well as of the accumulator in which
the elements are multiplied. If dtype is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a precision
less than that of the default platform integer. In that case, the
default platform integer is used instead.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type of the resulting values will be cast if necessary.
Returns
-------
cumprod : ndarray
A new array holding the result is returned unless `out` is
specified, in which case a reference to out is returned.
See Also
--------
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
Arithmetic is modular when using integer types, and no error is
raised on overflow.
Examples
--------
>>> a = np.array([1,2,3])
>>> np.cumprod(a) # intermediate results 1, 1*2
... # total product 1*2*3 = 6
array([1, 2, 6])
>>> a = np.array([[1, 2, 3], [4, 5, 6]])
>>> np.cumprod(a, dtype=float) # specify type of output
array([ 1., 2., 6., 24., 120., 720.])
    The cumulative product for each column (i.e., over the rows) of
`a`:
>>> np.cumprod(a, axis=0)
array([[ 1, 2, 3],
[ 4, 10, 18]])
    The cumulative product for each row (i.e., over the columns) of
`a`:
>>> np.cumprod(a,axis=1)
array([[ 1, 2, 6],
[ 4, 20, 120]])
"""
try:
cumprod = a.cumprod
except AttributeError:
return _wrapit(a, 'cumprod', axis, dtype, out)
return cumprod(axis, dtype, out)
def ndim(a):
"""
Return the number of dimensions of an array.
Parameters
----------
a : array_like
Input array. If it is not already an ndarray, a conversion is
attempted.
Returns
-------
number_of_dimensions : int
The number of dimensions in `a`. Scalars are zero-dimensional.
See Also
--------
ndarray.ndim : equivalent method
shape : dimensions of array
ndarray.shape : dimensions of array
Examples
--------
>>> np.ndim([[1,2,3],[4,5,6]])
2
>>> np.ndim(np.array([[1,2,3],[4,5,6]]))
2
>>> np.ndim(1)
0
"""
try:
return a.ndim
except AttributeError:
return asarray(a).ndim
def rank(a):
"""
Return the number of dimensions of an array.
If `a` is not already an array, a conversion is attempted.
Scalars are zero dimensional.
Parameters
----------
a : array_like
Array whose number of dimensions is desired. If `a` is not an array,
a conversion is attempted.
Returns
-------
number_of_dimensions : int
The number of dimensions in the array.
See Also
--------
ndim : equivalent function
ndarray.ndim : equivalent property
shape : dimensions of array
ndarray.shape : dimensions of array
Notes
-----
In the old Numeric package, `rank` was the term used for the number of
dimensions, but in Numpy `ndim` is used instead.
Examples
--------
>>> np.rank([1,2,3])
1
>>> np.rank(np.array([[1,2,3],[4,5,6]]))
2
>>> np.rank(1)
0
"""
try:
return a.ndim
except AttributeError:
return asarray(a).ndim
def size(a, axis=None):
"""
Return the number of elements along a given axis.
Parameters
----------
a : array_like
Input data.
axis : int, optional
Axis along which the elements are counted. By default, give
the total number of elements.
Returns
-------
element_count : int
Number of elements along the specified axis.
See Also
--------
shape : dimensions of array
ndarray.shape : dimensions of array
ndarray.size : number of elements in array
Examples
--------
>>> a = np.array([[1,2,3],[4,5,6]])
>>> np.size(a)
6
>>> np.size(a,1)
3
>>> np.size(a,0)
2
"""
if axis is None:
try:
return a.size
except AttributeError:
return asarray(a).size
else:
try:
return a.shape[axis]
except AttributeError:
return asarray(a).shape[axis]
def around(a, decimals=0, out=None):
"""
Evenly round to the given number of decimals.
Parameters
----------
a : array_like
Input data.
decimals : int, optional
Number of decimal places to round to (default: 0). If
decimals is negative, it specifies the number of positions to
the left of the decimal point.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary. See `doc.ufuncs` (Section
"Output arguments") for details.
Returns
-------
rounded_array : ndarray
An array of the same type as `a`, containing the rounded values.
Unless `out` was specified, a new array is created. A reference to
the result is returned.
The real and imaginary parts of complex numbers are rounded
separately. The result of rounding a float is a float.
See Also
--------
ndarray.round : equivalent method
ceil, fix, floor, rint, trunc
Notes
-----
For values exactly halfway between rounded decimal values, Numpy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
-0.5 and 0.5 round to 0.0, etc. Results may also be surprising due
to the inexact representation of decimal fractions in the IEEE
floating point standard [1]_ and errors introduced when scaling
by powers of ten.
References
----------
.. [1] "Lecture Notes on the Status of IEEE 754", William Kahan,
http://www.cs.berkeley.edu/~wkahan/ieee754status/IEEE754.PDF
.. [2] "How Futile are Mindless Assessments of
Roundoff in Floating-Point Computation?", William Kahan,
http://www.cs.berkeley.edu/~wkahan/Mindless.pdf
Examples
--------
>>> np.around([0.37, 1.64])
array([ 0., 2.])
>>> np.around([0.37, 1.64], decimals=1)
array([ 0.4, 1.6])
>>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
array([ 0., 2., 2., 4., 4.])
>>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned
array([ 1, 2, 3, 11])
>>> np.around([1,2,3,11], decimals=-1)
array([ 0, 0, 0, 10])
"""
try:
round = a.round
except AttributeError:
return _wrapit(a, 'round', decimals, out)
return round(decimals, out)
def round_(a, decimals=0, out=None):
"""
Round an array to the given number of decimals.
Refer to `around` for full documentation.
See Also
--------
around : equivalent function
"""
try:
round = a.round
except AttributeError:
return _wrapit(a, 'round', decimals, out)
return round(decimals, out)
def mean(a, axis=None, dtype=None, out=None):
"""
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements. The average is taken
over the flattened array by default, otherwise over the specified
axis. float64 intermediate and return values are used for integer
inputs.
Parameters
----------
a : array_like
Array containing numbers whose mean is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the means are computed. The default is to compute
the mean of the flattened array.
dtype : dtype, optional
Type to use in computing the mean. For integer inputs, the default
        is float64; for floating-point inputs, it is the same as the input
dtype.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type will be cast if
necessary. See `doc.ufuncs` for details.
Returns
-------
m : ndarray, see dtype parameter above
If `out=None`, returns a new array containing the mean values,
otherwise a reference to the output array is returned.
See Also
--------
average : Weighted average
Notes
-----
The arithmetic mean is the sum of the elements along the axis divided
by the number of elements.
Note that for floating-point input, the mean is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for float32 (see example below).
Specifying a higher-accuracy accumulator using the `dtype` keyword can
alleviate this issue.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.mean(a)
2.5
>>> np.mean(a, axis=0)
array([ 2., 3.])
>>> np.mean(a, axis=1)
array([ 1.5, 3.5])
In single precision, `mean` can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.mean(a)
0.546875
Computing the mean in float64 is more accurate:
>>> np.mean(a, dtype=np.float64)
0.55000000074505806
"""
try:
mean = a.mean
except AttributeError:
return _wrapit(a, 'mean', axis, dtype, out)
return mean(axis, dtype, out)
def std(a, axis=None, dtype=None, out=None, ddof=0):
"""
Compute the standard deviation along the specified axis.
Returns the standard deviation, a measure of the spread of a distribution,
of the array elements. The standard deviation is computed for the
flattened array by default, otherwise over the specified axis.
Parameters
----------
a : array_like
Calculate the standard deviation of these values.
axis : int, optional
Axis along which the standard deviation is computed. The default is
to compute the standard deviation of the flattened array.
dtype : dtype, optional
Type to use in computing the standard deviation. For arrays of
integer type the default is float64, for arrays of float types it is
the same as the array type.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type (of the calculated
values) will be cast if necessary.
ddof : int, optional
Means Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
By default `ddof` is zero.
Returns
-------
standard_deviation : ndarray, see dtype parameter above.
If `out` is None, return a new array containing the standard deviation,
otherwise return a reference to the output array.
See Also
--------
var, mean
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
The standard deviation is the square root of the average of the squared
deviations from the mean, i.e., ``std = sqrt(mean(abs(x - x.mean())**2))``.
The average squared deviation is normally calculated as ``x.sum() / N``, where
``N = len(x)``. If, however, `ddof` is specified, the divisor ``N - ddof``
is used instead. In standard statistical practice, ``ddof=1`` provides an
unbiased estimator of the variance of the infinite population. ``ddof=0``
provides a maximum likelihood estimate of the variance for normally
distributed variables. The standard deviation computed in this function
is the square root of the estimated variance, so even with ``ddof=1``, it
will not be an unbiased estimate of the standard deviation per se.
Note that, for complex numbers, `std` takes the absolute
value before squaring, so that the result is always real and nonnegative.
For floating-point input, the *std* is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for float32 (see example below).
Specifying a higher-accuracy accumulator using the `dtype` keyword can
alleviate this issue.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.std(a)
1.1180339887498949
>>> np.std(a, axis=0)
array([ 1., 1.])
>>> np.std(a, axis=1)
array([ 0.5, 0.5])
In single precision, std() can be inaccurate:
>>> a = np.zeros((2,512*512), dtype=np.float32)
>>> a[0,:] = 1.0
>>> a[1,:] = 0.1
>>> np.std(a)
0.45172946707416706
Computing the standard deviation in float64 is more accurate:
>>> np.std(a, dtype=np.float64)
0.44999999925552653
"""
try:
std = a.std
except AttributeError:
return _wrapit(a, 'std', axis, dtype, out, ddof)
return std(axis, dtype, out, ddof)
def var(a, axis=None, dtype=None, out=None, ddof=0):
"""
Compute the variance along the specified axis.
Returns the variance of the array elements, a measure of the spread of a
distribution. The variance is computed for the flattened array by default,
otherwise over the specified axis.
Parameters
----------
a : array_like
Array containing numbers whose variance is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the variance is computed. The default is to compute
the variance of the flattened array.
dtype : dtype, optional
Type to use in computing the variance. For arrays of integer type
        the default is float64; for arrays of float types it is the same as
the array type.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type is cast if
necessary.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in calculation is
``N - ddof``, where ``N`` represents the number of elements. By
default `ddof` is zero.
Returns
-------
variance : ndarray, see dtype parameter above
If out=None, returns a new array containing the variance; otherwise
a reference to the output array is returned.
See Also
--------
std : Standard deviation
mean : Average
numpy.doc.ufuncs : Section "Output arguments"
Notes
-----
The variance is the average of the squared deviations from the mean,
i.e., ``var = mean(abs(x - x.mean())**2)``.
The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``.
If, however, `ddof` is specified, the divisor ``N - ddof`` is used
instead. In standard statistical practice, ``ddof=1`` provides an
unbiased estimator of the variance of the infinite population. ``ddof=0``
provides a maximum likelihood estimate of the variance for normally
distributed variables.
Note that for complex numbers, the absolute value is taken before
squaring, so that the result is always real and nonnegative.
For floating-point input, the variance is computed using the same
precision the input has. Depending on the input data, this can cause
the results to be inaccurate, especially for float32 (see example below).
Specifying a higher-accuracy accumulator using the `dtype` keyword can
alleviate this issue.
Examples
--------
>>> a = np.array([[1,2],[3,4]])
>>> np.var(a)
1.25
>>> np.var(a,0)
array([ 1., 1.])
>>> np.var(a,1)
array([ 0.25, 0.25])
In single precision, var() can be inaccurate:
>>> a = np.zeros((2,512*512), dtype=np.float32)
>>> a[0,:] = 1.0
>>> a[1,:] = 0.1
>>> np.var(a)
0.20405951142311096
    Computing the variance in float64 is more accurate:
>>> np.var(a, dtype=np.float64)
0.20249999932997387
>>> ((1-0.55)**2 + (0.1-0.55)**2)/2
0.20250000000000001
"""
try:
var = a.var
except AttributeError:
return _wrapit(a, 'var', axis, dtype, out, ddof)
return var(axis, dtype, out, ddof)
|
Ademan/NumPy-GSoC
|
numpy/core/fromnumeric.py
|
Python
|
bsd-3-clause
| 71,769
|
#!/usr/bin/env python3
# Copyright (c) 2014-2017 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
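# Illustrative mappings:
#   name_to_ipv6('1.2.3.4')    -> pchIPv4 + bytearray([1, 2, 3, 4])
#   name_to_ipv6('::1')        -> 15 zero bytes followed by 0x01
#   name_to_ipv6('0x0100007f') -> pchIPv4 + bytearray([0x7f, 0x00, 0x00, 0x01])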
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
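# Illustrative usage:
#   parse_spec('1.2.3.4:8333', 1234) -> (name_to_ipv6('1.2.3.4'), 8333)
#   parse_spec('[::1]', 1234)        -> (name_to_ipv6('::1'), 1234)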
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
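# A single emitted entry, e.g. for 127.0.0.1:34621, looks like:
#   {{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xff,0x7f,0x00,0x00,0x01}, 34621}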
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
sys.exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the shillingcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 34621)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 33813)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main()
|
yavwa/Shilling
|
contrib/seeds/generate-seeds.py
|
Python
|
mit
| 4,351
|
# -*- coding: utf-8 -*-
#
# Cherokee-admin
#
# Authors:
# Alvaro Lopez Ortega <alvaro@alobbs.com>
#
# Copyright (C) 2010 Alvaro Lopez Ortega
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of version 2 of the GNU General Public
# License as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
#
import CTK
import Auth
import validations
URL_APPLY = '/plugin/plain/apply'
HELPS = [('modules_validators_plain', _("Plain text"))]
NOTE_PASSWD = N_("Full path to the plain text password file.")
class Plugin_plain (Auth.PluginAuth):
def __init__ (self, key, **kwargs):
Auth.PluginAuth.__init__ (self, key, **kwargs)
self.AddCommon (supported_methods=('basic', 'digest'))
table = CTK.PropsTable()
table.Add (_("Password File"), CTK.TextCfg("%s!passwdfile"%(self.key), False), _(NOTE_PASSWD))
submit = CTK.Submitter (URL_APPLY)
submit += table
self += CTK.RawHTML ("<h2>%s</h2>" % (_('Plain Password File')))
self += CTK.Indenter (submit)
# Publish
VALS = [("%s!passwdfile"%(self.key), validations.is_local_file_exists)]
CTK.publish ('^%s'%(URL_APPLY), CTK.cfg_apply_post, validation=VALS, method="POST")
|
chetan/cherokee
|
admin/plugins/plain.py
|
Python
|
gpl-2.0
| 1,703
|
from math import fabs
"""Kept these functions outside the class, since they are static
for the search and movement functions for board. The downside is it creates
an object for search purposes, which seems relatively heavy. I'll
optimize later if necessary
"""
def shift_up(pos):
"""returns new position that has shifted up"""
return Position(pos.x, pos.y + 1)
def shift_down(pos):
"""returns new position that has shifted down"""
return Position(pos.x, pos.y - 1)
def shift_right(pos):
"""returns new position that has shifted right"""
return Position(pos.x + 1, pos.y)
def shift_left(pos):
"""returns new position that has shifted left"""
return Position(pos.x - 1, pos.y)
def shift_up_left(pos):
    """returns new position that has shifted up and to the left"""
    return Position(pos.x - 1, pos.y + 1)
def shift_down_left(pos):
    """returns new position that has shifted down and to the left"""
    return Position(pos.x - 1, pos.y - 1)
def shift_up_right(pos):
    """returns new position that has shifted up and to the right"""
    return Position(pos.x + 1, pos.y + 1)
def shift_down_right(pos):
    """returns new position that has shifted down and to the right"""
    return Position(pos.x + 1, pos.y - 1)
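# Illustrative usage:
#   >>> p = Position(1, 1)
#   >>> shift_up(p)
#   (1,2)
#   >>> p.is_adj(Position(2, 2))
#   True
#   >>> p.to_json()
#   {'x': 1, 'y': 1}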
class Position(object):
def __init__(self, x, y):
self._x, self._y = x, y
# TODO: test the speed of this implementation
# def __cmp__(self, other):
# if (self.width != other.width):
# return cmp(self.width, other.width)
# return cmp(self.height, other.height)
def __eq__(self, pos):
return self._x == pos.x and self._y == pos.y
def __ne__(self, pos):
return self._x != pos.x or self._y != pos.y
def __hash__(self):
return hash(('x', self._x, 'y', self._y))
def __repr__(self):
return '({0},{1})'.format(self._x, self._y)
def __str__(self):
return '({0},{1})'.format(self._x, self._y)
# ##################### Accessors/Modifiers ###############################
@property
def x(self):
return self._x
@property
def y(self):
return self._y
# ############################### Discovery ###############################
def is_diagonal(self, pos):
"""Verify if points are diagonal"""
return fabs(self.x - pos.x) == fabs(self.y - pos.y)
def is_parallel(self, pos):
"""Verify if points are parallel"""
return self.x == pos.x or self.y == pos.y
def is_adj(self, pos):
"""Verify if points are adjacent.
checks parallel on x plane if y +/- 1 is adj
checks parallel on y plane if x +/- 1 is adj
check if diagonal and if only 1 square away on the x plane
check if diagonal and if only 1 square away on the y plane
"""
        return (self.x == pos.x and fabs(self.y - pos.y) == 1) \
            or (self.y == pos.y and fabs(self.x - pos.x) == 1) \
            or (self.is_diagonal(pos) and fabs(self.y - pos.y) == 1) \
            or (self.is_diagonal(pos) and fabs(self.x - pos.x) == 1)
def to_json(self):
return {'x': self.x, 'y': self.y}
|
aelkikhia/pyduel_engine
|
pyduel_engine/model/position.py
|
Python
|
apache-2.0
| 3,110
|
# This file is part of Moksha.
# Copyright (C) 2008-2010 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
"""Functional test suite for the controllers of the application."""
|
lmacken/moksha
|
moksha/tests/functional/__init__.py
|
Python
|
apache-2.0
| 710
|
"""
High-level functional tests for enc.
"""
import pytest
import enc
import django
import os
class TestFunctionalDumpToYAML():
"""
Various tests based on data manually entered into the admin,
and then dumped using the enc/fixtures/dump.sh then translated
to python object creation statements using enc/fixtures/dump_to_creates.py
these tests SHOULD GO AWAY once we have real unit and integration tests;
they're only intended to be high level functional tests (given admin input
yields a given YAML file) put in place before we change any application code.
"""
# mark everything in the class as requiring DB access, and functional tests
pytestmark = [pytest.mark.django_db, pytest.mark.functional]
def test_functional_dump2yaml_20131125_194117(self, client):
"""
High-level functional object dump-to-yaml tests
generated from enc/fixtures/2013-11-25_19-41-17.json as of rev 29a0f34
generated by enc/fixtures/dump_to_creates.py by 2013-12-07 21:04:52 (jason@jasonantman.com on jantman) at palantir
"""
to_save = []
node1 = enc.models.Node.objects.create(hostname='testnode', description='testnode_description')
to_save.append(node1)
for o in to_save:
o.save()
with open('enc/fixtures/2013-11-25_19-41-17.yaml', 'r') as fh:
yaml = fh.read()
response = client.get('/enc/puppet/testnode', CONTENT_TYPE='application/json')
assert response.status_code == 200
assert response.content == yaml
def test_functional_dump2yaml_20131125_194204(self, client):
"""
High-level functional object dump-to-yaml tests
generated from enc/fixtures/2013-11-25_19-42-04.json as of rev 29a0f34
generated by enc/fixtures/dump_to_creates.py by 2013-12-07 21:04:52 (jason@jasonantman.com on jantman) at palantir
"""
to_save = []
node1 = enc.models.Node.objects.create(hostname='testnode', description='testnode_description')
to_save.append(node1)
nodeclass1 = enc.models.NodeClass.objects.create(node=node1, classname='barclass', classparams=None)
to_save.append(nodeclass1)
nodeparameter2 = enc.models.NodeParameter.objects.create(node=node1, paramkey='foo_param', paramvalue={u'foo': u'bar'})
to_save.append(nodeparameter2)
for o in to_save:
o.save()
with open('enc/fixtures/2013-11-25_19-42-04.yaml', 'r') as fh:
yaml = fh.read()
response = client.get('/enc/puppet/testnode', CONTENT_TYPE='application/json')
assert response.status_code == 200
assert response.content == yaml
def test_functional_dump2yaml_20131125_194500(self, client):
"""
High-level functional object dump-to-yaml tests
generated from enc/fixtures/2013-11-25_19-45-00.json as of rev 29a0f34
generated by enc/fixtures/dump_to_creates.py by 2013-12-07 21:04:52 (jason@jasonantman.com on jantman) at palantir
"""
to_save = []
group1 = enc.models.Group.objects.create(name='group1', description='groupOne')
to_save.append(group1)
node1 = enc.models.Node.objects.create(hostname='testnode', description='testnode_description')
to_save.append(node1)
groupclass1 = enc.models.GroupClass.objects.create(classname='class_group1_foo', classparams={u'foo_grp1': u'bar_grp1'}, group=group1)
to_save.append(groupclass1)
nodeclass1 = enc.models.NodeClass.objects.create(node=node1, classname='barclass', classparams=None)
to_save.append(nodeclass1)
groupparameter2 = enc.models.GroupParameter.objects.create(paramkey='param_group1_bar', paramvalue={u'fooG1param': u'bar'}, group=group1)
to_save.append(groupparameter2)
nodeparameter2 = enc.models.NodeParameter.objects.create(node=node1, paramkey='foo_param', paramvalue={u'foo': u'bar'})
to_save.append(nodeparameter2)
for o in to_save:
o.save()
with open('enc/fixtures/2013-11-25_19-45-00.yaml', 'r') as fh:
yaml = fh.read()
response = client.get('/enc/puppet/testnode', CONTENT_TYPE='application/json')
assert response.status_code == 200
assert response.content == yaml
def test_functional_dump2yaml_20131125_194535(self, client):
"""
High-level functional object dump-to-yaml tests
generated from enc/fixtures/2013-11-25_19-45-35.json as of rev 29a0f34
generated by enc/fixtures/dump_to_creates.py by 2013-12-07 21:04:52 (jason@jasonantman.com on jantman) at palantir
"""
to_save = []
group1 = enc.models.Group.objects.create(name='group1', description='groupOne')
to_save.append(group1)
node1 = enc.models.Node.objects.create(hostname='testnode', description='testnode_description')
node1.groups.add(group1)
to_save.append(node1)
groupclass1 = enc.models.GroupClass.objects.create(classname='class_group1_foo', classparams={u'foo_grp1': u'bar_grp1'}, group=group1)
to_save.append(groupclass1)
nodeclass1 = enc.models.NodeClass.objects.create(node=node1, classname='barclass', classparams=None)
to_save.append(nodeclass1)
groupparameter2 = enc.models.GroupParameter.objects.create(paramkey='param_group1_bar', paramvalue={u'fooG1param': u'bar'}, group=group1)
to_save.append(groupparameter2)
nodeparameter2 = enc.models.NodeParameter.objects.create(node=node1, paramkey='foo_param', paramvalue={u'foo': u'bar'})
to_save.append(nodeparameter2)
for o in to_save:
o.save()
with open('enc/fixtures/2013-11-25_19-45-35.yaml', 'r') as fh:
yaml = fh.read()
response = client.get('/enc/puppet/testnode', CONTENT_TYPE='application/json')
assert response.status_code == 200
assert response.content == yaml
def test_functional_dump2yaml_20131125_194651(self, client):
"""
High-level functional object dump-to-yaml tests
generated from enc/fixtures/2013-11-25_19-46-51.json as of rev 29a0f34
generated by enc/fixtures/dump_to_creates.py by 2013-12-07 21:04:52 (jason@jasonantman.com on jantman) at palantir
"""
to_save = []
group1 = enc.models.Group.objects.create(name='group1', description='groupOne')
to_save.append(group1)
node1 = enc.models.Node.objects.create(hostname='testnode', description='testnode_description')
node1.groups.add(group1)
to_save.append(node1)
groupclass1 = enc.models.GroupClass.objects.create(classname='class_group1_foo', classparams={u'foo_grp1': u'bar_grp1'}, group=group1)
to_save.append(groupclass1)
groupclass2 = enc.models.GroupClass.objects.create(classname='class_group1_bar', classparams={u'bar_grp1': u'baz'}, group=group1)
to_save.append(groupclass2)
nodeclass1 = enc.models.NodeClass.objects.create(node=node1, classname='barclass', classparams=None)
to_save.append(nodeclass1)
groupparameter2 = enc.models.GroupParameter.objects.create(paramkey='param_group1_bar', paramvalue={u'fooG1param': u'bar'}, group=group1)
to_save.append(groupparameter2)
groupparameter3 = enc.models.GroupParameter.objects.create(paramkey='param_group1_baz', paramvalue={u'foo': u'param_group1_baz'}, group=group1)
to_save.append(groupparameter3)
nodeparameter2 = enc.models.NodeParameter.objects.create(node=node1, paramkey='foo_param', paramvalue={u'foo': u'bar'})
to_save.append(nodeparameter2)
for o in to_save:
o.save()
with open('enc/fixtures/2013-11-25_19-46-51.yaml', 'r') as fh:
yaml = fh.read()
response = client.get('/enc/puppet/testnode', CONTENT_TYPE='application/json')
assert response.status_code == 200
assert response.content == yaml
def test_functional_dump2yaml_20131125_194743(self, client):
"""
High-level functional object dump-to-yaml tests
generated from enc/fixtures/2013-11-25_19-47-43.json as of rev 29a0f34
generated by enc/fixtures/dump_to_creates.py by 2013-12-07 21:04:52 (jason@jasonantman.com on jantman) at palantir
"""
to_save = []
group1 = enc.models.Group.objects.create(name='group1', description='groupOne')
to_save.append(group1)
node1 = enc.models.Node.objects.create(hostname='testnode', description='testnode_description')
node1.groups.add(group1)
to_save.append(node1)
groupclass1 = enc.models.GroupClass.objects.create(classname='class_group1_foo', classparams={u'foo_grp1': u'bar_grp1'}, group=group1)
to_save.append(groupclass1)
groupclass2 = enc.models.GroupClass.objects.create(classname='class_group1_bar', classparams={u'bar_grp1': u'baz'}, group=group1)
to_save.append(groupclass2)
nodeclass1 = enc.models.NodeClass.objects.create(node=node1, classname='barclass', classparams=None)
to_save.append(nodeclass1)
groupparameter2 = enc.models.GroupParameter.objects.create(paramkey='param_group1_bar', paramvalue={u'fooG1param': u'bar'}, group=group1)
to_save.append(groupparameter2)
groupparameter3 = enc.models.GroupParameter.objects.create(paramkey='param_group1_baz', paramvalue={u'foo': u'param_group1_baz'}, group=group1)
to_save.append(groupparameter3)
nodeparameter2 = enc.models.NodeParameter.objects.create(node=node1, paramkey='foo_param', paramvalue={u'foo': u'bar'})
to_save.append(nodeparameter2)
classexclusion1 = enc.models.ClassExclusion.objects.create(node=node1, exclusion='class_group1_bar')
to_save.append(classexclusion1)
for o in to_save:
o.save()
with open('enc/fixtures/2013-11-25_19-47-43.yaml', 'r') as fh:
yaml = fh.read()
response = client.get('/enc/puppet/testnode', CONTENT_TYPE='application/json')
assert response.status_code == 200
assert response.content == yaml
def test_functional_dump2yaml_20131125_194832(self, client):
"""
High-level functional object dump-to-yaml tests
generated from enc/fixtures/2013-11-25_19-48-32.json as of rev 29a0f34
generated by enc/fixtures/dump_to_creates.py by 2013-12-07 21:04:52 (jason@jasonantman.com on jantman) at palantir
"""
to_save = []
group1 = enc.models.Group.objects.create(name='group1', description='groupOne')
to_save.append(group1)
node1 = enc.models.Node.objects.create(hostname='testnode', description='testnode_description')
node1.groups.add(group1)
to_save.append(node1)
groupclass1 = enc.models.GroupClass.objects.create(classname='class_group1_foo', classparams={u'foo_grp1': u'bar_grp1'}, group=group1)
to_save.append(groupclass1)
groupclass2 = enc.models.GroupClass.objects.create(classname='class_group1_bar', classparams={u'bar_grp1': u'baz'}, group=group1)
to_save.append(groupclass2)
nodeclass1 = enc.models.NodeClass.objects.create(node=node1, classname='barclass', classparams=None)
to_save.append(nodeclass1)
groupparameter2 = enc.models.GroupParameter.objects.create(paramkey='param_group1_bar', paramvalue={u'fooG1param': u'bar'}, group=group1)
to_save.append(groupparameter2)
groupparameter3 = enc.models.GroupParameter.objects.create(paramkey='param_group1_baz', paramvalue={u'foo': u'param_group1_baz'}, group=group1)
to_save.append(groupparameter3)
nodeparameter2 = enc.models.NodeParameter.objects.create(node=node1, paramkey='foo_param', paramvalue={u'foo': u'bar'})
to_save.append(nodeparameter2)
paramexclusion1 = enc.models.ParamExclusion.objects.create(node=node1, exclusion='param_group1_baz')
to_save.append(paramexclusion1)
for o in to_save:
o.save()
with open('enc/fixtures/2013-11-25_19-48-32.yaml', 'r') as fh:
yaml = fh.read()
response = client.get('/enc/puppet/testnode', CONTENT_TYPE='application/json')
assert response.status_code == 200
assert response.content == yaml
def test_functional_dump2yaml_20131125_195012(self, client):
"""
High-level functional object dump-to-yaml tests
generated from enc/fixtures/2013-11-25_19-50-12.json as of rev 29a0f34
generated by enc/fixtures/dump_to_creates.py by 2013-12-07 21:04:53 (jason@jasonantman.com on jantman) at palantir
"""
to_save = []
group1 = enc.models.Group.objects.create(name='group1', description='groupOne')
to_save.append(group1)
group2 = enc.models.Group.objects.create(name='group2', description='group2')
to_save.append(group2)
node1 = enc.models.Node.objects.create(hostname='testnode', description='testnode_description')
node1.groups.add(group1)
to_save.append(node1)
groupclass1 = enc.models.GroupClass.objects.create(classname='class_group1_foo', classparams={u'foo_grp1': u'bar_grp1'}, group=group1)
to_save.append(groupclass1)
groupclass2 = enc.models.GroupClass.objects.create(classname='class_group1_bar', classparams={u'bar_grp1': u'baz'}, group=group1)
to_save.append(groupclass2)
groupclass3 = enc.models.GroupClass.objects.create(classname='cls_grp2', classparams=None, group=group2)
to_save.append(groupclass3)
nodeclass1 = enc.models.NodeClass.objects.create(node=node1, classname='barclass', classparams=None)
to_save.append(nodeclass1)
groupparameter2 = enc.models.GroupParameter.objects.create(paramkey='param_group1_bar', paramvalue={u'fooG1param': u'bar'}, group=group1)
to_save.append(groupparameter2)
groupparameter3 = enc.models.GroupParameter.objects.create(paramkey='param_group1_baz', paramvalue={u'foo': u'param_group1_baz'}, group=group1)
to_save.append(groupparameter3)
groupparameter4 = enc.models.GroupParameter.objects.create(paramkey='param_grp2', paramvalue={u'foo': u'param_grp2'}, group=group2)
to_save.append(groupparameter4)
nodeparameter2 = enc.models.NodeParameter.objects.create(node=node1, paramkey='foo_param', paramvalue={u'foo': u'bar'})
to_save.append(nodeparameter2)
paramexclusion1 = enc.models.ParamExclusion.objects.create(node=node1, exclusion='param_group1_baz')
to_save.append(paramexclusion1)
for o in to_save:
o.save()
with open('enc/fixtures/2013-11-25_19-50-12.yaml', 'r') as fh:
yaml = fh.read()
response = client.get('/enc/puppet/testnode', CONTENT_TYPE='application/json')
assert response.status_code == 200
assert response.content == yaml
def test_functional_dump2yaml_20131125_195038(self, client):
"""
High-level functional object dump-to-yaml tests
generated from enc/fixtures/2013-11-25_19-50-38.json as of rev 29a0f34
generated by enc/fixtures/dump_to_creates.py by 2013-12-07 21:04:53 (jason@jasonantman.com on jantman) at palantir
"""
to_save = []
group2 = enc.models.Group.objects.create(name='group2', description='group2')
to_save.append(group2)
group1 = enc.models.Group.objects.create(name='group1', description='groupOne')
group1.parents.add(group2)
to_save.append(group1)
node1 = enc.models.Node.objects.create(hostname='testnode', description='testnode_description')
node1.groups.add(group1)
to_save.append(node1)
groupclass1 = enc.models.GroupClass.objects.create(classname='class_group1_foo', classparams={u'foo_grp1': u'bar_grp1'}, group=group1)
to_save.append(groupclass1)
groupclass2 = enc.models.GroupClass.objects.create(classname='class_group1_bar', classparams={u'bar_grp1': u'baz'}, group=group1)
to_save.append(groupclass2)
groupclass3 = enc.models.GroupClass.objects.create(classname='cls_grp2', classparams=None, group=group2)
to_save.append(groupclass3)
nodeclass1 = enc.models.NodeClass.objects.create(node=node1, classname='barclass', classparams=None)
to_save.append(nodeclass1)
groupparameter2 = enc.models.GroupParameter.objects.create(paramkey='param_group1_bar', paramvalue={u'fooG1param': u'bar'}, group=group1)
to_save.append(groupparameter2)
groupparameter3 = enc.models.GroupParameter.objects.create(paramkey='param_group1_baz', paramvalue={u'foo': u'param_group1_baz'}, group=group1)
to_save.append(groupparameter3)
groupparameter4 = enc.models.GroupParameter.objects.create(paramkey='param_grp2', paramvalue={u'foo': u'param_grp2'}, group=group2)
to_save.append(groupparameter4)
nodeparameter2 = enc.models.NodeParameter.objects.create(node=node1, paramkey='foo_param', paramvalue={u'foo': u'bar'})
to_save.append(nodeparameter2)
paramexclusion1 = enc.models.ParamExclusion.objects.create(node=node1, exclusion='param_group1_baz')
to_save.append(paramexclusion1)
for o in to_save:
o.save()
with open('enc/fixtures/2013-11-25_19-50-38.yaml', 'r') as fh:
yaml = fh.read()
response = client.get('/enc/puppet/testnode', CONTENT_TYPE='application/json')
assert response.status_code == 200
assert response.content == yaml
def test_functional_dump2yaml_20131125_195122(self, client):
"""
High-level functional object dump-to-yaml tests
generated from enc/fixtures/2013-11-25_19-51-22.json as of rev 29a0f34
generated by enc/fixtures/dump_to_creates.py by 2013-12-07 21:04:53 (jason@jasonantman.com on jantman) at palantir
"""
to_save = []
group1 = enc.models.Group.objects.create(name='group1', description='groupOne')
to_save.append(group1)
group2 = enc.models.Group.objects.create(name='group2', description='group2')
group2.parents.add(group1)
to_save.append(group2)
node1 = enc.models.Node.objects.create(hostname='testnode', description='testnode_description')
node1.groups.add(group1)
to_save.append(node1)
groupclass1 = enc.models.GroupClass.objects.create(classname='class_group1_foo', classparams={u'foo_grp1': u'bar_grp1'}, group=group1)
to_save.append(groupclass1)
groupclass2 = enc.models.GroupClass.objects.create(classname='class_group1_bar', classparams={u'bar_grp1': u'baz'}, group=group1)
to_save.append(groupclass2)
groupclass3 = enc.models.GroupClass.objects.create(classname='cls_grp2', classparams=None, group=group2)
to_save.append(groupclass3)
nodeclass1 = enc.models.NodeClass.objects.create(node=node1, classname='barclass', classparams=None)
to_save.append(nodeclass1)
groupparameter2 = enc.models.GroupParameter.objects.create(paramkey='param_group1_bar', paramvalue={u'fooG1param': u'bar'}, group=group1)
to_save.append(groupparameter2)
groupparameter3 = enc.models.GroupParameter.objects.create(paramkey='param_group1_baz', paramvalue={u'foo': u'param_group1_baz'}, group=group1)
to_save.append(groupparameter3)
groupparameter4 = enc.models.GroupParameter.objects.create(paramkey='param_grp2', paramvalue={u'foo': u'param_grp2'}, group=group2)
to_save.append(groupparameter4)
nodeparameter2 = enc.models.NodeParameter.objects.create(node=node1, paramkey='foo_param', paramvalue={u'foo': u'bar'})
to_save.append(nodeparameter2)
paramexclusion1 = enc.models.ParamExclusion.objects.create(node=node1, exclusion='param_group1_baz')
to_save.append(paramexclusion1)
for o in to_save:
o.save()
with open('enc/fixtures/2013-11-25_19-51-22.yaml', 'r') as fh:
yaml = fh.read()
response = client.get('/enc/puppet/testnode', CONTENT_TYPE='application/json')
assert response.status_code == 200
assert response.content == yaml
def test_functional_dump2yaml_20131125_195222(self, client):
"""
High-level functional object dump-to-yaml tests
generated from enc/fixtures/2013-11-25_19-52-22.json as of rev 29a0f34
generated by enc/fixtures/dump_to_creates.py by 2013-12-07 21:04:53 (jason@jasonantman.com on jantman) at palantir
"""
to_save = []
group1 = enc.models.Group.objects.create(name='group1', description='groupOne')
to_save.append(group1)
group2 = enc.models.Group.objects.create(name='group2', description='group2')
group2.parents.add(group1)
to_save.append(group2)
node1 = enc.models.Node.objects.create(hostname='testnode', description='testnode_description')
node1.groups.add(group1)
to_save.append(node1)
groupclass1 = enc.models.GroupClass.objects.create(classname='class_group1_foo', classparams={u'foo_grp1': u'bar_grp1'}, group=group1)
to_save.append(groupclass1)
groupclass2 = enc.models.GroupClass.objects.create(classname='class_group1_bar', classparams={u'bar_grp1': u'baz'}, group=group1)
to_save.append(groupclass2)
groupclass3 = enc.models.GroupClass.objects.create(classname='cls_grp2', classparams=None, group=group2)
to_save.append(groupclass3)
nodeclass1 = enc.models.NodeClass.objects.create(node=node1, classname='barclass', classparams=None)
to_save.append(nodeclass1)
groupparameter2 = enc.models.GroupParameter.objects.create(paramkey='param_group1_bar', paramvalue={u'fooG1param': u'bar'}, group=group1)
to_save.append(groupparameter2)
groupparameter3 = enc.models.GroupParameter.objects.create(paramkey='param_group1_baz', paramvalue={u'foo': u'param_group1_baz'}, group=group1)
to_save.append(groupparameter3)
groupparameter4 = enc.models.GroupParameter.objects.create(paramkey='param_grp2', paramvalue={u'foo': u'param_grp2'}, group=group2)
to_save.append(groupparameter4)
nodeparameter2 = enc.models.NodeParameter.objects.create(node=node1, paramkey='foo_param', paramvalue={u'foo': u'bar'})
to_save.append(nodeparameter2)
paramexclusion1 = enc.models.ParamExclusion.objects.create(node=node1, exclusion='param_group1_baz')
to_save.append(paramexclusion1)
paramexclusion2 = enc.models.ParamExclusion.objects.create(node=node1, exclusion='param_grp2')
to_save.append(paramexclusion2)
for o in to_save:
o.save()
with open('enc/fixtures/2013-11-25_19-52-22.yaml', 'r') as fh:
yaml = fh.read()
response = client.get('/enc/puppet/testnode', CONTENT_TYPE='application/json')
assert response.status_code == 200
assert response.content == yaml
def test_functional_dump2yaml_20131125_195241(self, client):
"""
High-level functional object dump-to-yaml tests
generated from enc/fixtures/2013-11-25_19-52-41.json as of rev 29a0f34
generated by enc/fixtures/dump_to_creates.py by 2013-12-07 21:04:53 (jason@jasonantman.com on jantman) at palantir
"""
to_save = []
group1 = enc.models.Group.objects.create(name='group1', description='groupOne')
to_save.append(group1)
group2 = enc.models.Group.objects.create(name='group2', description='group2')
group2.parents.add(group1)
to_save.append(group2)
node1 = enc.models.Node.objects.create(hostname='testnode', description='testnode_description')
node1.excluded_groups.add(group2)
node1.groups.add(group1)
to_save.append(node1)
groupclass1 = enc.models.GroupClass.objects.create(classname='class_group1_foo', classparams={u'foo_grp1': u'bar_grp1'}, group=group1)
to_save.append(groupclass1)
groupclass2 = enc.models.GroupClass.objects.create(classname='class_group1_bar', classparams={u'bar_grp1': u'baz'}, group=group1)
to_save.append(groupclass2)
groupclass3 = enc.models.GroupClass.objects.create(classname='cls_grp2', classparams=None, group=group2)
to_save.append(groupclass3)
nodeclass1 = enc.models.NodeClass.objects.create(node=node1, classname='barclass', classparams=None)
to_save.append(nodeclass1)
groupparameter2 = enc.models.GroupParameter.objects.create(paramkey='param_group1_bar', paramvalue={u'fooG1param': u'bar'}, group=group1)
to_save.append(groupparameter2)
groupparameter3 = enc.models.GroupParameter.objects.create(paramkey='param_group1_baz', paramvalue={u'foo': u'param_group1_baz'}, group=group1)
to_save.append(groupparameter3)
groupparameter4 = enc.models.GroupParameter.objects.create(paramkey='param_grp2', paramvalue={u'foo': u'param_grp2'}, group=group2)
to_save.append(groupparameter4)
nodeparameter2 = enc.models.NodeParameter.objects.create(node=node1, paramkey='foo_param', paramvalue={u'foo': u'bar'})
to_save.append(nodeparameter2)
paramexclusion1 = enc.models.ParamExclusion.objects.create(node=node1, exclusion='param_group1_baz')
to_save.append(paramexclusion1)
for o in to_save:
o.save()
with open('enc/fixtures/2013-11-25_19-52-41.yaml', 'r') as fh:
yaml = fh.read()
response = client.get('/enc/puppet/testnode', CONTENT_TYPE='application/json')
assert response.status_code == 200
assert response.content == yaml
jantman/nodemeister | enc/tests/test_functional_dump2yaml.py | Python | apache-2.0 | 25,715
# coding: utf-8
#
# Copyright 2012 NAMD-EMAP-FGV
#
# This file is part of PyPLN. You can get more information at: http://pypln.org/.
#
# PyPLN is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyPLN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyPLN. If not, see <http://www.gnu.org/licenses/>.
import shlex
from HTMLParser import HTMLParser
from tempfile import NamedTemporaryFile
from os import unlink
from subprocess import Popen, PIPE
from mimetypes import guess_type
from re import compile as regexp_compile, DOTALL, escape
import cld
import magic
from pypln.backend.celery_task import PyPLNTask
regexp_tags = regexp_compile(r'(<[ \t]*([a-zA-Z0-9!"./_-]*)[^>]*>)', flags=DOTALL)
regexp_comment = regexp_compile(r'<!--.*?-->', flags=DOTALL)
regexp_spaces_start = regexp_compile('([\n]+)[ \t]*',
flags=DOTALL)
regexp_spaces_end = regexp_compile('[ \t]*\n', flags=DOTALL)
regexp_newlines = regexp_compile('[\n]{3,}', flags=DOTALL)
regexp_spaces = regexp_compile('[ \t]{2,}', flags=DOTALL)
regexp_punctuation = regexp_compile('[ \t]*([' + escape('!,.:;?') + '])',
flags=DOTALL)
breakline_tags = ['table', '/table', 'tr', 'div', '/div', 'h1', '/h1', 'h2',
'/h2', 'h3', '/h3', 'h4', '/h4', 'h5', '/h5', 'h6', '/h6',
'br', 'br/']
double_breakline = ['table', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6']
def clean(text):
text = regexp_spaces_start.sub(r'\1', text)
text = regexp_spaces_end.sub('\n', text)
text = regexp_newlines.sub('\n\n', text)
text = regexp_spaces.sub(' ', text)
text = regexp_punctuation.sub(r'\1', text)
return text.strip()
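# Editor-added illustrative sketch (not part of the original module): runs
# of spaces collapse to one, space before punctuation is dropped, and three
# or more newlines squeeze down to a single blank line.
def _example_clean():
    assert clean("foo   bar ,  baz\n\n\n\nqux") == "foo bar, baz\n\nqux"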
def parse_html(html, remove_tags=None, remove_inside=None,
replace_space_with=' ', replace_newline_with='\n'):
html = regexp_comment.sub('', html.replace('\n', ''))
data = regexp_tags.split(html)
content_between = data[::3]
complete_tags = data[1::3]
tag_names = [x.lower() for x in data[2::3]]
for index, tag_name in enumerate(tag_names):
if not tag_name.strip():
continue
search_tag = tag_name
if tag_name and tag_name[0] == '/':
search_tag = tag_name[1:]
if remove_tags and search_tag not in remove_inside:
if tag_name in breakline_tags:
if search_tag in double_breakline:
complete_tags[index] = 2 * replace_newline_with
else:
complete_tags[index] = replace_newline_with
else:
complete_tags[index] = replace_space_with
if remove_inside and tag_name in remove_inside:
remove_to = tag_names.index('/' + tag_name, index)
total_to_remove = remove_to - index + 1
complete_tags[index:remove_to + 1] = [''] * total_to_remove
content_between[index + 2:remove_to + 1] = \
[''] * (total_to_remove - 2)
content_between[index + 1] = '\n'
complete_tags.append('')
result = ''.join(sum(zip(content_between, complete_tags), tuple()))
return clean(result)
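# Editor-added illustrative sketch (not part of the original module): with
# remove_tags set, inline tags become spaces and block-level tags become
# newlines, and the result is passed through clean().
def _example_parse_html():
    html = '<div>Hello <b>world</b></div>'
    assert parse_html(html, True, ['script', 'style']) == 'Hello world'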
def get_pdf_metadata(data):
lines = data.strip().splitlines()
metadata = {}
for line in lines:
try:
key, value = line[:line.index(':')], line[line.index(':') + 1:]
except ValueError:
continue
metadata[key.strip()] = value.strip()
return metadata
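# Editor-added illustrative sketch (not part of the original module):
# pdfinfo-style "key: value" lines become a dict; lines without a colon
# are skipped.
def _example_get_pdf_metadata():
    out = 'Title: Example\nPages: 3\nno colon on this line'
    assert get_pdf_metadata(out) == {'Title': 'Example', 'Pages': '3'}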
def extract_pdf(data):
temp = NamedTemporaryFile(delete=False)
filename = temp.name
temp.close()
pdf2html = Popen(shlex.split('pdftohtml -q -i - {}'.format(temp.name)),
stdin=PIPE, stdout=PIPE, stderr=PIPE)
html, html_err = pdf2html.communicate(input=data)
fp = open(filename + 's.html', 'r')
html = fp.read()
fp.close()
unlink(filename + '.html')
unlink(filename + '_ind.html')
unlink(filename + 's.html')
    text = parse_html(html.replace('&nbsp;', ' '), True, ['script', 'style'])
pdfinfo = Popen(shlex.split('pdfinfo -'), stdin=PIPE, stdout=PIPE,
stderr=PIPE)
meta_out, meta_err = pdfinfo.communicate(input=data)
try:
metadata = get_pdf_metadata(meta_out)
    except Exception:
        metadata = {}
        # TODO: what should I do here?
if not (text and metadata):
return '', {}
elif not html_err:
return text, {} if meta_err else metadata
else:
return '', {}
def trial_decode(text):
"""
    Tries to detect the text encoding using `magic`. If the detected
    encoding is not supported, tries utf-8, then iso-8859-1, and ultimately
    falls back to decoding as utf-8 while replacing invalid chars with
    `U+FFFD` (the replacement character).
    This is far from an ideal solution, but the extractor and the rest of
    the pipeline need a unicode object.
"""
with magic.Magic(flags=magic.MAGIC_MIME_ENCODING) as m:
content_encoding = m.id_buffer(text)
forced_decoding = False
try:
result = text.decode(content_encoding)
except LookupError:
# If the detected encoding is not supported, we try to decode it as
# utf-8.
try:
result = text.decode('utf-8')
except UnicodeDecodeError:
# Is there a better way of doing this than nesting try/except
# blocks? This smells really bad.
try:
result = text.decode('iso-8859-1')
except UnicodeDecodeError:
                # If neither utf-8 nor iso-8859-1 is capable of handling
# this text, we just decode it using utf-8 and replace invalid
# chars with U+FFFD.
# Two somewhat arbitrary decisions were made here: use utf-8
# and use 'replace' instead of 'ignore'.
result = text.decode('utf-8', 'replace')
forced_decoding = True
return result, forced_decoding
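# Editor-added illustrative sketch (not part of the original module); it
# assumes libmagic identifies plain ASCII bytes as an encoding Python can
# decode, so no forced decoding is needed.
def _example_trial_decode():
    text, forced_decoding = trial_decode(b'hello world')
    assert text == u'hello world'
    assert forced_decoding is False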
class Extractor(PyPLNTask):
#TODO: need to verify some exceptions when trying to convert 'evil' PDFs
#TODO: should 'replace_with' be '' when extracting from HTML?
def process(self, file_data):
with magic.Magic(flags=magic.MAGIC_MIME_TYPE) as m:
file_mime_type = m.id_buffer(file_data['contents'])
metadata = {}
if file_mime_type == 'text/plain':
text = file_data['contents']
elif file_mime_type == 'text/html':
text = parse_html(file_data['contents'], True, ['script', 'style'])
elif file_mime_type == 'application/pdf':
text, metadata = extract_pdf(file_data['contents'])
else:
# If we can't detect the mimetype we add a flag that can be read by
# the frontend to provide more information on why the document
# wasn't processed.
# XXX: We're returning an empty text because if we don't the
# pipeline will run indefinitely. The right approach is to make
# pypelinin understand an specific exception (something like
# StopPipeline) as a signal to stop processing this pipeline.
return {'mimetype': 'unknown', 'text': "",
'file_metadata': {}, 'language': ""}
text, forced_decoding = trial_decode(text)
if isinstance(text, unicode):
# HTMLParser only handles unicode objects. We can't pass the text
# through it if we don't know the encoding, and it's possible we
# also shouldn't. There's no way of knowing if it's a badly encoded
# html or a binary blob that happens do have bytes that look liked
# html entities.
text = HTMLParser().unescape(text)
text = clean(text)
if isinstance(text, unicode):
language = cld.detect(text.encode('utf-8'))[1]
else:
language = cld.detect(text)[1]
return {'text': text, 'file_metadata': metadata, 'language': language,
'mimetype': file_mime_type, 'forced_decoding': forced_decoding}
fccoelho/pypln.backend | pypln/backend/workers/extractor.py | Python | gpl-3.0 | 8,419
# -*- coding: utf-8 -*-
#
# Copyright 2018-2021 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Creating a sampled multidataset
"""
from .world import world, setup_module, teardown_module
from . import create_source_steps as source_create
from . import create_dataset_steps as dataset_create
class TestMultiDataset(object):
def setup(self):
"""
Debug information
"""
print("\n-------------------\nTests in: %s\n" % __name__)
def teardown(self):
"""
Debug information
"""
print("\nEnd of tests in: %s\n-------------------\n" % __name__)
def test_scenario1(self):
"""
Scenario: Successfully creating a sampled multi-dataset:
Given I create a data source with "<params>" uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a multi-dataset with sample rates <rates>
And I wait until the multi-dataset is ready less than <time_3> secs
When I compare the datasets' instances
Then the proportion of instances between datasets is <rate>
Examples:
            | data             | time_1 | time_2 | time_3 | rate | rates      |
            | ../data/iris.csv | 10     | 10     | 10     | 0.5  | [0.2, 0.3] |
"""
print(self.test_scenario1.__doc__)
examples = [
['data/iris.csv', '50', '50', '50', '0.5', '[0.2, 0.3]']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file_with_args(self, example[0], '{}')
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self,
example[2])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self,
example[2])
dataset_create.i_create_a_multidataset(self, example[5])
dataset_create.the_dataset_is_finished_in_less_than(self,
example[3])
dataset_create.i_compare_datasets_instances(self)
dataset_create.proportion_datasets_instances(self, example[4])
def test_scenario2(self):
"""
Scenario: Successfully creating a single dataset multi-dataset:
Given I create a data source with "<params>" uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a multi-dataset with sample rates <rates>
And I wait until the multi-dataset is ready less than <time_3> secs
When I compare the datasets' instances
Then the proportion of instances between datasets is <rate>
Examples:
            | data             | time_1 | time_2 | time_3 | rate | rates |
            | ../data/iris.csv | 10     | 10     | 10     | 0.2  | [0.2] |
"""
print(self.test_scenario2.__doc__)
examples = [
['data/iris.csv', '50', '50', '50', '0.2', '[0.2]']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file_with_args(self, example[0], '{}')
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self,
example[2])
dataset_create.i_create_a_multidataset(self, example[5])
dataset_create.the_dataset_is_finished_in_less_than(self,
example[3])
dataset_create.i_compare_datasets_instances(self)
dataset_create.proportion_datasets_instances(self, example[4])
def test_scenario3(self):
"""
Scenario: Successfully creating a sampled multi-dataset with sample:
Given I create a data source with "<params>" uploading a "<data>" file
And I wait until the source is ready less than <time_1> secs
And I create a dataset
And I wait until the dataset is ready less than <time_2> secs
And I create a multi-dataset with same dataset and the first sample rate <rates>
And I wait until the multi-dataset is ready less than <time_3> secs
When I compare the datasets' instances
Then the proportion of instances between datasets is <rate>
Examples:
            | data             | time_1 | time_2 | time_3 | rate | rates    |
            | ../data/iris.csv | 10     | 10     | 10     | 1.3  | [1, 0.3] |
"""
print(self.test_scenario3.__doc__)
examples = [
['data/iris.csv', '50', '50', '50', '1.3', '[1, 0.3]']]
for example in examples:
print("\nTesting with:\n", example)
source_create.i_upload_a_file_with_args(self, example[0], '{}')
source_create.the_source_is_finished(self, example[1])
dataset_create.i_create_a_dataset(self)
dataset_create.the_dataset_is_finished_in_less_than(self,
example[2])
dataset_create.i_create_a_multidataset_mixed_format(self, example[5])
dataset_create.the_dataset_is_finished_in_less_than(self,
example[3])
dataset_create.i_compare_datasets_instances(self)
dataset_create.proportion_datasets_instances(self, example[4])
jaor/python | bigml/tests/test_41_multidataset.py | Python | apache-2.0 | 6,851
from Client import Client
from fit.ColumnFixture import ColumnFixture
class CalculateFirstPhoneNumber(ColumnFixture):
client = Client()
phones = []
def first(self):
self.client.setPhones(self.phones)
return self.client.firstPhone()
epronk/pyfit2 | examples/CalculateFirstPhoneNumber.py | Python | gpl-2.0 | 261
"""Spectral Embedding"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Wei LI <kuantkid@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from scipy.linalg import eigh
from scipy.sparse.linalg import lobpcg
from ..base import BaseEstimator
from ..externals import six
from ..utils import check_random_state, check_array, check_symmetric
from ..utils.extmath import _deterministic_vector_sign_flip
from ..utils.graph import graph_laplacian
from ..utils.sparsetools import connected_components
from ..utils.arpack import eigsh
from ..metrics.pairwise import rbf_kernel
from ..neighbors import kneighbors_graph
def _graph_connected_component(graph, node_id):
"""Find the largest graph connected components that contains one
given node
Parameters
----------
graph : array-like, shape: (n_samples, n_samples)
adjacency matrix of the graph, non-zero weight means an edge
between the nodes
node_id : int
The index of the query node of the graph
Returns
-------
connected_components_matrix : array-like, shape: (n_samples,)
        An array of bool values indicating which nodes belong to the
        connected component that contains the query node
"""
n_node = graph.shape[0]
if sparse.issparse(graph):
# speed up row-wise access to boolean connection mask
graph = graph.tocsr()
connected_nodes = np.zeros(n_node, dtype=np.bool)
nodes_to_explore = np.zeros(n_node, dtype=np.bool)
nodes_to_explore[node_id] = True
for _ in range(n_node):
last_num_component = connected_nodes.sum()
np.logical_or(connected_nodes, nodes_to_explore, out=connected_nodes)
if last_num_component >= connected_nodes.sum():
break
indices = np.where(nodes_to_explore)[0]
nodes_to_explore.fill(False)
for i in indices:
if sparse.issparse(graph):
neighbors = graph[i].toarray().ravel()
else:
neighbors = graph[i]
np.logical_or(nodes_to_explore, neighbors, out=nodes_to_explore)
return connected_nodes
def _graph_is_connected(graph):
""" Return whether the graph is connected (True) or Not (False)
Parameters
----------
graph : array-like or sparse matrix, shape: (n_samples, n_samples)
adjacency matrix of the graph, non-zero weight means an edge
between the nodes
Returns
-------
is_connected : bool
True means the graph is fully connected and False means not
"""
if sparse.isspmatrix(graph):
# sparse graph, find all the connected components
n_connected_components, _ = connected_components(graph)
return n_connected_components == 1
else:
# dense graph, find all connected components start from node 0
return _graph_connected_component(graph, 0).sum() == graph.shape[0]
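# Editor-added illustrative sketch (not part of the original module): a
# block-diagonal adjacency matrix with two components is reported as not
# connected, and the component mask from node 0 covers only its own block.
def _example_graph_connectivity():
    graph = np.array([[1, 1, 0],
                      [1, 1, 0],
                      [0, 0, 1]])
    assert not _graph_is_connected(graph)
    assert _graph_connected_component(graph, 0).tolist() == [True, True, False]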
def _set_diag(laplacian, value, norm_laplacian):
"""Set the diagonal of the laplacian matrix and convert it to a
sparse format well suited for eigenvalue decomposition
Parameters
----------
laplacian : array or sparse matrix
The graph laplacian
value : float
The value of the diagonal
norm_laplacian : bool
Whether the value of the diagonal should be changed or not
Returns
-------
laplacian : array or sparse matrix
An array of matrix in a form that is well suited to fast
eigenvalue decomposition, depending on the band width of the
matrix.
"""
n_nodes = laplacian.shape[0]
    # We need to set all entries of the diagonal to the given value
if not sparse.isspmatrix(laplacian):
if norm_laplacian:
laplacian.flat[::n_nodes + 1] = value
else:
laplacian = laplacian.tocoo()
if norm_laplacian:
diag_idx = (laplacian.row == laplacian.col)
laplacian.data[diag_idx] = value
# If the matrix has a small number of diagonals (as in the
# case of structured matrices coming from images), the
# dia format might be best suited for matvec products:
n_diags = np.unique(laplacian.row - laplacian.col).size
if n_diags <= 7:
# 3 or less outer diagonals on each side
laplacian = laplacian.todia()
else:
# csr has the fastest matvec and is thus best suited to
# arpack
laplacian = laplacian.tocsr()
return laplacian
def spectral_embedding(adjacency, n_components=8, eigen_solver=None,
random_state=None, eigen_tol=0.0,
norm_laplacian=True, drop_first=True):
"""Project the sample on the first eigenvectors of the graph Laplacian.
The adjacency matrix is used to compute a normalized graph Laplacian
whose spectrum (especially the eigenvectors associated to the
smallest eigenvalues) has an interpretation in terms of minimal
number of cuts necessary to split the graph into comparably sized
components.
This embedding can also 'work' even if the ``adjacency`` variable is
not strictly the adjacency matrix of a graph but more generally
an affinity or similarity matrix between samples (for instance the
heat kernel of a euclidean distance matrix or a k-NN matrix).
    However, care must be taken to always make the affinity matrix symmetric
so that the eigenvector decomposition works as expected.
Read more in the :ref:`User Guide <spectral_embedding>`.
Parameters
----------
adjacency : array-like or sparse matrix, shape: (n_samples, n_samples)
The adjacency matrix of the graph to embed.
n_components : integer, optional, default 8
The dimension of the projection subspace.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}, default None
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization of the
lobpcg eigenvectors decomposition when eigen_solver == 'amg'.
By default, arpack is used.
eigen_tol : float, optional, default=0.0
Stopping criterion for eigendecomposition of the Laplacian matrix
when using arpack eigen_solver.
drop_first : bool, optional, default=True
Whether to drop the first eigenvector. For spectral embedding, this
should be True as the first eigenvector should be constant vector for
connected graph, but for spectral clustering, this should be kept as
False to retain the first eigenvector.
norm_laplacian : bool, optional, default=True
If True, then compute normalized Laplacian.
Returns
-------
embedding : array, shape=(n_samples, n_components)
The reduced samples.
Notes
-----
Spectral embedding is most useful when the graph has one connected
    component. If the graph has many components, the first few eigenvectors
will simply uncover the connected components of the graph.
References
----------
* https://en.wikipedia.org/wiki/LOBPCG
* Toward the Optimal Preconditioned Eigensolver: Locally Optimal
Block Preconditioned Conjugate Gradient Method
Andrew V. Knyazev
http://dx.doi.org/10.1137%2FS1064827500366124
"""
adjacency = check_symmetric(adjacency)
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
if eigen_solver == "amg":
raise ValueError("The eigen_solver was set to 'amg', but pyamg is "
"not available.")
if eigen_solver is None:
eigen_solver = 'arpack'
elif eigen_solver not in ('arpack', 'lobpcg', 'amg'):
raise ValueError("Unknown value for eigen_solver: '%s'."
"Should be 'amg', 'arpack', or 'lobpcg'"
% eigen_solver)
random_state = check_random_state(random_state)
n_nodes = adjacency.shape[0]
# Whether to drop the first eigenvector
if drop_first:
n_components = n_components + 1
if not _graph_is_connected(adjacency):
warnings.warn("Graph is not fully connected, spectral embedding"
" may not work as expected.")
laplacian, dd = graph_laplacian(adjacency,
normed=norm_laplacian, return_diag=True)
if (eigen_solver == 'arpack' or eigen_solver != 'lobpcg' and
(not sparse.isspmatrix(laplacian) or n_nodes < 5 * n_components)):
# lobpcg used with eigen_solver='amg' has bugs for low number of nodes
# for details see the source code in scipy:
# https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
# /lobpcg/lobpcg.py#L237
# or matlab:
# http://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
laplacian = _set_diag(laplacian, 1, norm_laplacian)
# Here we'll use shift-invert mode for fast eigenvalues
# (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
# for a short explanation of what this means)
# Because the normalized Laplacian has eigenvalues between 0 and 2,
# I - L has eigenvalues between -1 and 1. ARPACK is most efficient
# when finding eigenvalues of largest magnitude (keyword which='LM')
# and when these eigenvalues are very large compared to the rest.
# For very large, very sparse graphs, I - L can have many, many
# eigenvalues very near 1.0. This leads to slow convergence. So
# instead, we'll use ARPACK's shift-invert mode, asking for the
# eigenvalues near 1.0. This effectively spreads-out the spectrum
# near 1.0 and leads to much faster convergence: potentially an
# orders-of-magnitude speedup over simply using keyword which='LA'
# in standard mode.
try:
# We are computing the opposite of the laplacian inplace so as
# to spare a memory allocation of a possibly very large array
laplacian *= -1
v0 = random_state.uniform(-1, 1, laplacian.shape[0])
lambdas, diffusion_map = eigsh(laplacian, k=n_components,
sigma=1.0, which='LM',
tol=eigen_tol, v0=v0)
embedding = diffusion_map.T[n_components::-1] * dd
except RuntimeError:
# When submatrices are exactly singular, an LU decomposition
# in arpack fails. We fallback to lobpcg
eigen_solver = "lobpcg"
# Revert the laplacian to its opposite to have lobpcg work
laplacian *= -1
if eigen_solver == 'amg':
# Use AMG to get a preconditioner and speed up the eigenvalue
# problem.
if not sparse.issparse(laplacian):
warnings.warn("AMG works better for sparse matrices")
# lobpcg needs double precision floats
laplacian = check_array(laplacian, dtype=np.float64,
accept_sparse=True)
laplacian = _set_diag(laplacian, 1, norm_laplacian)
ml = smoothed_aggregation_solver(check_array(laplacian, 'csr'))
M = ml.aspreconditioner()
X = random_state.rand(laplacian.shape[0], n_components + 1)
X[:, 0] = dd.ravel()
lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12,
largest=False)
embedding = diffusion_map.T * dd
if embedding.shape[0] == 1:
raise ValueError
elif eigen_solver == "lobpcg":
# lobpcg needs double precision floats
laplacian = check_array(laplacian, dtype=np.float64,
accept_sparse=True)
if n_nodes < 5 * n_components + 1:
# see note above under arpack why lobpcg has problems with small
# number of nodes
# lobpcg will fallback to eigh, so we short circuit it
if sparse.isspmatrix(laplacian):
laplacian = laplacian.toarray()
lambdas, diffusion_map = eigh(laplacian)
embedding = diffusion_map.T[:n_components] * dd
else:
laplacian = _set_diag(laplacian, 1, norm_laplacian)
# We increase the number of eigenvectors requested, as lobpcg
# doesn't behave well in low dimension
X = random_state.rand(laplacian.shape[0], n_components + 1)
X[:, 0] = dd.ravel()
lambdas, diffusion_map = lobpcg(laplacian, X, tol=1e-15,
largest=False, maxiter=2000)
embedding = diffusion_map.T[:n_components] * dd
if embedding.shape[0] == 1:
raise ValueError
embedding = _deterministic_vector_sign_flip(embedding)
if drop_first:
return embedding[1:n_components].T
else:
return embedding[:n_components].T
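# Editor-added illustrative sketch (not part of the original module): two
# triangles joined by a single edge. The one requested component is the
# Fiedler direction, which separates the two groups by sign.
def _example_spectral_embedding():
    adjacency = np.array([[0., 1., 1., 0., 0., 0.],
                          [1., 0., 1., 0., 0., 0.],
                          [1., 1., 0., 1., 0., 0.],
                          [0., 0., 1., 0., 1., 1.],
                          [0., 0., 0., 1., 0., 1.],
                          [0., 0., 0., 1., 1., 0.]])
    embedding = spectral_embedding(adjacency, n_components=1, random_state=0)
    assert embedding.shape == (6, 1)
    assert np.sign(embedding[:3, 0].mean()) != np.sign(embedding[3:, 0].mean())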
class SpectralEmbedding(BaseEstimator):
"""Spectral embedding for non-linear dimensionality reduction.
Forms an affinity matrix given by the specified function and
applies spectral decomposition to the corresponding graph laplacian.
The resulting transformation is given by the value of the
eigenvectors for each data point.
Read more in the :ref:`User Guide <spectral_embedding>`.
Parameters
-----------
n_components : integer, default: 2
The dimension of the projected subspace.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities.
random_state : int seed, RandomState instance, or None, default : None
A pseudo random number generator used for the initialization of the
lobpcg eigenvectors decomposition when eigen_solver == 'amg'.
affinity : string or callable, default : "nearest_neighbors"
How to construct the affinity matrix.
- 'nearest_neighbors' : construct affinity matrix by knn graph
- 'rbf' : construct affinity matrix by rbf kernel
- 'precomputed' : interpret X as precomputed affinity matrix
- callable : use passed in function as affinity
the function takes in data matrix (n_samples, n_features)
and return affinity matrix (n_samples, n_samples).
gamma : float, optional, default : 1/n_features
Kernel coefficient for rbf kernel.
n_neighbors : int, default : max(n_samples/10 , 1)
Number of nearest neighbors for nearest_neighbors graph building.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
embedding_ : array, shape = (n_samples, n_components)
Spectral embedding of the training matrix.
affinity_matrix_ : array, shape = (n_samples, n_samples)
Affinity_matrix constructed from samples or precomputed.
References
----------
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- On Spectral Clustering: Analysis and an algorithm, 2011
Andrew Y. Ng, Michael I. Jordan, Yair Weiss
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.19.8100
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
"""
def __init__(self, n_components=2, affinity="nearest_neighbors",
gamma=None, random_state=None, eigen_solver=None,
n_neighbors=None, n_jobs=1):
self.n_components = n_components
self.affinity = affinity
self.gamma = gamma
self.random_state = random_state
self.eigen_solver = eigen_solver
self.n_neighbors = n_neighbors
self.n_jobs = n_jobs
@property
def _pairwise(self):
return self.affinity == "precomputed"
def _get_affinity_matrix(self, X, Y=None):
"""Calculate the affinity matrix from data
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
If affinity is "precomputed"
X : array-like, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
Returns
-------
affinity_matrix, shape (n_samples, n_samples)
"""
if self.affinity == 'precomputed':
self.affinity_matrix_ = X
return self.affinity_matrix_
if self.affinity == 'nearest_neighbors':
if sparse.issparse(X):
warnings.warn("Nearest neighbors affinity currently does "
"not support sparse input, falling back to "
"rbf affinity")
self.affinity = "rbf"
else:
self.n_neighbors_ = (self.n_neighbors
if self.n_neighbors is not None
else max(int(X.shape[0] / 10), 1))
self.affinity_matrix_ = kneighbors_graph(X, self.n_neighbors_,
include_self=True,
n_jobs=self.n_jobs)
# currently only symmetric affinity_matrix supported
self.affinity_matrix_ = 0.5 * (self.affinity_matrix_ +
self.affinity_matrix_.T)
return self.affinity_matrix_
if self.affinity == 'rbf':
self.gamma_ = (self.gamma
if self.gamma is not None else 1.0 / X.shape[1])
self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma_)
return self.affinity_matrix_
self.affinity_matrix_ = self.affinity(X)
return self.affinity_matrix_
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
If affinity is "precomputed"
X : array-like, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
Returns
-------
self : object
Returns the instance itself.
"""
X = check_array(X, ensure_min_samples=2, estimator=self)
random_state = check_random_state(self.random_state)
if isinstance(self.affinity, six.string_types):
if self.affinity not in set(("nearest_neighbors", "rbf",
"precomputed")):
raise ValueError(("%s is not a valid affinity. Expected "
"'precomputed', 'rbf', 'nearest_neighbors' "
"or a callable.") % self.affinity)
elif not callable(self.affinity):
raise ValueError(("'affinity' is expected to be an affinity "
"name or a callable. Got: %s") % self.affinity)
affinity_matrix = self._get_affinity_matrix(X)
self.embedding_ = spectral_embedding(affinity_matrix,
n_components=self.n_components,
eigen_solver=self.eigen_solver,
random_state=random_state)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
If affinity is "precomputed"
X : array-like, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
self.fit(X)
return self.embedding_
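# Editor-added illustrative sketch (not part of the original module): embed
# 40 random 5-D points into 2 dimensions with the default nearest-neighbors
# affinity (the k-NN graph of random points may trigger the "not fully
# connected" warning; the embedding is still computed).
def _example_spectral_embedding_estimator():
    rng = np.random.RandomState(0)
    X = rng.rand(40, 5)
    X_2d = SpectralEmbedding(n_components=2, random_state=0).fit_transform(X)
    assert X_2d.shape == (40, 2)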
meduz/scikit-learn | sklearn/manifold/spectral_embedding_.py | Python | bsd-3-clause | 20,837
import dbus
#
# Copyright (c) 2014-2022 The Voxie Authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import dbus.service
import xml.etree.ElementTree
import sys
import inspect
import io
import os
# Raise an exception if value is not a valid value with type signature
def verifyValue(signature, value):
signature = dbus.Signature(signature)
if len(list(signature)) != 1:
raise Exception('Expected a single complete type')
if signature == 'y' or signature == 'n' or signature == 'q' or signature == 'i' or signature == 'u' or signature == 'x' or signature == 't':
if type(value) != int:
raise Exception('Expected an int, got a %s' % repr(type(value)))
elif signature == 'd':
if type(value) != float and type(value) != int:
raise Exception('Expected a float, got a %s' % repr(type(value)))
elif signature == 'b':
if type(value) != bool:
raise Exception('Expected a bool, got a %s' % repr(type(value)))
elif signature == 'h':
if type(value) != dbus.types.UnixFd:
raise Exception(
'Expected a dbus.types.UnixFd, got a %s' % repr(type(value)))
if 'variant_level' in dir(value):
# Starting with dbus-python 1.2.10 UnixFd has a variant_level
if value.variant_level != 0:
raise Exception('Got dbus.ObjectPath with non-zero variant level')
elif signature == 's':
if type(value) != str:
raise Exception('Expected a str, got a %s' % repr(type(value)))
elif signature == 'o':
if type(value) != dbus.ObjectPath:
raise Exception('Expected a dbus.ObjectPath, got a %s' %
repr(type(value)))
if value.variant_level != 0:
raise Exception('Got dbus.ObjectPath with non-zero variant level')
elif signature == 'g':
if type(value) != dbus.Signature:
raise Exception('Expected a dbus.Signature, got a %s' %
repr(type(value)))
if value.variant_level != 0:
raise Exception('Got dbus.Signature with non-zero variant level')
elif signature == 'v':
if type(value) != Variant:
raise Exception('Expected a Variant, got a %s' % repr(type(value)))
elif signature[0:2] == 'a{':
if signature[-1] != '}':
raise Exception("signature[-1] != '}'")
types = list(dbus.Signature(signature[2:-1]))
if len(types) != 2:
raise Exception('len(types) != 2')
keyType = types[0]
valueType = types[1]
if type(value) != dict:
raise Exception('Expected a dict, got a %s' % repr(type(value)))
for key in value:
verifyValue(keyType, key)
verifyValue(valueType, value[key])
elif signature[0] == 'a':
valueType = signature[1:]
if type(value) != list:
raise Exception('Expected a list, got a %s' % repr(type(value)))
for member in value:
verifyValue(valueType, member)
elif signature[0] == '(':
types = list(dbus.Signature(signature[1:-1]))
if type(value) != tuple:
raise Exception('Expected a tuple, got a %s' % repr(type(value)))
if len(types) != len(value):
raise Exception('Expected a tuple with %d elements, got %d' % (
len(types), len(value)))
for i in range(len(types)):
verifyValue(types[i], value[i])
else:
raise Exception('Unknown signature: %s' % signature)
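# Editor-added illustrative sketch (not part of the original module):
# matching values pass verifyValue() silently, mismatches raise.
def _example_verify_value():
    verifyValue('ai', [1, 2, 3])    # array of int32 -> OK
    verifyValue('(is)', (1, 'x'))   # struct of (int32, string) -> OK
    try:
        verifyValue('s', 42)        # wrong type for a string signature
    except Exception:
        pass
    else:
        raise AssertionError('expected a type mismatch error')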
# TODO: This is specific for VoxieContext
def convertObjectPath(val):
if val is None:
return dbus.ObjectPath('/')
if isinstance(val, DBusObject):
return val._objectPath
return val
class Variant:
def __init__(self, signature, value):
self.__signature = dbus.Signature(signature)
self.__value = value
if len(list(self.__signature)) != 1:
raise Exception(
'Signature is not a single complete type: ' + str(self.__signature))
# TODO: convert self.__value here?
if self.__signature == 'o':
self.__value = convertObjectPath(self.__value)
elif self.__signature == 'ao':
self.__value = list(map(convertObjectPath, self.__value))
verifyValue(signature, self.__value)
@property
def signature(self):
return self.__signature
@property
def value(self):
return self.__value
def getValue(self, expectedSignature):
if self.signature != expectedSignature:
raise Exception("Expected signature '%s', got '%s'" %
(expectedSignature, self.signature))
return self.value
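# Editor-added illustrative sketch (not part of the original module): a
# Variant pairs one complete D-Bus signature with a checked value, and
# getValue() enforces the expected signature.
def _example_variant():
    v = Variant('s', 'hello')
    assert v.getValue('s') == 'hello'
    try:
        v.getValue('i')
    except Exception:
        pass
    else:
        raise AssertionError('expected a signature mismatch error')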
def check_id(id):
for c in id:
if ord(c) >= 128:
continue
if c >= '0' and c <= '9':
continue
if c >= 'A' and c <= 'Z':
continue
if c >= 'a' and c <= 'z':
continue
if c == '_':
continue
raise Exception('Invalid character ' + c + ' in argument name ' + repr(id))
# Just setting __signature__ (PEP-0362) won't be picked up by Spyder
def fake_arglist(realfunc, name, args, defValues={}, kwonlyArgs=[], makeKWOnlyArgsNormal=False):
# http://stackoverflow.com/questions/1409295/set-function-signature-in-python/1409496#1409496
name = str(name)
check_id(name)
args_checked_f = []
args_checked = []
for arg in args:
s = str(arg)
check_id(s)
args_checked.append(s)
if s in defValues:
s = s + ' = ' + defValues[s]
args_checked_f.append(s)
if len(kwonlyArgs) != 0 and not makeKWOnlyArgsNormal:
args_checked_f.append("*")
for arg in kwonlyArgs:
s = str(arg)
check_id(s)
args_checked.append(s + ' = ' + s)
args_checked_f.append(s)
args_checked.append("**kwargs")
args_checked_f.append("**kwargs")
argstr = ", ".join(args_checked)
argstr_f = ", ".join(args_checked_f)
fakefunc = "class DBusObjectFakeClass:\n def %s(self, %s):\n return real_func.__get__(self, None)(%s)\n" % (
name, argstr_f, argstr)
# print (fakefunc)
fakefunc_code = compile(fakefunc, "fakesource", "exec")
fakeglobals = {}
eval(fakefunc_code, {"real_func": realfunc}, fakeglobals)
return fakeglobals['DBusObjectFakeClass'].__dict__[name]
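# Editor-added illustrative sketch (not part of the original module):
# fake_arglist() builds a method whose visible signature lists the given
# argument names but which forwards every call to the real function.
def _example_fake_arglist():
    method = fake_arglist(lambda self, a, b, **kwargs: (a, b), 'pair', ['a', 'b'])
    assert method(object(), 1, 2) == (1, 2)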
def get_variant_level(val):
if type(val) == dbus.types.UnixFd and 'variant_level' not in dir(val):
return 1 # Why does dbus.types.UnixFd not have a variant_level? Fixed in dbus-python 1.2.10
return val.variant_level
def reduce_variant_level(val, amount):
if type(val) == dbus.types.UnixFd and 'variant_level' not in dir(val):
return val # Why does dbus.types.UnixFd not have a variant_level? Fixed in dbus-python 1.2.10
kwargs = {}
if hasattr(val, 'signature'):
kwargs['signature'] = val.signature
if type(val) == dbus.types.UnixFd:
# A UnixFd constructor does not accept another UnixFd as parameter
# Note: Calling take() will invalidate this original val
val2 = val.take()
try:
return type(val)(val2, variant_level=get_variant_level(val) - amount, **kwargs)
finally:
# Because the constructor calls dup(), the original FD has to be closed
os.close(val2)
else:
return type(val)(val, variant_level=get_variant_level(val) - amount, **kwargs)
def get_variant_sig(val, *, addToLevel=0):
if get_variant_level(val) + addToLevel < 0:
raise Exception('get_variant_level(val) + addToLevel < 0')
if get_variant_level(val) + addToLevel == 0:
raise Exception('value is not a variant')
if get_variant_level(val) + addToLevel > 1:
return 'v'
t = type(val)
if t == dbus.Byte:
return 'y'
if t == dbus.Int16:
return 'n'
if t == dbus.UInt16:
return 'q'
if t == dbus.Int32:
return 'i'
if t == dbus.UInt32:
return 'u'
if t == dbus.Int64:
return 'x'
if t == dbus.UInt64:
return 't'
if t == dbus.Double:
return 'd'
if t == dbus.Boolean:
return 'b'
if t == dbus.String:
return 's'
if t == dbus.ObjectPath:
return 'o'
if t == dbus.Signature:
return 'g'
if t == dbus.types.UnixFd:
return 'h'
if t == dbus.Array:
return 'a' + val.signature
if t == dbus.Dictionary:
return 'a{' + val.signature + '}'
if t == dbus.Struct:
if val.signature is not None:
return '(' + val.signature + ')'
else:
s = '('
for v in val:
s += get_variant_sig(v, addToLevel=1)
s += ')'
return s
raise Exception('Unknown type: ' + str(t))
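# Examples (each input carries variant_level=1):
#   get_variant_sig(dbus.Int32(5, variant_level=1))                       -> 'i'
#   get_variant_sig(dbus.Array([], signature='y', variant_level=1))       -> 'ay'
#   get_variant_sig(dbus.Dictionary({}, signature='sv', variant_level=1)) -> 'a{sv}'
# Anything nested more than one variant level deep is reported as 'v'.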
def add_arg(f):
return lambda value, callContext: f(value)
# TODO: Clean up, get dbusObject for variant from call 'self' parameter?
# TODO: Clean up in general, use more classes
def get_to_dbus_cast(sig, *, context, dbusObject, dbusObjectInfo, xmlElement, variant_level=0):
if context is not None and 'getConverterToDBus' in dir(context):
retVal = context.getConverterToDBus(
dbusType=sig, xmlElement=xmlElement, dbusObject=dbusObject, dbusObjectInfo=dbusObjectInfo, variantLevel=variant_level)
if retVal is not None:
return retVal
if sig == 'y':
return lambda value, callContext: dbus.Byte(value, variant_level=variant_level)
if sig == 'n':
return lambda value, callContext: dbus.Int16(value, variant_level=variant_level)
if sig == 'q':
return lambda value, callContext: dbus.UInt16(value, variant_level=variant_level)
if sig == 'i':
return lambda value, callContext: dbus.Int32(value, variant_level=variant_level)
if sig == 'u':
return lambda value, callContext: dbus.UInt32(value, variant_level=variant_level)
if sig == 'x':
return lambda value, callContext: dbus.Int64(value, variant_level=variant_level)
if sig == 't':
return lambda value, callContext: dbus.UInt64(value, variant_level=variant_level)
if sig == 'd':
return lambda value, callContext: dbus.Double(value, variant_level=variant_level)
if sig == 'b':
return lambda value, callContext: dbus.Boolean(value, variant_level=variant_level)
if sig == 's':
return lambda value, callContext: dbus.String(value, variant_level=variant_level)
if sig == 'o':
return lambda value, callContext: dbus.ObjectPath(value, variant_level=variant_level)
if sig == 'g':
return lambda value, callContext: dbus.Signature(value, variant_level=variant_level)
# A UnixFd parameter is expected to already be a dbus.types.UnixFd object
if sig == 'h':
def convert(value, callContext):
# TODO: should verify that variant_level is 0
if type(value) != dbus.types.UnixFd:
raise Exception(
                    'Expected a dbus.types.UnixFd, got a %s' % (type(value),))
return value
return convert
if sig == 'v':
def convertVariant(value, callContext):
ty = type(value)
if ty != Variant:
raise Exception('Expected a %s, got a %s' % (Variant, ty))
val = value.value
ty = type(val)
            # TODO: Should xmlElement be set to None here? (Will prevent annotations from having an effect on values passed as variants)
            # Set dbusObject to None here to prevent object cycles which break deterministic cleanup
cast = get_to_dbus_cast(value.signature, context=context, dbusObject=None,
dbusObjectInfo=dbusObjectInfo, xmlElement=None, variant_level=variant_level + 1)
dval = cast(val, callContext=callContext)
return dval
# return ty (dval, variant_level = get_variant_level(dval) + variant_level)
return convertVariant
if sig[0] == '(':
if sig[-1] != ')':
raise Exception("sig[-1] != ')'")
casts = []
for elem in dbus.Signature(sig[1:-1]):
casts.append(get_to_dbus_cast(elem, context=context, dbusObject=dbusObject,
dbusObjectInfo=dbusObjectInfo, xmlElement=xmlElement, variant_level=0))
def fun(value, callContext):
lval = len(value)
if lval != len(casts):
raise Exception("Invalid number of values for '%s' argument, expected %d, got %d" % (
sig, len(casts), lval))
res = []
for i in range(len(casts)):
res.append(casts[i](value[i], callContext=callContext))
return dbus.Struct(res, signature=sig[1:-1], variant_level=variant_level)
return fun
if sig[0:2] == 'a{':
if sig[-1] != '}':
raise Exception("sig[-1] != ')'")
t = list(dbus.Signature(sig[2:-1]))
if len(t) != 2:
raise Exception('len (t) != 2')
castn = get_to_dbus_cast(t[0], context=context, dbusObject=dbusObject,
dbusObjectInfo=dbusObjectInfo, xmlElement=xmlElement)
castv = get_to_dbus_cast(t[1], context=context, dbusObject=dbusObject,
dbusObjectInfo=dbusObjectInfo, xmlElement=xmlElement)
def fun(value, callContext):
res = {}
for name in value:
res[castn(name, callContext=callContext)] = castv(
value[name], callContext=callContext)
return dbus.Dictionary(res, signature=sig[2:-1], variant_level=variant_level)
return fun
if sig[0] == 'a':
cast = get_to_dbus_cast(sig[1:], context=context, dbusObject=dbusObject,
dbusObjectInfo=dbusObjectInfo, xmlElement=xmlElement)
def fun(value, callContext):
res = []
for i in value:
res.append(cast(i, callContext=callContext))
return dbus.Array(res, signature=sig[1:], variant_level=variant_level)
return fun
raise Exception('Unknown signature: ' + sig)
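# Usage sketch: a converter is built once per signature and then applied per
# value (context=None skips the optional getConverterToDBus hook):
#   cast = get_to_dbus_cast('ai', context=None, dbusObject=None,
#                           dbusObjectInfo=None, xmlElement=None)
#   cast([1, 2, 3], callContext=None)
#   # -> dbus.Array([dbus.Int32(1), dbus.Int32(2), dbus.Int32(3)], signature='i')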
def get_from_dbus_cast(sig, *, context, dbusObject, dbusObjectInfo, xmlElement, byte_arrays=None, ignore_variant_levels=0):
# TODO: Clean this up? Should the caller of the converter take care of this?
if ignore_variant_levels != 0:
converter = get_from_dbus_cast(sig, context=context, dbusObject=dbusObject, dbusObjectInfo=dbusObjectInfo, xmlElement=xmlElement, byte_arrays=byte_arrays, ignore_variant_levels=0)
def convertIgnoreVariantLevels(value, callContext):
return converter(reduce_variant_level(value, ignore_variant_levels), callContext)
return convertIgnoreVariantLevels
if context is not None and 'getConverterFromDBus' in dir(context):
retVal = context.getConverterFromDBus(
dbusType=sig, xmlElement=xmlElement, dbusObject=dbusObject, dbusObjectInfo=dbusObjectInfo)
if retVal is not None:
return retVal
if sig == 'ay' and byte_arrays:
return add_arg(bytes)
if sig == 'y':
return add_arg(int)
if sig == 'n':
return add_arg(int)
if sig == 'q':
return add_arg(int)
if sig == 'i':
return add_arg(int)
if sig == 'u':
return add_arg(int)
if sig == 'x':
return add_arg(int)
if sig == 't':
return add_arg(int)
if sig == 'd':
return add_arg(float)
if sig == 'b':
return add_arg(bool)
if sig == 's':
return add_arg(str)
if sig == 'o':
return add_arg(dbus.ObjectPath)
if sig == 'g':
return add_arg(dbus.Signature)
if sig == 'h':
# return add_arg(dbus.types.UnixFd) # Does not work, cannot pass a UnixFd to dbus.types.UnixFd
return lambda value, callContext: value
if sig == 'v':
def convertVariant(value, callContext):
sig = get_variant_sig(value)
            # TODO: Should xmlElement be set to None here? (Will prevent annotations from having an effect on values passed as variants)
            # Set dbusObject to None here to prevent object cycles which break deterministic cleanup
cast = get_from_dbus_cast(
sig, context=context, dbusObject=None, dbusObjectInfo=dbusObjectInfo, xmlElement=None, ignore_variant_levels=1)
dval = cast(value, callContext=callContext)
return Variant(sig, dval)
return convertVariant
if sig[0] == '(':
if sig[-1] != ')':
raise Exception("sig[-1] != ')'")
casts = []
for elem in dbus.Signature(sig[1:-1]):
casts.append(get_from_dbus_cast(elem, context=context, dbusObject=dbusObject,
dbusObjectInfo=dbusObjectInfo, xmlElement=xmlElement, byte_arrays=byte_arrays))
def fun(value, callContext):
lval = len(value)
if lval != len(casts):
raise Exception("Invalid number of values for '%s' argument, expected %d, got %d" % (
sig, len(casts), lval))
res = []
for i in range(len(casts)):
res.append(casts[i](value[i], callContext=callContext))
return tuple(res)
return fun
if sig[0:2] == 'a{':
if sig[-1] != '}':
raise Exception("sig[-1] != ')'")
t = list(dbus.Signature(sig[2:-1]))
if len(t) != 2:
raise Exception('len (t) != 2')
castn = get_from_dbus_cast(t[0], context=context, dbusObject=dbusObject,
dbusObjectInfo=dbusObjectInfo, xmlElement=xmlElement, byte_arrays=byte_arrays)
castv = get_from_dbus_cast(t[1], context=context, dbusObject=dbusObject,
dbusObjectInfo=dbusObjectInfo, xmlElement=xmlElement, byte_arrays=byte_arrays)
def fun(value, callContext):
res = {}
for name in value:
# print ('castv', t[1])
res[castn(name, callContext=callContext)] = castv(
value[name], callContext=callContext)
return res
return fun
if sig == 'a{sv}':
return lambda value, callContext: dict(list(map(lambda val: (str(val), value[val]), value)))
if sig == 'a{ss}':
return lambda value, callContext: dict(list(map(lambda val: (str(val), str(value[val])), value)))
if sig[0] == 'a':
cast = get_from_dbus_cast(sig[1:], context=context, dbusObject=dbusObject,
dbusObjectInfo=dbusObjectInfo, xmlElement=xmlElement, byte_arrays=byte_arrays)
def fun(value, callContext):
res = []
for i in value:
res.append(cast(i, callContext=callContext))
return res
return fun
raise Exception('Unknown signature: ' + sig)
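# Usage sketch (mirror of get_to_dbus_cast), converting dbus values back to
# plain Python:
#   cast = get_from_dbus_cast('as', context=None, dbusObject=None,
#                             dbusObjectInfo=None, xmlElement=None)
#   cast(dbus.Array([dbus.String('x')], signature='s'), callContext=None)
#   # -> ['x'] (a plain list of str)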
class DBusObjectContext(object):
def __init__(self, interfaces):
self.handleMessagesDefault = False
self.interfaces = {}
for interface in interfaces:
if interface.tag == 'interface':
name = interface.attrib['name']
# print(name)
self.interfaces[name] = interface
class DBusCallContext(object):
def success(self):
pass
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
return False
class DBusServiceCallContext(object):
def __init__(self, info):
self.info = info
def success(self):
pass
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
return False
class DBusServiceCallInfo(object):
pass
class DBusObject(object):
def __init__(self, obj, interfaces, context=None, referenceCountingObject=None):
if type(interfaces) != list:
raise Exception('interfaces is not a list but a %s' %
(type(interfaces),))
self.__busObject = obj
self.__bus = obj._bus
self.__busName = str(obj.bus_name) if obj.bus_name is not None else ''
# Make sure self._objectPath has a variant level of 0
self.__objectPath = dbus.ObjectPath(str(obj.object_path))
self.__interfaces = interfaces
self.__propObj = dbus.Interface(obj, 'org.freedesktop.DBus.Properties')
self.__propget = {}
self.__methods = {}
self.__propset = {}
self.__context = context
self.__referenceCountingObject = referenceCountingObject
self.__dbusObjectInfo = {'bus': self.__bus, 'busName': self.__busName,
'objectPath': self.__objectPath, 'interfaces': self.__interfaces}
self.__names = []
self.__names = list(object.__dir__(self))
introspectionResult = None
introspectionResultDoc = None
for interfaceName in interfaces:
if self.__context is None:
if introspectionResultDoc is None:
introspectable = dbus.Interface(
obj, 'org.freedesktop.DBus.Introspectable')
introspectionResult = introspectable.Introspect()
introspectionResultDoc = xml.etree.ElementTree.fromstring(
introspectionResult)
interface = None
for child in introspectionResultDoc:
if child.tag == 'interface' and child.attrib['name'] == interfaceName:
# print (child)
interface = child
if interface is None:
raise Exception('Could not find interface ' +
interfaceName + ' in reflection data')
else:
interface = self.__context.interfaces[interfaceName]
# print (interface)
for child in interface:
if child.tag == 'method':
name = child.attrib['name']
if name in self.__methods:
continue
if name in self.__names:
addToPropget = False
else:
addToPropget = True
self.__names += [name]
cnt = 0
icnt = 0
rsig = None
rXmlElement = None
parnames = []
types = []
argXmlElements = []
typesd = {}
argXmlElementsD = {}
for arg in child:
if arg.tag != 'arg':
continue
if arg.attrib['direction'] != 'out':
if 'name' in arg.attrib:
paramName = arg.attrib['name']
else:
paramName = 'arg%d' % icnt
parnames.append(paramName)
types.append(arg.attrib['type'])
argXmlElements.append(arg)
typesd[paramName] = arg.attrib['type']
argXmlElementsD[paramName] = arg
icnt = icnt + 1
continue
cnt += 1
if cnt != 1:
rsig = None
rXmlElement = None
else:
rsig = arg.attrib['type']
rXmlElement = arg
defValues = {}
defValuesVal = {}
if self.__context is not None and 'defaultParameters' in dir(self.__context):
defPars = self.__context.defaultParameters(
parameterNames=parnames, parameterTypes=types)
for pname in defPars:
defValues[pname] = 'None'
defValuesVal[pname] = defPars[pname]
implicitParameters = {}
if self.__context is not None and 'implicitParameters' in dir(self.__context):
implicitParameters = self.__context.implicitParameters(
parameterNames=parnames, parameterTypes=types)
implicitParameterData = []
for i in range(len(parnames)):
if parnames[i] in implicitParameters:
implicitParameterData.append(
(i, implicitParameters[parnames[i]]))
for dat in reversed(implicitParameterData):
i = dat[0]
del parnames[i]
del types[i]
del argXmlElements[i]
icnt = icnt - 1
def cast(value, callContext):
return value
def castb(value, callContext):
return value
# print (name, rsig)
if rsig is not None:
cast = get_from_dbus_cast(rsig, context=self.__context, dbusObject=self,
dbusObjectInfo=self.__dbusObjectInfo, xmlElement=rXmlElement, byte_arrays=False)
castb = get_from_dbus_cast(rsig, context=self.__context, dbusObject=self,
dbusObjectInfo=self.__dbusObjectInfo, xmlElement=rXmlElement, byte_arrays=True)
inCast = []
inCastD = {}
# for t in types:
for i in range(len(types)):
t = types[i]
inCast.append(get_to_dbus_cast(t, context=self.__context, dbusObject=self,
dbusObjectInfo=self.__dbusObjectInfo, xmlElement=argXmlElements[i], variant_level=0))
for nm in typesd:
inCastD[nm] = get_to_dbus_cast(typesd[nm], context=self.__context, dbusObject=self,
dbusObjectInfo=self.__dbusObjectInfo, xmlElement=argXmlElementsD[nm], variant_level=0)
method = getattr(dbus.Interface(
self.__busObject, interfaceName), name)
# Note: self.__context (and anything else using self) must not be used inside the closure to avoid circular references which would prevent deterministic cleanup
def make_closure(method=method, cast=cast, castb=castb, types=types, typesd=typesd, inCast=inCast, inCastD=inCastD, parnames=parnames, defValuesVal=defValuesVal, context=self.__context, implicitParameterData=implicitParameterData, methodXml=child):
def dbusFunctionWrapper(self, *args, **kwargs):
# print(self)
# print ('Called %s with %s %s' % (method, args, kwargs))
args = list(args)
kwargs = dict(kwargs)
handleMessages = False
timeout = None
if context is not None:
                                handleMessages = context.handleMessagesDefault
if 'DBusObject_timeout' in kwargs:
if kwargs['DBusObject_timeout'] is not None:
timeout = float(
kwargs['DBusObject_timeout'])
del kwargs['DBusObject_timeout']
if 'DBusObject_handleMessages' in kwargs:
handleMessages = bool(
kwargs['DBusObject_handleMessages'])
del kwargs['DBusObject_handleMessages']
if handleMessages and not getattr(context, 'iteration', False):
raise Exception(
'DBusObject_handleMessages is True but context object has no "iteration" member')
with (context.createCallContext(dbusObject=self, xmlElement=methodXml) if context is not None and 'createCallContext' in dir(context) else DBusCallContext()) as callContext:
for i in range(len(args)):
val = args[i]
if val is None and parnames[i] in defValuesVal:
val = defValuesVal[parnames[i]]
args[i] = inCast[i](
val, callContext=callContext)
while len(args) < len(parnames):
i = len(args)
name = parnames[i]
val = kwargs[name]
if val is None and name in defValuesVal:
val = defValuesVal[name]
val = inCastD[name](
val, callContext=callContext)
del kwargs[name]
args.append(val)
if len(kwargs) != 0:
raise Exception(
'Got leftover keyword arguments: %s' % (repr(kwargs),))
for dat in implicitParameterData:
args.insert(dat[0], dat[1])
if timeout is not None:
kwargs['timeout'] = timeout
# print ('Calling %s with %s %s' % (method, args, kwargs))
if not handleMessages:
res0 = method(*args, **kwargs)
else:
retVal = []
errorVal = []
def lazyDataReply(data=None):
retVal.append(data)
def lazyDataError(error):
errorVal.append(error)
method(
*args, **kwargs, reply_handler=lazyDataReply, error_handler=lazyDataError)
while len(retVal) == 0 and len(errorVal) == 0:
context.iteration()
if len(errorVal) != 0:
raise errorVal[0]
res0 = retVal[0]
res = (castb if kwargs.get('byte_arrays') else cast)(
res0, callContext=callContext)
callContext.success()
return res
return dbusFunctionWrapper
# func = make_closure ()
func = fake_arglist(
make_closure(), name, parnames, defValues)
if addToPropget:
# For tab completion
object.__setattr__(self, name, None)
self.__propget[name] = (
lambda func=func: lambda newSelf: func.__get__(newSelf, None))()
self.__methods[name] = (
lambda func=func: lambda newSelf: func.__get__(newSelf, None))()
elif child.tag == 'property':
# TODO: use context.handleMessagesDefault for properties
name = child.attrib['name']
if name in self.__names:
continue
self.__names += [name]
# print (name)
object.__setattr__(self, name, None) # For tab completion
# TODO: Check signature of variant?
cast = get_from_dbus_cast(
child.attrib['type'], context=self.__context, dbusObject=self, dbusObjectInfo=self.__dbusObjectInfo, xmlElement=child, ignore_variant_levels=1)
# Note: self.__context and self.__propObj (and anything else using self) must not be used inside the closure to avoid circular references which would prevent deterministic cleanup
def make_closure(self, interfaceName, name, cast, context=self.__context, propObj=self.__propObj):
def getter(newSelf):
with (context.createCallContext(dbusObject=newSelf, xmlElement=None) if context is not None and 'createCallContext' in dir(context) else DBusCallContext()) as callContext:
res = cast(propObj.Get(
interfaceName, name), callContext=callContext)
callContext.success()
return res
return getter
self.__propget[name] = make_closure(
self, interfaceName, name, cast)
cast = get_to_dbus_cast(child.attrib['type'], context=self.__context, dbusObject=self,
dbusObjectInfo=self.__dbusObjectInfo, xmlElement=child, variant_level=1)
# Note: self.__context and self.__propObj (and anything else using self) must not be used inside the closure to avoid circular references which would prevent deterministic cleanup
def make_closure(self, interfaceName, name, cast, context=self.__context, propObj=self.__propObj):
def setter(newSelf, value):
with (context.createCallContext(dbusObject=newSelf, xmlElement=None) if context is not None and 'createCallContext' in dir(context) else DBusCallContext()) as callContext:
propObj.Set(interfaceName, name, cast(
value, callContext=callContext))
callContext.success()
return setter
self.__propset[name] = make_closure(
self, interfaceName, name, cast)
elif child.tag == 'signal':
name = child.attrib['name']
if name in self.__names:
continue
self.__names += [name]
# print (name)
object.__setattr__(self, name, None) # For tab completion
# cast = get_from_dbus_cast (child.attrib['type'], context = self.__context, dbusObject = self, dbusObjectInfo = self.__dbusObjectInfo, xmlElement = )
# Note: self.__context and self.__propObj (and anything else using self) must not be used inside the closure to avoid circular references which would prevent deterministic cleanup
def make_closure(self, interfaceName, name, context=self.__context, interfaceObj=dbus.Interface(self.__busObject, interfaceName)):
return lambda newSelf: lambda handler: interfaceObj.connect_to_signal(name, handler, dbus_interface=interfaceName)
self.__propget[name] = make_closure(
self, interfaceName, name)
def __dir__(self):
return self.__names
def __getattribute__(self, name):
if not name.startswith('_') and name in self.__propget:
return self.__propget[name](self)
return object.__getattribute__(self, name)
def __setattr__(self, name, value):
if not name.startswith('_'):
self.__propset[name](self, value)
return
object.__setattr__(self, name, value)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.__referenceCountingObject is not None:
self.__referenceCountingObject.destroy()
return False
def __repr__(self):
contextStr = ''
if self.__context is not None:
contextStr = ', context = ' + repr(self.__context)
referenceCountingObjectStr = ''
if self.__referenceCountingObject is not None:
referenceCountingObjectStr = ', referenceCountingObject = ' + \
repr(self.__referenceCountingObject)
return 'DBusObject((%s, %s, %s), %s%s%s)' % (repr(self.__bus), repr(self.__busName), repr(str(self.__objectPath)), repr(self.__interfaces), contextStr, referenceCountingObjectStr)
@property
def _context(self):
return self.__context
@property
def _connection(self):
return self.__bus
@property
def _busName(self):
return self.__busName
@property
def _interfaces(self):
return self.__interfaces
@property
def _objectPath(self):
return self.__objectPath
@property
def _referenceCountingObject(self):
return self.__referenceCountingObject
@_referenceCountingObject.setter
def _referenceCountingObject(self, value):
self.__referenceCountingObject = value
def _getDBusMethod(self, name):
return self.__methods[name](self)
def _clone(self):
oldRefObj = self._referenceCountingObject
if self._referenceCountingObject is None:
newRefObj = None
else:
newRefObj = self._referenceCountingObject.clone()
context = self._context
if hasattr(context, 'makeObject'):
return context.makeObject(self._connection, self._busName, self._objectPath, self._interfaces, referenceCountingObject=newRefObj)
else:
return DBusObject(self._connection, self._busName, self._objectPath, self._interfaces, referenceCountingObject=newRefObj)
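# Usage sketch (assumptions: a running session bus and a service exposing the
# named interface; the bus and object names below are illustrative only):
#   bus = dbus.SessionBus()
#   proxy = bus.get_object('org.example.Service', '/org/example/Object')
#   with DBusObject(proxy, ['org.example.Interface']) as obj:
#       obj.SomeProperty       # property read via org.freedesktop.DBus.Properties
#       obj.SomeMethod(1, 2)   # method call with automatic type conversion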
class DBusExportObject(dbus.service.Object):
def __init__(self, interfaces, *, context):
dbus.service.Object.__init__(self)
if type(interfaces) != list:
raise Exception('interfaces is not a list but a %s' %
(type(interfaces),))
interfaces = list(interfaces)
if 'org.freedesktop.DBus.Properties' not in interfaces:
interfaces.append('org.freedesktop.DBus.Properties')
if 'org.freedesktop.DBus.Introspectable' not in interfaces:
interfaces.append('org.freedesktop.DBus.Introspectable')
self.__interfaces = interfaces
self.__context = context
self.__dbusMethods = []
self.__propgetimpl = {}
self.__propsetimpl = {}
self.__proptype = {}
names = set()
impls = dir(self)
newClassDict = {}
introspectionResult = None
introspectionResultDoc = None
for interfaceName in interfaces:
interface = self.__context.interfaces[interfaceName]
# print (interface)
for child in interface:
if child.tag == 'method':
name = child.attrib['name']
if name in names:
print('Warning: DBusExportObject: Ignoring hidden method %s.%s' % (
interfaceName, name), file=sys.stderr)
continue
names.add(name)
if name not in impls:
print('Warning: DBusExportObject: Missing implementation for method %s.%s' % (
interfaceName, name), file=sys.stderr)
continue
cnt = 0
icnt = 0
rsig = None
rXmlElement = None
parnames = []
types = []
argXmlElements = []
typesd = {}
argXmlElementsD = {}
inSig = ''
outSig = ''
for arg in child:
if arg.tag != 'arg':
continue
if arg.attrib['direction'] != 'out':
inSig += arg.attrib['type']
if 'name' in arg.attrib:
paramName = arg.attrib['name']
else:
paramName = 'arg%d' % icnt
parnames.append(paramName)
types.append(arg.attrib['type'])
argXmlElements.append(arg)
typesd[paramName] = arg.attrib['type']
argXmlElementsD[paramName] = arg
icnt = icnt + 1
continue
outSig += arg.attrib['type']
cnt += 1
if cnt != 1:
rsig = None
rXmlElement = None
else:
rsig = arg.attrib['type']
rXmlElement = arg
defValues = {}
defValuesVal = {}
def cast(value, callContext):
return value
# print (name, rsig)
if rsig is not None:
cast = get_to_dbus_cast(rsig, context=self.__context, dbusObject=None,
dbusObjectInfo=None, xmlElement=rXmlElement, variant_level=0)
inCast = []
inCastD = {}
# for t in types:
for i in range(len(types)):
t = types[i]
inCast.append(get_from_dbus_cast(t, context=self.__context, dbusObject=None,
dbusObjectInfo=None, xmlElement=argXmlElements[i], byte_arrays=False))
for nm in typesd:
inCastD[nm] = get_from_dbus_cast(
typesd[nm], context=self.__context, dbusObject=None, dbusObjectInfo=None, xmlElement=argXmlElementsD[nm], byte_arrays=False)
method = getattr(type(self), name)
if isinstance(method, property):
raise Exception('Method %s is a property' % (name,))
# print(method)
methodSig = inspect.signature(method)
methodKwOnlyArguments = set()
for arg in methodSig.parameters.values():
if arg.kind == inspect.Parameter.KEYWORD_ONLY:
methodKwOnlyArguments.add(arg.name)
# print (method, methodSig, methodKwOnlyArguments)
addInfoArg = 'dbusServiceCallInfo' in methodKwOnlyArguments
def make_closure(method=method, cast=cast, types=types, typesd=typesd, inCast=inCast, inCastD=inCastD, parnames=parnames, defValuesVal=defValuesVal, context=self.__context, methodXml=child, addInfoArg=addInfoArg):
def dbusFunctionWrapper(self, *args, _DBusExportObject_info_sender, _DBusExportObject_info_path, _DBusExportObject_info_destination, _DBusExportObject_info_message, _DBusExportObject_info_connection, _DBusExportObject_info_rel_path, **kwargs):
# print ('Called %s on %s with %s %s' % (method, self, args, kwargs))
info = DBusServiceCallInfo()
info.sender = _DBusExportObject_info_sender
info.object_path = _DBusExportObject_info_path
info.destination = _DBusExportObject_info_destination
info.message = _DBusExportObject_info_message
info.connection = _DBusExportObject_info_connection
info.rel_path = _DBusExportObject_info_rel_path
args = list(args)
kwargs = dict(kwargs)
with (context.createServiceCallContext(dbusObject=self, xmlElement=methodXml, info=info) if context is not None and 'createServiceCallContext' in dir(context) else DBusServiceCallContext(info=info)) as callContext:
for i in range(len(args)):
val = args[i]
if val is None and parnames[i] in defValuesVal:
val = defValuesVal[parnames[i]]
args[i] = inCast[i](
val, callContext=callContext)
while len(args) < len(parnames):
i = len(args)
name = parnames[i]
val = kwargs[name]
if val is None and name in defValuesVal:
val = defValuesVal[name]
val = inCastD[name](
val, callContext=callContext)
del kwargs[name]
args.append(val)
if len(kwargs) != 0:
raise Exception(
'Got leftover keyword arguments: %s' % (repr(kwargs),))
if addInfoArg:
kwargs['dbusServiceCallInfo'] = info
# print ('Calling %s with %s %s' % (method, args, kwargs))
res = cast(method(self, *args, **kwargs),
callContext=callContext)
callContext.success()
return res
return dbusFunctionWrapper
func = make_closure()
func.__wrapped__ = method
kwonlyArgs = [
'_DBusExportObject_info_sender',
'_DBusExportObject_info_path',
'_DBusExportObject_info_destination',
'_DBusExportObject_info_message',
'_DBusExportObject_info_connection',
'_DBusExportObject_info_rel_path',
]
# Should work without this in newer dbus-python versions
func = fake_arglist(
func, name, parnames, kwonlyArgs=kwonlyArgs, makeKWOnlyArgsNormal=True)
# func = method
# print (inSig, outSig, func, inspect.signature(func))
dbusMethod = dbus.service.method(dbus_interface=interfaceName, in_signature=inSig, out_signature=outSig, sender_keyword='_DBusExportObject_info_sender', path_keyword='_DBusExportObject_info_path',
destination_keyword='_DBusExportObject_info_destination', message_keyword='_DBusExportObject_info_message', connection_keyword='_DBusExportObject_info_connection', rel_path_keyword='_DBusExportObject_info_rel_path')(func)
self.__dbusMethods.append(dbusMethod)
# setattr(self, name, dbusMethod)
newClassDict[name] = dbusMethod
elif child.tag == 'property':
name = child.attrib['name']
if name in names:
print('Warning: DBusExportObject: Ignoring hidden property %s.%s' % (
interfaceName, name), file=sys.stderr)
continue
names.add(name)
if name not in impls:
                        print('Warning: DBusExportObject: Missing implementation for property %s.%s' % (
                            interfaceName, name), file=sys.stderr)
continue
access = child.attrib['access']
if access not in ['readwrite', 'read', 'write']:
raise Exception(
'Invalid "access" value: ' + repr(access))
prop = getattr(type(self), name)
if not isinstance(prop, property):
raise Exception(
'Property %s is not a property' % (name,))
# print (name)
self.__proptype[interfaceName +
'.' + name] = child.attrib['type']
if access in ['read', 'readwrite']:
if prop.fget is None:
raise Exception(
'Property %s is not readable' % name)
cast = get_to_dbus_cast(child.attrib['type'], context=self.__context, dbusObject=None,
dbusObjectInfo=None, xmlElement=child, variant_level=0) # TODO: variant_level?
def make_closure(self, interfaceName, name, cast, context=self.__context, prop=prop):
def getter(newSelf, info):
with (context.createServiceCallContext(dbusObject=newSelf, xmlElement=None, info=info) if context is not None and 'createServiceCallContext' in dir(context) else DBusServiceCallContext(info=info)) as callContext:
# TODO: remove cast because Get() already does the cast. What should happen to annotations, how should they be forwarded to Get()?
# res = cast (prop.fget(newSelf), callContext = callContext)
res = prop.fget(newSelf)
callContext.success()
return res
return getter
self.__propgetimpl[interfaceName + '.' +
name] = make_closure(self, interfaceName, name, cast)
if access in ['write', 'readwrite']:
if prop.fset is None:
raise Exception(
'Property %s is not writable' % name)
# TODO: Check signature of variant?
cast = get_from_dbus_cast(
child.attrib['type'], context=self.__context, dbusObject=None, dbusObjectInfo=None, xmlElement=child, ignore_variant_levels=1)
def make_closure(self, interfaceName, name, cast, context=self.__context, prop=prop):
def setter(newSelf, value, info):
with (context.createServiceCallContext(dbusObject=newSelf, xmlElement=None, info=info) if context is not None and 'createServiceCallContext' in dir(context) else DBusServiceCallContext(info=info)) as callContext:
prop.fset(newSelf, cast(
value, callContext=callContext))
callContext.success()
return setter
self.__propsetimpl[interfaceName + '.' +
name] = make_closure(self, interfaceName, name, cast)
elif child.tag == 'signal':
name = child.attrib['name']
if name in names:
print('Warning: DBusExportObject: Ignoring hidden signal %s.%s' % (
interfaceName, name), file=sys.stderr)
continue
names.add(name)
raise Exception('TODO: not implemented')
newClass = type('DBusExportObjectClass_' +
self.__class__.__name__, (self.__class__,), newClassDict)
# print(self.__class__, newClass)
self.__class__ = newClass
def Introspect(self, *, dbusServiceCallInfo):
data = '<!DOCTYPE node PUBLIC "-//freedesktop//DTD D-BUS Object Introspection 1.0//EN"\n"http://www.freedesktop.org/standards/dbus/1.0/introspect.dtd">\n'
data += '<node name="%s">\n' % dbusServiceCallInfo.object_path
for interfaceName in self.__interfaces:
data += ' '
interfaceData = io.StringIO()
xml.etree.ElementTree.ElementTree(self.__context.interfaces[interfaceName]).write(
interfaceData, encoding='unicode')
data += interfaceData.getvalue().strip()
data += '\n'
for name in dbusServiceCallInfo.connection.list_exported_child_objects(dbusServiceCallInfo.object_path):
data += ' <node name="%s"/>\n' % name
data += '</node>\n'
return data
def Get(self, interface_name, property_name, *, dbusServiceCallInfo):
if interface_name == "":
raise Exception(
'Getting property values without interface name is not supported')
name = interface_name + '.' + property_name
if '.' in property_name:
            raise Exception(
                'Property names containing "." are not supported')
if name not in self.__propgetimpl:
if name in self.__propsetimpl:
raise Exception(
'Property %s in interface %s is a write-only property' % (interface_name, property_name))
raise Exception('Property %s in interface %s not found' %
(interface_name, property_name))
return Variant(self.__proptype[name], self.__propgetimpl[name](self, info=dbusServiceCallInfo))
def GetAll(self, interface_name, *, dbusServiceCallInfo):
if interface_name == "":
raise Exception(
'Getting property values without interface name is not supported')
result = {}
prefix = interface_name + '.'
for name in self.__propgetimpl:
if not name.startswith(prefix):
continue
pname = name[len(prefix):]
result[pname] = Variant(self.__proptype[name], self.__propgetimpl[name](
self, info=dbusServiceCallInfo))
return result
def Set(self, interface_name, property_name, value, *, dbusServiceCallInfo):
if interface_name == "":
            raise Exception(
                'Setting property values without interface name is not supported')
name = interface_name + '.' + property_name
if '.' in property_name:
            raise Exception(
                'Property names containing "." are not supported')
if name not in self.__propsetimpl:
if name in self.__propgetimpl:
raise Exception(
'Property %s in interface %s is a read-only property' % (interface_name, property_name))
raise Exception('Property %s in interface %s not found' %
(interface_name, property_name))
# TODO: from_dbus converter will be called twice here?
self.__propsetimpl[name](self, value, info=dbusServiceCallInfo)
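if __name__ == '__main__':
    # Minimal self-test sketch (an illustrative addition, not part of the
    # original API surface). These helpers need no bus connection because
    # dbus types can be constructed standalone.
    print(get_variant_sig(dbus.Int32(5, variant_level=1)))  # 'i'
    print(get_variant_sig(
        dbus.Dictionary({}, signature='sv', variant_level=1)))  # 'a{sv}'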
|
voxie-viewer/voxie
|
pythonlib/voxie/dbusobject.py
|
Python
|
mit
| 57,901
|
import numpy as np
#from scipy.special import sph_jn
# put best fit values of As and alpha from chi square analysis
or0 = 0.0000475
thetacmb = 2.728/2.7
def zeq(om0, h):
return 2.5*10**4.*om0*h**2.*thetacmb**(-4.)
def bb1(om0, h):
return 0.313*(om0*h**2.)**(-0.419)*(1.+0.607*(om0*h**2.)**0.674)
def bb2(om0, h):
return 0.238*(om0*h**2.)**0.223
def zd(om0, ob0, h):
return 1291.*((om0*h**2.)**0.251*(1.+bb1(om0, h)*(ob0*h**2.)**bb2(om0, h)))/(1.+0.659*(om0*h**2.)**0.828)
def keq(om0, h):
return 0.0746*om0*h**2*thetacmb**(-2.)
def Req(om0, ob0, h):
return 31.5*ob0*h**2.*thetacmb**(-4.)*(1000/zeq(om0, h))
def Rd(om0, ob0, h):
return 31.5*ob0*h**2.*thetacmb**(-4.)*(1000/zd(om0, ob0, h))
def s(om0, ob0, h):
return (2./(3.*keq(om0, h)))*np.sqrt(6/Req(om0, ob0, h))*np.log((np.sqrt(1.+Rd(om0, ob0, h))+np.sqrt(Rd(om0, ob0, h)+Req(om0, ob0, h)))/(1+np.sqrt(Req(om0, ob0, h))))
def ksilk(om0, ob0, h):
return 1.6*(ob0*h**2.)**0.52*(om0*h**2.)**0.73*(1.+(10.4*om0*h**2.)**(-0.95))
def a1(om0, h):
return (46.9*om0*h**2.)**(0.670)*(1.+(32.1*om0*h**2.)**(-0.532))
def a2(om0, h):
return (12.0*om0*h**2.)**(0.424)*(1.+(45.0*om0*h**2.)**(-0.582))
def alphac(om0, ob0, h):
return a1(om0, h)**(-ob0/om0)*a2(om0, h)**(-(ob0/om0)**3.)
def b1(om0, h):
return 0.944*(1.+(458.*om0*h**2.)**(-0.708))**(-1.0)
def b2(om0, h):
return (0.395*om0*h**2.)**(-0.0266)
def betac(om0, ob0, h):
return 1./(1.+b1(om0, h)*(((om0-ob0)/om0)**b2(om0, h)-1.))
def q(k, om0, h):
return k/(13.41*keq(om0, h))
def C1(x, k, om0, h):
return 14.2/x + 386./(1.+69.9*q(k, om0, h)**1.08)
def T0(k, x, y, om0, ob0, h):
return np.log(np.e+1.8*y*q(k, om0, h))/(np.log(np.e+1.8*y*q(k, om0, h))+C1(x, k, om0, h)*q(k, om0, h)**2.)
def f(k, om0, ob0, h):
return 1./(1.+(k*s(om0, ob0, h)/5.4)**4.)
def Tc(k, x, y, om0, ob0, h):
return f(k, om0, ob0, h)*T0(k, 1.0, y, om0, ob0, h) + (1.-f(k, om0, ob0, h))*T0(k, x, y, om0, ob0, h)
# Baryon Transfer Function
def G(x):
return x*(-6.*np.sqrt(1.+x)+(2.+3.*x)*np.log((np.sqrt(1.+x)+1.)/(np.sqrt(1.+x)-1.)))
def alphab(om0, ob0, h):
return 2.07*keq(om0, h)*s(om0, ob0, h)*(1.+Rd(om0, ob0, h))**(-3./4)*G((1.+zeq(om0, h))/(1+zd(om0, ob0, h)))
def betanode(om0, h):
return 8.41*(om0*h**2.)**0.435
def betab(om0, ob0, h):
return 0.5+ob0/om0+(3.-2.*ob0/om0)*np.sqrt((17.2*om0*h**2.)**2.+1.)
def s1(k, om0, ob0, h):
return s(om0, ob0, h)/((1.+(betanode(om0, h)/(k*s(om0, ob0, h)))**3.)**(1./3))
def Tb(k, x1, y1, om0, ob0, h):
return (T0(k, 1.0, 1.0, om0, ob0, h)/(1.+(k*s(om0, ob0, h)/5.2)**2.)+x1/(1.+(y1/(k*s(om0, ob0, h)))**3.)*np.exp(-(k/ksilk(om0, ob0, h))**1.4))*np.sin(k*s1(k, om0, ob0, h))/(k*s1(k, om0, ob0, h))
# Total Transfer Function (combined baryon + CDM, used to build the power spectrum)
def Twh(k, om0, ob0, h):
kk = k*h
if ob0 == 0:
ans = T0(kk, alphac(om0, ob0, h), betac(om0, ob0, h), om0, ob0, h)
else:
ans = ob0/om0*Tb(kk, alphab(om0, ob0, h), betab(om0, ob0, h), om0, ob0, h)+(
om0-ob0)/om0*Tc(kk, alphac(om0, ob0, h), betac(om0, ob0, h), om0, ob0, h)
return ans
# print s,zeq,zd
# BBKS Transfer Function
def gm(om0, ob0, h):
return om0*h*np.exp(-ob0*(1. + np.sqrt(2.*h)/om0))
def q1(k, om0, ob0, h):
return k/(gm(om0, ob0, h))
def Tbbks(k, om0, ob0, h):
return (np.log(1. + 2.34*q1(k, om0, ob0, h))/(2.34*q1(k, om0, ob0, h)))*(1.+3.89*q1(k, om0, ob0, h) + (16.1*q1(k, om0, ob0, h))**2. + (5.46*q1(k, om0, ob0, h))**3. + (6.71*q1(k, om0, ob0, h))**4)**(-0.25)
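# Minimal usage sketch; the cosmological parameters below (om0=0.3, ob0=0.05,
# h=0.7) are illustrative assumptions, not values taken from this repository.
if __name__ == '__main__':
    for k in np.logspace(-3., 0., 4):  # wavenumber grid
        print(k, Twh(k, 0.3, 0.05, 0.7), Tbbks(k, 0.3, 0.05, 0.7))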
|
sum33it/scalpy
|
scalpy/transfer_func.py
|
Python
|
gpl-3.0
| 3,538
|
"""Constants for the NFAndroidTV integration."""
DOMAIN: str = "nfandroidtv"
CONF_DURATION = "duration"
CONF_FONTSIZE = "fontsize"
CONF_POSITION = "position"
CONF_TRANSPARENCY = "transparency"
CONF_COLOR = "color"
CONF_INTERRUPT = "interrupt"
DEFAULT_NAME = "Android TV / Fire TV"
DEFAULT_TIMEOUT = 5
ATTR_DURATION = "duration"
ATTR_FONTSIZE = "fontsize"
ATTR_POSITION = "position"
ATTR_TRANSPARENCY = "transparency"
ATTR_COLOR = "color"
ATTR_BKGCOLOR = "bkgcolor"
ATTR_INTERRUPT = "interrupt"
ATTR_IMAGE = "image"
# Attributes contained in image
ATTR_IMAGE_URL = "url"
ATTR_IMAGE_PATH = "path"
ATTR_IMAGE_USERNAME = "username"
ATTR_IMAGE_PASSWORD = "password"
ATTR_IMAGE_AUTH = "auth"
ATTR_ICON = "icon"
# Attributes contained in icon
ATTR_ICON_URL = "url"
ATTR_ICON_PATH = "path"
ATTR_ICON_USERNAME = "username"
ATTR_ICON_PASSWORD = "password"
ATTR_ICON_AUTH = "auth"
# Any other value or absence of 'auth' leads to basic authentication being used
ATTR_IMAGE_AUTH_DIGEST = "digest"
ATTR_ICON_AUTH_DIGEST = "digest"
|
jawilson/home-assistant
|
homeassistant/components/nfandroidtv/const.py
|
Python
|
apache-2.0
| 1,018
|