repo_name
stringlengths 5
100
| path
stringlengths 4
375
| copies
stringclasses 991
values | size
stringlengths 4
7
| content
stringlengths 666
1M
| license
stringclasses 15
values |
|---|---|---|---|---|---|
lhellebr/spacewalk
|
java/scripts/api/kickstarttests.py
|
8
|
5508
|
#!/usr/bin/python
import unittest
import random
from config import *
# Sample anaconda-generated kickstart payload used by the import/create tests.
# Fixed: the 1-drive %pre branch redirected to /tmp/part-includ (missing the
# trailing 'e'), so its /boot partition line never landed in the same
# /tmp/part-include file every other echo in %pre writes to.
KICKSTART_FILE = """
# Kickstart file automatically generated by anaconda.
install
nfs --server=shell.boston.redhat.com --dir=/mnt/redhat/iso/f7-64
lang en_US.UTF-8
keyboard us
xconfig --startxonboot
network --device eth0 --bootproto dhcp --hostname manus.boston.redhat.com
rootpw --iscrypted $1$XfFmKil8$NyKJOMScsYgSE53j8/Lq51
firewall --enabled --port=22:tcp
%include /tmp/somefile
authconfig --enableshadow --enablemd5
selinux --enforcing
timezone --utc America/New_York
bootloader --location=mbr --driveorder=sda,sdb --append="rhgb quiet"
# The following is the partition information you requested
# Note that any partitions you deleted are not expressed
# here so unless you clear all partitions first, this is
# not guaranteed to work
clearpart --linux --drives=sda,sdb
part /boot --fstype ext3 --size=100 --ondisk=sda
part pv.27 --size=0 --grow --ondisk=sdb
part pv.26 --size=0 --grow --ondisk=sda
volgroup VolGroup00 --pesize=32768 pv.27 pv.26
logvol / --fstype ext3 --name=LogVol00 --vgname=VolGroup00 --size=1024 --grow
logvol swap --fstype swap --name=LogVol01 --vgname=VolGroup00 --size=1000 --grow --maxsize=1984
%packages
@office
@admin-tools
@editors
@system-tools
@japanese-support
@gnome-software-development
@text-internet
@x-software-development
@virtualization
@gnome-desktop
@core
@authoring-and-publishing
@irish-support
@base
@hardware-support
@games
@java
@java-development
@base-x
@graphics
@web-server
@ruby
@printing
@server-cfg
@development-libs
@development-tools
@sound-and-video
@graphical-internet
createrepo
festival
fuse
audit
perl-NKF
anthy
lynx
mesa-libGLU-devel
scribus
pax
bridge-utils
fonts-chinese
fonts-korean
fonts-japanese
libsane-hpaio
netpbm-progs
kdegraphics
apachetop
awstats
imake
jack-audio-connection-kit
-zsh
-zisofs-tools
-xdelta
-slrn
-ipw2200-firmware
-ipw2100-firmware
-zd1211-firmware
-iwlwifi-firmware
-synaptics
-dcraw
-gimp-help
-tux
-squid
-httpd-manual
-lockdev-devel
-expat-devel
-cyrus-sasl-devel
-gpm-devel
-pciutils-devel
-kudzu-devel
-openldap-devel
-db4-devel
-gmp-devel
-openssl-devel
-curl-devel
-coolkey-devel
-boost-devel
-libcap-devel
-gdbm-devel
-libacl-devel
-libattr-devel
-binutils-devel
-krb5-devel
-libuser-devel
-hesiod-devel
-libogg-devel
-pcsc-lite-devel
-libvorbis-devel
-krbafs-devel
-ltrace
-python-ldap
-byacc
-frysk
-gcc-gfortran
-rcs
-ccache
-swig
-pstack
-cscope
-icedax
-sox
%pre
#!/bin/sh
hds=""
mymedia=""
for file in /proc/ide/h*
do
mymedia=`cat $file/media`
if [ $mymedia == "disk" ] ; then
hds="$hds `basename $file`"
fi
done
set $hds
numhd=`echo $#`
drive1=`echo $hds | cut -d' ' -f1`
drive2=`echo $hds | cut -d' ' -f2`
#Write out partition scheme based on whether there are 1 or 2 hard drives
if [ $numhd == "2" ] ; then
#2 drives
echo "#partitioning scheme generated in %pre for 2 drives" > /tmp/part-include
echo "clearpart --all" >> /tmp/part-include
echo "part /boot --fstype ext3 --size 75 --ondisk hda" >> /tmp/part-include
echo "part / --fstype ext3 --size 1 --grow --ondisk hda" >> /tmp/part-include
echo "part swap --recommended --ondisk $drive1" >> /tmp/part-include
echo "part /home --fstype ext3 --size 1 --grow --ondisk hdb" >> /tmp/part-include
else
#1 drive
echo "#partitioning scheme generated in %pre for 1 drive" > /tmp/part-include
echo "clearpart --all" >> /tmp/part-include
echo "part /boot --fstype ext3 --size 75" >> /tmp/part-include
echo "part swap --recommended" >> /tmp/part-include
echo "part / --fstype ext3 --size 2048" >> /tmp/part-include
echo "part /home --fstype ext3 --size 2048 --grow" >> /tmp/part-include
fi
"""
# Channel the tests expect to be kickstartable on the target satellite.
CHANNEL_LABEL = 'rhel-i386-server-5'
class KickstartTests(RhnTestCase):
    """API smoke tests for kickstart channel/tree listing, import and create."""

    def test_list_kickstartable_channels(self):
        chans = client.kickstart.listKickstartableChannels(self.session_key)
        # should be true on any satellite we're testing against
        self.assertTrue(len(chans) > 0)
        # dict.has_key() was removed in Python 3; the ``in`` operator is
        # equivalent and works on both Python 2 and 3.
        expected = ('channel_description', 'channel_label', 'channel_name',
                    'parent_channel_label', 'channel_summary')
        for c in chans:
            for key in expected:
                self.assertTrue(key in c)

    def test_list_kickstartable_trees(self):
        trees = client.kickstart.listKickstartableTrees(self.session_key,
                                                        CHANNEL_LABEL)
        # should be true on any satellite we're testing against
        self.assertTrue(len(trees) > 0)
        expected = ('channel_id', 'boot_image', 'base_path', 'label')
        for t in trees:
            for key in expected:
                self.assertTrue(key in t)

    def test_import(self):
        trees = client.kickstart.listKickstartableTrees(self.session_key,
                                                        CHANNEL_LABEL)
        # could fail if you ran it enough -- the random label may collide
        ks_label = "test-profile-%s" % random.randint(100, 999)
        client.kickstart.importFile(self.session_key, ks_label, 'none',
                                    trees[0]['label'], False, KICKSTART_FILE)

    def test_create(self):
        trees = client.kickstart.listKickstartableTrees(self.session_key,
                                                        CHANNEL_LABEL)
        ks_label = "api-created-%s" % random.randint(100, 999)
        client.kickstart.createProfile(self.session_key, ks_label,
                                       'none', trees[0]['label'],
                                       SATELLITE_HOST, 'rootpw')


if __name__ == "__main__":
    unittest.main()
|
gpl-2.0
|
openelections/openelections-core
|
openelex/api/base.py
|
1
|
1772
|
"""OpenElex Api base wrapper"""
from future import standard_library
standard_library.install_aliases()
from collections import OrderedDict
from urllib.parse import urljoin
import requests
# Root of the OpenElex REST API.
API_BASE_URL = "http://openelections.net/api/v1/"
# Default query-string params; kept for backwards compatibility (per-call
# values are rebuilt by prepare_api_params).
BASE_PARAMS = ['format=json', 'limit=0']


def get(base_url=API_BASE_URL, resource_type='', params=None):
    """
    Constructs API call from base url, resource type and GET
    params. Resource type should be valid endpoint for OpenElex API,
    and params should be valid Tastypie filters for a given endpoint.
    Details on both can be explored at:

        http://openelections.net/api/v1/?format=json

    base_url - defaults to http://openelections.net/api/v1/
    resource_type - [election|state|organization], etc.
    params - dictionary of valid Tastypie filters

    USAGE:

        # Default returns list endpoints
        get()

        # Get elections starting on a given date
        get(resource_type='election', params={'start_date=': '2012-11-02'})
    """
    # NOTE: the original signature used a mutable default ({}), which was
    # then mutated downstream, and applied "%" to the docstring expression,
    # which silently discarded it (get.__doc__ was None). Both fixed.
    if params is None:
        params = {}
    ordered_params = prepare_api_params(params)
    url = urljoin(base_url, resource_type)
    # Tastypie endpoints require a trailing slash.
    if not url.endswith('/'):
        url += '/'
    response = requests.get(url, params=ordered_params)
    return response
def prepare_api_params(params):
    """Construct ordered dict of params for API call.

    This method returns an alphabetized OrderedDict in order
    to maximize cache hits on the API. The ``format`` and ``limit``
    entries are always appended last, defaulting to ``'json'`` and
    ``'0'`` when absent.

    The input dictionary is no longer mutated (the original popped keys
    out of the caller's dict).
    """
    # Work on a shallow copy so the caller's dict survives intact.
    params = dict(params)
    fmt = params.pop('format', 'json')
    limit = params.pop('limit', '0')
    # Alphabetize the remaining filters for stable, cache-friendly URLs.
    new_params = sorted(params.items())
    new_params.extend([('format', fmt), ('limit', limit)])
    return OrderedDict(new_params)
|
mit
|
dpetzold/django
|
tests/bash_completion/tests.py
|
327
|
3888
|
"""
A series of tests to establish that the command-line bash completion works.
"""
import os
import sys
import unittest
from django.apps import apps
from django.core.management import ManagementUtility
from django.test.utils import captured_stdout
class BashCompletionTests(unittest.TestCase):
"""
Testing the Python level bash completion code.
This requires setting up the environment as if we got passed data
from bash.
"""
def setUp(self):
self.old_DJANGO_AUTO_COMPLETE = os.environ.get('DJANGO_AUTO_COMPLETE')
os.environ['DJANGO_AUTO_COMPLETE'] = '1'
def tearDown(self):
if self.old_DJANGO_AUTO_COMPLETE:
os.environ['DJANGO_AUTO_COMPLETE'] = self.old_DJANGO_AUTO_COMPLETE
else:
del os.environ['DJANGO_AUTO_COMPLETE']
def _user_input(self, input_str):
"""
Set the environment and the list of command line arguments.
This sets the bash variables $COMP_WORDS and $COMP_CWORD. The former is
an array consisting of the individual words in the current command
line, the latter is the index of the current cursor position, so in
case a word is completed and the cursor is placed after a whitespace,
$COMP_CWORD must be incremented by 1:
* 'django-admin start' -> COMP_CWORD=1
* 'django-admin startproject' -> COMP_CWORD=1
* 'django-admin startproject ' -> COMP_CWORD=2
"""
os.environ['COMP_WORDS'] = input_str
idx = len(input_str.split(' ')) - 1 # Index of the last word
comp_cword = idx + 1 if input_str.endswith(' ') else idx
os.environ['COMP_CWORD'] = str(comp_cword)
sys.argv = input_str.split()
def _run_autocomplete(self):
util = ManagementUtility(argv=sys.argv)
with captured_stdout() as stdout:
try:
util.autocomplete()
except SystemExit:
pass
return stdout.getvalue().strip().split('\n')
def test_django_admin_py(self):
"django_admin.py will autocomplete option flags"
self._user_input('django-admin sqlmigrate --verb')
output = self._run_autocomplete()
self.assertEqual(output, ['--verbosity='])
def test_manage_py(self):
"manage.py will autocomplete option flags"
self._user_input('manage.py sqlmigrate --verb')
output = self._run_autocomplete()
self.assertEqual(output, ['--verbosity='])
def test_custom_command(self):
"A custom command can autocomplete option flags"
self._user_input('django-admin test_command --l')
output = self._run_autocomplete()
self.assertEqual(output, ['--list'])
def test_subcommands(self):
"Subcommands can be autocompleted"
self._user_input('django-admin sql')
output = self._run_autocomplete()
self.assertEqual(output, ['sqlflush sqlmigrate sqlsequencereset'])
def test_completed_subcommand(self):
"Show option flags in case a subcommand is completed"
self._user_input('django-admin startproject ') # Trailing whitespace
output = self._run_autocomplete()
for item in output:
self.assertTrue(item.startswith('--'))
def test_help(self):
"No errors, just an empty list if there are no autocomplete options"
self._user_input('django-admin help --')
output = self._run_autocomplete()
self.assertEqual(output, [''])
def test_app_completion(self):
"Application names will be autocompleted for an AppCommand"
self._user_input('django-admin sqlmigrate a')
output = self._run_autocomplete()
a_labels = sorted(app_config.label
for app_config in apps.get_app_configs()
if app_config.label.startswith('a'))
self.assertEqual(output, a_labels)
|
bsd-3-clause
|
rsvip/Django
|
tests/gis_tests/geo3d/models.py
|
302
|
1294
|
from django.utils.encoding import python_2_unicode_compatible
from ..models import models
@python_2_unicode_compatible
class NamedModel(models.Model):
# Abstract base: adds a required ``name`` plus the GIS manager; models are
# only active on database backends reporting the gis_enabled feature.
name = models.CharField(max_length=30)
objects = models.GeoManager()
class Meta:
abstract = True
required_db_features = ['gis_enabled']
def __str__(self):
return self.name
# 2D/3D model pairs used to exercise dim=3 geometry handling. The srid
# values (4269 for the Interstate models, 32140 for the Proj/Polygon ones)
# presumably match the geo3d test fixture data -- verify against the
# fixtures before changing them.
class City3D(NamedModel):
point = models.PointField(dim=3)
class Interstate2D(NamedModel):
line = models.LineStringField(srid=4269)
class Interstate3D(NamedModel):
line = models.LineStringField(dim=3, srid=4269)
class InterstateProj2D(NamedModel):
line = models.LineStringField(srid=32140)
class InterstateProj3D(NamedModel):
line = models.LineStringField(dim=3, srid=32140)
class Polygon2D(NamedModel):
poly = models.PolygonField(srid=32140)
class Polygon3D(NamedModel):
poly = models.PolygonField(dim=3, srid=32140)
class SimpleModel(models.Model):
# Abstract base without a name field, for models that only need geometry.
objects = models.GeoManager()
class Meta:
abstract = True
required_db_features = ['gis_enabled']
class Point2D(SimpleModel):
point = models.PointField()
class Point3D(SimpleModel):
point = models.PointField(dim=3)
class MultiPoint3D(SimpleModel):
mpoint = models.MultiPointField(dim=3)
|
bsd-3-clause
|
Itxaka/libcloud
|
libcloud/compute/drivers/cloudsigma.py
|
2
|
67816
|
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Drivers for CloudSigma API v1.0 and v2.0.
"""
import re
import time
import copy
import base64
# Prefer simplejson when available, falling back to the stdlib json module.
try:
    import simplejson as json
except ImportError:
    # Only swallow a missing module; the previous bare ``except:`` would
    # also have hidden syntax/runtime errors raised inside simplejson.
    import json
from libcloud.utils.py3 import b
from libcloud.utils.py3 import httplib
from libcloud.utils.misc import str2dicts, str2list, dict2str
from libcloud.common.base import ConnectionUserAndKey, JsonResponse, Response
from libcloud.common.types import InvalidCredsError, ProviderError
from libcloud.common.cloudsigma import INSTANCE_TYPES
from libcloud.common.cloudsigma import API_ENDPOINTS_1_0
from libcloud.common.cloudsigma import API_ENDPOINTS_2_0
from libcloud.common.cloudsigma import DEFAULT_API_VERSION, DEFAULT_REGION
from libcloud.compute.types import NodeState, Provider
from libcloud.compute.base import NodeDriver, NodeSize, Node
from libcloud.compute.base import NodeImage
from libcloud.compute.base import is_private_subnet
from libcloud.utils.iso8601 import parse_date
from libcloud.utils.misc import get_secure_random_string
# Public API of this module.
__all__ = [
'CloudSigmaNodeDriver',
'CloudSigma_1_0_NodeDriver',
'CloudSigma_2_0_NodeDriver',
'CloudSigmaError',
'CloudSigmaNodeSize',
'CloudSigmaDrive',
'CloudSigmaTag',
'CloudSigmaSubscription',
'CloudSigmaFirewallPolicy',
'CloudSigmaFirewallPolicyRule'
]
class CloudSigmaNodeDriver(NodeDriver):
    """Base CloudSigma driver.

    Instantiating this class returns an instance of the version-specific
    subclass selected by ``api_version``.
    """
    name = 'CloudSigma'
    website = 'http://www.cloudsigma.com/'

    def __new__(cls, key, secret=None, secure=True, host=None, port=None,
                api_version=DEFAULT_API_VERSION, **kwargs):
        if cls is CloudSigmaNodeDriver:
            drivers = {
                '1.0': CloudSigma_1_0_NodeDriver,
                '2.0': CloudSigma_2_0_NodeDriver,
            }
            if api_version not in drivers:
                raise NotImplementedError('Unsupported API version: %s' %
                                          (api_version))
            cls = drivers[api_version]
        return super(CloudSigmaNodeDriver, cls).__new__(cls)
class CloudSigmaException(Exception):
    """Generic error raised by the CloudSigma v1.0 driver."""

    def __str__(self):
        return self.args[0]

    def __repr__(self):
        message = self.args[0]
        return "<CloudSigmaException '{0}'>".format(message)


class CloudSigmaInsufficientFundsException(Exception):
    """Raised when the account balance cannot cover the requested action."""

    def __repr__(self):
        message = self.args[0]
        return "<CloudSigmaInsufficientFundsException '{0}'>".format(message)
class CloudSigmaNodeSize(NodeSize):
    """NodeSize variant that additionally carries a ``cpu`` attribute."""

    def __init__(self, id, name, cpu, ram, disk, bandwidth, price, driver):
        self.id = id
        self.name = name
        self.cpu = cpu
        self.ram = ram
        self.disk = disk
        self.bandwidth = bandwidth
        self.price = price
        self.driver = driver

    def __repr__(self):
        details = (self.id, self.name, self.cpu, self.ram, self.disk,
                   self.bandwidth, self.price, self.driver.name)
        return ('<NodeSize: id=%s, name=%s, cpu=%s, ram=%s disk=%s '
                'bandwidth=%s price=%s driver=%s ...>' % details)
class CloudSigma_1_0_Response(Response):
    """Response wrapper for the v1.0 plain-text key/value API."""

    def success(self):
        # 401 means bad credentials; surface that as a distinct error.
        if self.status == httplib.UNAUTHORIZED:
            raise InvalidCredsError()
        return 200 <= self.status <= 299

    def parse_body(self):
        if not self.body:
            return self.body
        # v1.0 bodies are "key value" text sections, one dict per section.
        return str2dicts(self.body)

    def parse_error(self):
        stripped = self.body.replace('errors:', '').strip()
        return 'Error: %s' % (stripped)
class CloudSigma_1_0_Connection(ConnectionUserAndKey):
    """HTTP(S) connection using basic auth against the v1.0 endpoint."""

    host = API_ENDPOINTS_1_0[DEFAULT_REGION]['host']
    responseCls = CloudSigma_1_0_Response

    def add_default_headers(self, headers):
        credentials = '%s:%s' % (self.user_id, self.key)
        token = base64.b64encode(b(credentials)).decode('utf-8')
        headers.update({
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'Authorization': 'Basic %s' % (token),
        })
        return headers
class CloudSigma_1_0_NodeDriver(CloudSigmaNodeDriver):
# Driver for the legacy CloudSigma API v1.0 (plain-text key/value bodies).
type = Provider.CLOUDSIGMA
name = 'CloudSigma (API v1.0)'
website = 'http://www.cloudsigma.com/'
connectionCls = CloudSigma_1_0_Connection
IMAGING_TIMEOUT = 20 * 60 # Default timeout (in seconds) for the drive
# imaging process
# Maps API server status strings onto libcloud NodeState values; statuses
# not listed here fall back to NodeState.UNKNOWN in _to_node().
NODE_STATE_MAP = {
'active': NodeState.RUNNING,
'stopped': NodeState.TERMINATED,
'dead': NodeState.TERMINATED,
'dumped': NodeState.TERMINATED,
}
def __init__(self, key, secret=None, secure=True, host=None, port=None,
region=DEFAULT_REGION, **kwargs):
# Validate the region up front so a typo fails fast.
if region not in API_ENDPOINTS_1_0:
raise ValueError('Invalid region: %s' % (region))
# Remember whether the caller pinned an explicit host; if not,
# _ex_connection_class_kwargs() derives one from the region.
self._host_argument_set = host is not None
# Per-region API name (used for size price lookups).
self.api_name = 'cloudsigma_%s' % (region)
super(CloudSigma_1_0_NodeDriver, self).__init__(key=key, secret=secret,
secure=secure,
host=host,
port=port,
region=region,
**kwargs)
def reboot_node(self, node):
    """
    Reboot a node.

    The v1.0 API has no native reboot call, so this is emulated as a
    stop (when the node is running) followed by a start.

    @inherits: :class:`NodeDriver.reboot_node`
    """
    node = self._get_node(node.id)
    stopped = (self.ex_stop_node(node)
               if node.state == NodeState.RUNNING else True)
    if not stopped:
        raise CloudSigmaException(
            'Could not stop node with id %s' % (node.id))
    return self.ex_start_node(node)

def destroy_node(self, node):
    """
    Destroy a node (all the drives associated with it are NOT destroyed).

    If a node is still running, it's stopped before it's destroyed.

    @inherits: :class:`NodeDriver.destroy_node`
    """
    node = self._get_node(node.id)
    # The API refuses to destroy a running server, so stop it first.
    stopped = (self.ex_stop_node(node)
               if node.state == NodeState.RUNNING else True)
    if not stopped:
        raise CloudSigmaException(
            'Could not stop node with id %s' % (node.id))
    response = self.connection.request(
        action='/servers/%s/destroy' % (node.id),
        method='POST')
    return response.status == 204
def list_images(self, location=None):
    """
    Return a list of available standard images (this call might take up
    to 15 seconds to return).

    @inherits: :class:`NodeDriver.list_images`
    """
    entries = self.connection.request(
        action='/drives/standard/info').object
    # Only entries explicitly typed as disks are usable as images.
    return [NodeImage(id=entry['drive'], name=entry['name'],
                      driver=self.connection.driver,
                      extra={'size': entry['size']})
            for entry in entries if entry.get('type') == 'disk']

def list_sizes(self, location=None):
    """Return the statically defined CloudSigma instance types."""
    return [CloudSigmaNodeSize(id=spec['id'], name=spec['name'],
                               cpu=spec['cpu'], ram=spec['memory'],
                               disk=spec['disk'],
                               bandwidth=spec['bandwidth'],
                               price=self._get_size_price(
                                   size_id=spec['id']),
                               driver=self.connection.driver)
            for spec in INSTANCE_TYPES]

def list_nodes(self):
    """Return a Node for every server visible to this account."""
    entries = self.connection.request(action='/servers/info').object
    # _to_node() returns None for malformed entries; drop those.
    return [node for node in (self._to_node(entry) for entry in entries)
            if node]
def create_node(self, **kwargs):
"""
Creates a CloudSigma instance
@inherits: :class:`NodeDriver.create_node`
:keyword name: String with a name for this new node (required)
:type name: ``str``
:keyword smp: Number of virtual processors or None to calculate
based on the cpu speed
:type smp: ``int``
:keyword nic_model: e1000, rtl8139 or virtio (is not specified,
e1000 is used)
:type nic_model: ``str``
:keyword vnc_password: If not set, VNC access is disabled.
:type vnc_password: ``str``
:keyword drive_type: Drive type (ssd|hdd). Defaults to hdd.
:type drive_type: ``str``
"""
size = kwargs['size']
image = kwargs['image']
smp = kwargs.get('smp', 'auto')
nic_model = kwargs.get('nic_model', 'e1000')
vnc_password = kwargs.get('vnc_password', None)
drive_type = kwargs.get('drive_type', 'hdd')
# Validate enum-style options before making any API calls.
if nic_model not in ['e1000', 'rtl8139', 'virtio']:
raise CloudSigmaException('Invalid NIC model specified')
if drive_type not in ['hdd', 'ssd']:
raise CloudSigmaException('Invalid drive type "%s". Valid types'
' are: hdd, ssd' % (drive_type))
# Step 1: clone the image drive into a new drive sized for this node.
drive_data = {}
drive_data.update({'name': kwargs['name'],
'size': '%sG' % (kwargs['size'].disk),
'driveType': drive_type})
response = self.connection.request(
action='/drives/%s/clone' % image.id,
data=dict2str(drive_data),
method='POST').object
if not response:
raise CloudSigmaException('Drive creation failed')
drive_uuid = response[0]['drive']
# Step 2: poll once per second until the clone finishes imaging, up to
# IMAGING_TIMEOUT seconds.
response = self.connection.request(
action='/drives/%s/info' % (drive_uuid)).object
imaging_start = time.time()
while 'imaging' in response[0]:
response = self.connection.request(
action='/drives/%s/info' % (drive_uuid)).object
elapsed_time = time.time() - imaging_start
timed_out = elapsed_time >= self.IMAGING_TIMEOUT
if 'imaging' in response[0] and timed_out:
raise CloudSigmaException('Drive imaging timed out')
time.sleep(1)
# Step 3: create the server definition, booting from the cloned drive.
node_data = {}
node_data.update(
{'name': kwargs['name'], 'cpu': size.cpu, 'mem': size.ram,
'ide:0:0': drive_uuid, 'boot': 'ide:0:0', 'smp': smp})
node_data.update({'nic:0:model': nic_model, 'nic:0:dhcp': 'auto'})
if vnc_password:
node_data.update({'vnc:ip': 'auto', 'vnc:password': vnc_password})
response = self.connection.request(action='/servers/create',
data=dict2str(node_data),
method='POST').object
if not isinstance(response, list):
response = [response]
node = self._to_node(response[0])
if node is None:
# Insufficient funds, destroy created drive
self.ex_drive_destroy(drive_uuid)
raise CloudSigmaInsufficientFundsException(
'Insufficient funds, node creation failed')
# Start the node after it has been created
started = self.ex_start_node(node)
if started:
node.state = NodeState.RUNNING
return node
def ex_destroy_node_and_drives(self, node):
"""
Destroy a node and all the drives associated with it.
:param node: Node which should be used
:type node: :class:`libcloud.compute.base.Node`
:rtype: ``bool``
"""
node = self._get_node_info(node)
drive_uuids = []
# Collect drive UUIDs from attachment keys (ide:/scsi/block prefixes),
# skipping the per-drive counter/metadata keys that end in :bytes,
# :requests or media.
for key, value in node.items():
if (key.startswith('ide:') or key.startswith(
'scsi') or key.startswith('block')) and\
not (key.endswith(':bytes') or
key.endswith(':requests') or key.endswith('media')):
drive_uuids.append(value)
# Destroy the server first; bail out (drives intact) if that fails.
node_destroyed = self.destroy_node(self._to_node(node))
if not node_destroyed:
return False
for drive_uuid in drive_uuids:
self.ex_drive_destroy(drive_uuid)
return True
def ex_static_ip_list(self):
    """
    Return a list of available static IP addresses.

    :rtype: ``list`` of ``str``
    """
    response = self.connection.request(action='/resources/ip/list',
                                       method='GET')
    if response.status != 200:
        raise CloudSigmaException('Could not retrieve IP list')
    return str2list(response.body)

def ex_drives_list(self):
    """
    Return a list of all the available drives.

    :rtype: ``list`` of ``dict``
    """
    response = self.connection.request(action='/drives/info', method='GET')
    return str2dicts(response.body)
def ex_static_ip_create(self):
"""
Create a new static IP address.
:rtype: ``list`` of ``dict``
"""
response = self.connection.request(action='/resources/ip/create',
method='GET')
result = str2dicts(response.body)
return result
def ex_static_ip_destroy(self, ip_address):
    """
    Destroy a static IP address.

    :param ip_address: IP address which should be used
    :type ip_address: ``str``

    :rtype: ``bool``
    """
    action = '/resources/ip/%s/destroy' % (ip_address)
    response = self.connection.request(action=action, method='GET')
    return response.status == 204

def ex_drive_destroy(self, drive_uuid):
    """
    Destroy a drive with a specified uuid.
    If the drive is currently mounted an exception is thrown.

    :param drive_uuid: Drive uuid which should be used
    :type drive_uuid: ``str``

    :rtype: ``bool``
    """
    action = '/drives/%s/destroy' % (drive_uuid)
    response = self.connection.request(action=action, method='POST')
    return response.status == 204
def ex_set_node_configuration(self, node, **kwargs):
    """
    Update a node configuration.
    Changing most of the parameters requires node to be stopped.

    :param node: Node which should be used
    :type node: :class:`libcloud.compute.base.Node`

    :param kwargs: keyword arguments
    :type kwargs: ``dict``

    :rtype: ``bool``
    """
    # Whitelist of settable attribute-name patterns.
    valid_keys = ('^name$', '^parent$', '^cpu$', '^smp$', '^mem$',
                  '^boot$', '^nic:0:model$', '^nic:0:dhcp',
                  '^nic:1:model$', '^nic:1:vlan$', '^nic:1:mac$',
                  '^vnc:ip$', '^vnc:password$', '^vnc:tls',
                  '^ide:[0-1]:[0-1](:media)?$', '^scsi:0:[0-7](:media)?$',
                  '^block:[0-7](:media)?$')
    invalid_keys = [key for key in kwargs.keys()
                    if not any(re.match(regex, key)
                               for regex in valid_keys)]
    if invalid_keys:
        raise CloudSigmaException(
            'Invalid configuration key specified: %s' %
            (','.join(invalid_keys)))
    response = self.connection.request(
        action='/servers/%s/set' % (node.id),
        data=dict2str(kwargs),
        method='POST')
    return (response.status == 200 and response.body != '')
def ex_start_node(self, node):
    """
    Start a node.

    :param node: Node which should be used
    :type node: :class:`libcloud.compute.base.Node`

    :rtype: ``bool``
    """
    action = '/servers/%s/start' % (node.id)
    response = self.connection.request(action=action, method='POST')
    return response.status == 200

def ex_stop_node(self, node):
    """
    Stop (shutdown) a node.

    :param node: Node which should be used
    :type node: :class:`libcloud.compute.base.Node`

    :rtype: ``bool``
    """
    action = '/servers/%s/stop' % (node.id)
    response = self.connection.request(action=action, method='POST')
    return response.status == 204

def ex_shutdown_node(self, node):
    """
    Stop (shutdown) a node.

    @inherits: :class:`CloudSigmaBaseNodeDriver.ex_stop_node`
    """
    return self.ex_stop_node(node)
def ex_destroy_drive(self, drive_uuid):
    """
    Destroy a drive.

    :param drive_uuid: Drive uuid which should be used
    :type drive_uuid: ``str``

    :rtype: ``bool``
    """
    # Identical POST /drives/<uuid>/destroy call as ex_drive_destroy;
    # delegate to keep the two entry points in sync.
    return self.ex_drive_destroy(drive_uuid)
def _ex_connection_class_kwargs(self):
    """
    Return the host value based on the user supplied region.
    """
    if self._host_argument_set:
        # An explicit host was supplied at construction; don't override it.
        return {}
    return {'host': API_ENDPOINTS_1_0[self.region]['host']}
def _to_node(self, data):
# Convert a raw server-info dict into a Node. Returns None for falsy
# input or when the 'server' UUID key is absent (failed creation).
if data:
try:
state = self.NODE_STATE_MAP[data['status']]
except KeyError:
state = NodeState.UNKNOWN
if 'server' not in data:
# Response does not contain server UUID if the server
# creation failed because of insufficient funds.
return None
public_ips = []
if 'nic:0:dhcp' in data:
if isinstance(data['nic:0:dhcp'], list):
public_ips = data['nic:0:dhcp']
else:
public_ips = [data['nic:0:dhcp']]
extra = {}
# 'auto'-typed fields hold an int when set but may be the literal
# string 'auto'; keep the string when int() fails.
extra_keys = [('cpu', 'int'), ('smp', 'auto'), ('mem', 'int'),
('status', 'str')]
for key, value_type in extra_keys:
if key in data:
value = data[key]
if value_type == 'int':
value = int(value)
elif value_type == 'auto':
try:
value = int(value)
except ValueError:
pass
extra.update({key: value})
if 'vnc:ip' in data and 'vnc:password' in data:
extra.update({'vnc_ip': data['vnc:ip'],
'vnc_password': data['vnc:password']})
node = Node(id=data['server'], name=data['name'], state=state,
public_ips=public_ips, private_ips=None,
driver=self.connection.driver,
extra=extra)
return node
return None
def _get_node(self, node_id):
    """
    Look up a node by its id.

    :param node_id: ID of the node to retrieve.
    :type node_id: ``str``

    :rtype: :class:`libcloud.compute.base.Node`
    """
    nodes = self.list_nodes()
    # BUG FIX: the filter previously compared ``node.id == node.id``
    # (always true), so the first listed node was returned regardless of
    # the requested id. Compare against the ``node_id`` argument instead.
    node = [node for node in nodes if node.id == node_id]
    if not node:
        raise CloudSigmaException(
            'Node with id %s does not exist' % (node_id))
    return node[0]
def _get_node_info(self, node):
    """Return the raw info dict for *node* from the v1.0 API."""
    response = self.connection.request(
        action='/servers/%s/info' % (node.id))
    return str2dicts(response.body)[0]
# Region-pinned subclasses of the v1.0 connection/driver pair: each pins the
# connection host and the pricing api_name to a specific end-point.
class CloudSigmaZrhConnection(CloudSigma_1_0_Connection):
"""
Connection class for the CloudSigma driver for the Zurich end-point
"""
host = API_ENDPOINTS_1_0['zrh']['host']
class CloudSigmaZrhNodeDriver(CloudSigma_1_0_NodeDriver):
"""
CloudSigma node driver for the Zurich end-point
"""
connectionCls = CloudSigmaZrhConnection
api_name = 'cloudsigma_zrh'
class CloudSigmaLvsConnection(CloudSigma_1_0_Connection):
"""
Connection class for the CloudSigma driver for the Las Vegas end-point
"""
host = API_ENDPOINTS_1_0['lvs']['host']
class CloudSigmaLvsNodeDriver(CloudSigma_1_0_NodeDriver):
"""
CloudSigma node driver for the Las Vegas end-point
"""
connectionCls = CloudSigmaLvsConnection
api_name = 'cloudsigma_lvs'
class CloudSigmaError(ProviderError):
"""
Represents CloudSigma API error.
"""
def __init__(self, http_code, error_type, error_msg, error_point, driver):
"""
:param http_code: HTTP status code.
:type http_code: ``int``
:param error_type: Type of error (validation / notexist / backend /
permissions database / concurrency / billing /
payment)
:type error_type: ``str``
:param error_msg: A description of the error that occurred.
:type error_msg: ``str``
:param error_point: Point at which the error occurred. Can be None.
:type error_point: ``str`` or ``None``
:param driver: Driver instance the error originated from.
"""
super(CloudSigmaError, self).__init__(http_code=http_code,
value=error_msg, driver=driver)
self.error_type = error_type
self.error_msg = error_msg
self.error_point = error_point
class CloudSigmaSubscription(object):
    """
    Represents CloudSigma subscription.
    """

    def __init__(self, id, resource, amount, period, status, price, start_time,
                 end_time, auto_renew, subscribed_object=None):
        """
        :param id: Subscription ID.
        :type id: ``str``

        :param resource: Resource (e.g vlan, ip, etc.).
        :type resource: ``str``

        :param amount: Amount of the subscribed resource.

        :param period: Subscription period.
        :type period: ``str``

        :param status: Subscription status (active / inactive).
        :type status: ``str``

        :param price: Subscription price.
        :type price: ``str``

        :param start_time: Start time for this subscription.
        :type start_time: ``datetime.datetime``

        :param end_time: End time for this subscription.
        :type end_time: ``datetime.datetime``

        :param auto_renew: True if the subscription is auto renewed.
        :type auto_renew: ``bool``

        :param subscribed_object: Optional UUID of the subscribed object.
        :type subscribed_object: ``str``
        """
        self.id = id
        self.resource = resource
        self.amount = amount
        self.period = period
        self.status = status
        self.price = price
        self.start_time = start_time
        self.end_time = end_time
        self.auto_renew = auto_renew
        self.subscribed_object = subscribed_object

    def __str__(self):
        return repr(self)

    def __repr__(self):
        details = (self.id, self.resource, self.amount, self.period,
                   self.subscribed_object)
        return ('<CloudSigmaSubscription id=%s, resource=%s, amount=%s, '
                'period=%s, object_uuid=%s>' % details)
class CloudSigmaTag(object):
    """
    Represents a CloudSigma tag object.
    """

    def __init__(self, id, name, resources=None):
        """
        :param id: Tag ID.
        :type id: ``str``

        :param name: Tag name.
        :type name: ``str``

        :param resources: IDs of resources which are associated with this tag.
        :type resources: ``list`` of ``str``
        """
        self.id = id
        self.name = name
        # Normalize a falsy value (None, []) to a fresh empty list.
        self.resources = resources if resources else []

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return ('<CloudSigmaTag id=%s, name=%s, resources=%s>' %
                (self.id, self.name, repr(self.resources)))
class CloudSigmaDrive(NodeImage):
"""
Represents a CloudSigma drive.
"""
def __init__(self, id, name, size, media, status, driver, extra=None):
"""
:param id: Drive ID.
:type id: ``str``
:param name: Drive name.
:type name: ``str``
:param size: Drive size (in bytes).
:type size: ``int``
:param media: Drive media (cdrom / disk).
:type media: ``str``
:param status: Drive status (unmounted / mounted).
:type status: ``str``
:param extra: Optional extra attributes (passed through to NodeImage).
:type extra: ``dict`` or ``None``
"""
super(CloudSigmaDrive, self).__init__(id=id, name=name, driver=driver,
extra=extra)
self.size = size
self.media = media
self.status = status
def __str__(self):
return self.__repr__()
def __repr__(self):
# NOTE(review): this reports 'CloudSigmaSize', not 'CloudSigmaDrive';
# looks like a copy-paste slip -- confirm no caller matches on the
# string before renaming it.
return (('<CloudSigmaSize id=%s, name=%s size=%s, media=%s, '
'status=%s>') %
(self.id, self.name, self.size, self.media, self.status))
class CloudSigmaFirewallPolicy(object):
    """
    Represents a CloudSigma firewall policy.
    """

    def __init__(self, id, name, rules):
        """
        :param id: Policy ID.
        :type id: ``str``

        :param name: Policy name.
        :type name: ``str``

        :param rules: Rules associated with this policy.
        :type rules: ``list`` of :class:`.CloudSigmaFirewallPolicyRule` objects
        """
        self.id = id
        self.name = name
        # Normalize a falsy value (None, []) to a fresh empty list.
        self.rules = rules if rules else []

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return ('<CloudSigmaFirewallPolicy id=%s, name=%s rules=%s>' %
                (self.id, self.name, repr(self.rules)))
class CloudSigmaFirewallPolicyRule(object):
    """
    Represents a CloudSigma firewall policy rule.
    """

    def __init__(self, action, direction, ip_proto=None, src_ip=None,
                 src_port=None, dst_ip=None, dst_port=None, comment=None):
        """
        :param action: Action (drop / accept).
        :type action: ``str``

        :param direction: Rule direction (in / out / both)>
        :type direction: ``str``

        :param ip_proto: IP protocol (tcp / udp).
        :type ip_proto: ``str``.

        :param src_ip: Source IP in CIDR notation.
        :type src_ip: ``str``

        :param src_port: Source port or a port range.
        :type src_port: ``str``

        :param dst_ip: Destination IP in CIDR notation.
        :type dst_ip: ``str``

        :param dst_port: Destination port or a port range.
        :type dst_port: ``str``

        :param comment: Comment associated with the policy.
        :type comment: ``str``
        """
        self.action = action
        self.direction = direction
        self.ip_proto = ip_proto
        self.src_ip = src_ip
        self.src_port = src_port
        self.dst_ip = dst_ip
        self.dst_port = dst_port
        self.comment = comment

    def __str__(self):
        return repr(self)

    def __repr__(self):
        return ('<CloudSigmaFirewallPolicyRule action=%s, direction=%s>' %
                (self.action, self.direction))
class CloudSigma_2_0_Response(JsonResponse):
    # HTTP status codes which indicate a successful request.
    success_status_codes = [
        httplib.OK,
        httplib.ACCEPTED,
        httplib.NO_CONTENT,
        httplib.CREATED
    ]

    def success(self):
        """Return ``True`` if the response status indicates success."""
        return self.status in self.success_status_codes

    def parse_error(self):
        """Parse the error response, raising the first recognized API error."""
        if int(self.status) == httplib.UNAUTHORIZED:
            raise InvalidCredsError('Invalid credentials')

        body = self.parse_body()
        errors = self._parse_errors_from_body(body=body)

        if errors:
            # Throw first error
            raise errors[0]

        return body

    def _parse_errors_from_body(self, body):
        """
        Parse errors from the response body.

        :return: List of error objects, or ``None`` if the body is not a list.
        :rtype: ``list`` of :class:`.CloudSigmaError` objects
        """
        if not isinstance(body, list):
            return None

        errors = []
        for item in body:
            if 'error_type' not in item:
                # Unrecognized error
                continue

            errors.append(CloudSigmaError(http_code=self.status,
                                          error_type=item['error_type'],
                                          error_msg=item['error_message'],
                                          error_point=item['error_point'],
                                          driver=self.connection.driver))

        return errors
class CloudSigma_2_0_Connection(ConnectionUserAndKey):
    host = API_ENDPOINTS_2_0[DEFAULT_REGION]['host']
    responseCls = CloudSigma_2_0_Response
    api_prefix = '/api/2.0'

    def add_default_headers(self, headers):
        """Attach JSON content negotiation and HTTP Basic Auth headers."""
        credentials = b('%s:%s' % (self.user_id, self.key))
        headers['Accept'] = 'application/json'
        headers['Content-Type'] = 'application/json'
        headers['Authorization'] = 'Basic %s' % (
            base64.b64encode(credentials).decode('utf-8'))
        return headers

    def encode_data(self, data):
        """Serialize the request payload as JSON."""
        return json.dumps(data)

    def request(self, action, params=None, data=None, headers=None,
                method='GET', raw=False):
        """Prefix the action with the API version path; disable pagination on
        GET requests so all items are returned in one response."""
        params = params or {}
        action = self.api_prefix + action

        if method == 'GET':
            params['limit'] = 0  # we want all the items back

        return super(CloudSigma_2_0_Connection, self).request(action=action,
                                                              params=params,
                                                              data=data,
                                                              headers=headers,
                                                              method=method,
                                                              raw=raw)
class CloudSigma_2_0_NodeDriver(CloudSigmaNodeDriver):
"""
Driver for CloudSigma API v2.0.
"""
name = 'CloudSigma (API v2.0)'
api_name = 'cloudsigma_zrh'
website = 'http://www.cloudsigma.com/'
connectionCls = CloudSigma_2_0_Connection
# Default drive transition timeout in seconds
DRIVE_TRANSITION_TIMEOUT = 500
# How long to sleep between different polling periods while waiting for
# drive transition
DRIVE_TRANSITION_SLEEP_INTERVAL = 5
NODE_STATE_MAP = {
'starting': NodeState.PENDING,
'stopping': NodeState.PENDING,
'unavailable': NodeState.PENDING,
'running': NodeState.RUNNING,
'stopped': NodeState.STOPPED,
'paused': NodeState.STOPPED
}
    def __init__(self, key, secret, secure=True, host=None, port=None,
                 region=DEFAULT_REGION, **kwargs):
        # Validate the region up-front so a typo fails fast with a clear error.
        if region not in API_ENDPOINTS_2_0:
            raise ValueError('Invalid region: %s' % (region))
        if not secure:
            # CloudSigma drive uses Basic Auth authentication and we don't want
            # to allow user to accidentally send credentials over the wire in
            # plain-text
            raise ValueError('CloudSigma driver only supports a '
                             'secure connection')
        # Remember whether the caller explicitly provided a host so that
        # _ex_connection_class_kwargs doesn't override it with the
        # region-derived default.
        self._host_argument_set = host is not None
        super(CloudSigma_2_0_NodeDriver, self).__init__(key=key, secret=secret,
                                                        secure=secure,
                                                        host=host, port=port,
                                                        region=region,
                                                        **kwargs)
def list_nodes(self, ex_tag=None):
"""
List available nodes.
:param ex_tag: If specified, only return servers tagged with the
provided tag.
:type ex_tag: :class:`CloudSigmaTag`
"""
if ex_tag:
action = '/tags/%s/servers/detail/' % (ex_tag.id)
else:
action = '/servers/detail/'
response = self.connection.request(action=action, method='GET').object
nodes = [self._to_node(data=item) for item in response['objects']]
return nodes
def list_sizes(self):
"""
List available sizes.
"""
sizes = []
for value in INSTANCE_TYPES:
key = value['id']
size = CloudSigmaNodeSize(id=value['id'], name=value['name'],
cpu=value['cpu'], ram=value['memory'],
disk=value['disk'],
bandwidth=value['bandwidth'],
price=self._get_size_price(size_id=key),
driver=self.connection.driver)
sizes.append(size)
return sizes
def list_images(self):
"""
Return a list of available pre-installed library drives.
Note: If you want to list all the available library drives (both
pre-installed and installation CDs), use :meth:`ex_list_library_drives`
method.
"""
response = self.connection.request(action='/libdrives/').object
images = [self._to_image(data=item) for item in response['objects']]
# We filter out non pre-installed library drives by default because
# they can't be used directly following a default Libcloud server
# creation flow.
images = [image for image in images if
image.extra['image_type'] == 'preinst']
return images
    def create_node(self, name, size, image, ex_metadata=None,
                    ex_vnc_password=None, ex_avoid=None, ex_vlan=None):
        """
        Create a new server.

        Server creation consists multiple steps depending on the type of the
        image used.

        1. Installation CD:
            1. Create a server and attach installation cd
            2. Start a server
        2. Pre-installed image:
            1. Clone provided library drive so we can use it
            2. Resize cloned drive to the desired size
            3. Create a server and attach cloned drive
            4. Start a server

        :param name: Name of the server.
        :type name: ``str``

        :param size: Size of the server (CPU, RAM, disk).
        :type size: :class:`.CloudSigmaNodeSize`

        :param image: Drive to boot from (pre-installed image or an
                      installation CD).
        :type image: :class:`.CloudSigmaDrive`

        :param ex_metadata: Key / value pairs to associate with the
                            created node. (optional)
        :type ex_metadata: ``dict``

        :param ex_vnc_password: Password to use for VNC access. If not
                                provided, random password is generated.
        :type ex_vnc_password: ``str``

        :param ex_avoid: A list of server UUIDs to avoid when starting this
                         node. (optional)
        :type ex_avoid: ``list``

        :param ex_vlan: Optional UUID of a VLAN network to use. If specified,
                        server will have two nics assigned - 1 with a public ip
                        and 1 with the provided VLAN.
        :type ex_vlan: ``str``

        :return: The created (and started) node.
        :rtype: :class:`libcloud.compute.base.Node`
        """
        is_installation_cd = self._is_installation_cd(image=image)
        if ex_vnc_password:
            vnc_password = ex_vnc_password
        else:
            # VNC password is not provided, generate a random one.
            vnc_password = get_secure_random_string(size=12)
        drive_name = '%s-drive' % (name)
        # size is specified in GB
        drive_size = (size.disk * 1024 * 1024 * 1024)
        if not is_installation_cd:
            # 1. Clone library drive so we can use it
            drive = self.ex_clone_drive(drive=image, name=drive_name)
            # Wait for drive clone to finish
            drive = self._wait_for_drive_state_transition(drive=drive,
                                                          state='unmounted')
            # 2. Resize drive to the desired disk size if the desired disk size
            # is larger than the cloned drive size.
            if drive_size > drive.size:
                drive = self.ex_resize_drive(drive=drive, size=drive_size)
            # Wait for drive resize to finish
            drive = self._wait_for_drive_state_transition(drive=drive,
                                                          state='unmounted')
        else:
            # No need to clone installation CDs
            drive = image
        # 3. Create server and attach cloned drive
        # ide 0:0
        data = {}
        data['name'] = name
        data['cpu'] = size.cpu
        # API expects memory in bytes; size.ram is in MB.
        data['mem'] = (size.ram * 1024 * 1024)
        data['vnc_password'] = vnc_password
        if ex_metadata:
            data['meta'] = ex_metadata
        # Assign 1 public interface (DHCP) to the node
        nic = {
            'boot_order': None,
            'ip_v4_conf': {
                'conf': 'dhcp',
            },
            'ip_v6_conf': None
        }
        nics = [nic]
        if ex_vlan:
            # Assign another interface for VLAN
            nic = {
                'boot_order': None,
                'ip_v4_conf': None,
                'ip_v6_conf': None,
                'vlan': ex_vlan
            }
            nics.append(nic)
        # Need to use IDE for installation CDs
        if is_installation_cd:
            device_type = 'ide'
        else:
            device_type = 'virtio'
        drive = {
            'boot_order': 1,
            'dev_channel': '0:0',
            'device': device_type,
            'drive': drive.id
        }
        drives = [drive]
        data['nics'] = nics
        data['drives'] = drives
        action = '/servers/'
        response = self.connection.request(action=action, method='POST',
                                           data=data)
        node = self._to_node(response.object['objects'][0])
        # 4. Start server
        self.ex_start_node(node=node, ex_avoid=ex_avoid)
        return node
def destroy_node(self, node):
"""
Destroy the node and all the associated drives.
:return: ``True`` on success, ``False`` otherwise.
:rtype: ``bool``
"""
action = '/servers/%s/' % (node.id)
params = {'recurse': 'all_drives'}
response = self.connection.request(action=action, method='DELETE',
params=params)
return response.status == httplib.NO_CONTENT
# Server extension methods
def ex_edit_node(self, node, params):
"""
Edit a node.
:param node: Node to edit.
:type node: :class:`libcloud.compute.base.Node`
:param params: Node parameters to update.
:type params: ``dict``
:return Edited node.
:rtype: :class:`libcloud.compute.base.Node`
"""
data = {}
# name, cpu, mem and vnc_password attributes must always be present so
# we just copy them from the to-be-edited node
data['name'] = node.name
data['cpu'] = node.extra['cpu']
data['mem'] = node.extra['mem']
data['vnc_password'] = node.extra['vnc_password']
nics = copy.deepcopy(node.extra.get('nics', []))
data['nics'] = nics
data.update(params)
action = '/servers/%s/' % (node.id)
response = self.connection.request(action=action, method='PUT',
data=data).object
node = self._to_node(data=response)
return node
def ex_start_node(self, node, ex_avoid=None):
"""
Start a node.
:param node: Node to start.
:type node: :class:`libcloud.compute.base.Node`
:param ex_avoid: A list of other server uuids to avoid when
starting this node. If provided, node will
attempt to be started on a different
physical infrastructure from other servers
specified using this argument. (optional)
:type ex_avoid: ``list``
"""
params = {}
if ex_avoid:
params['avoid'] = ','.join(ex_avoid)
path = '/servers/%s/action/' % (node.id)
response = self._perform_action(path=path, action='start',
params=params,
method='POST')
return response.status == httplib.ACCEPTED
def ex_stop_node(self, node):
"""
Stop a node.
"""
path = '/servers/%s/action/' % (node.id)
response = self._perform_action(path=path, action='stop',
method='POST')
return response.status == httplib.ACCEPTED
def ex_clone_node(self, node, name=None, random_vnc_password=None):
"""
Clone the provided node.
:param name: Optional name for the cloned node.
:type name: ``str``
:param random_vnc_password: If True, a new random VNC password will be
generated for the cloned node. Otherwise
password from the cloned node will be
reused.
:type random_vnc_password: ``bool``
:return: Cloned node.
:rtype: :class:`libcloud.compute.base.Node`
"""
data = {}
data['name'] = name
data['random_vnc_password'] = random_vnc_password
path = '/servers/%s/action/' % (node.id)
response = self._perform_action(path=path, action='clone',
method='POST', data=data).object
node = self._to_node(data=response)
return node
def ex_open_vnc_tunnel(self, node):
"""
Open a VNC tunnel to the provided node and return the VNC url.
:param node: Node to open the VNC tunnel to.
:type node: :class:`libcloud.compute.base.Node`
:return: URL of the opened VNC tunnel.
:rtype: ``str``
"""
path = '/servers/%s/action/' % (node.id)
response = self._perform_action(path=path, action='open_vnc',
method='POST').object
vnc_url = response['vnc_url']
return vnc_url
def ex_close_vnc_tunnel(self, node):
"""
Close a VNC server to the provided node.
:param node: Node to close the VNC tunnel to.
:type node: :class:`libcloud.compute.base.Node`
:return: ``True`` on success, ``False`` otherwise.
:rtype: ``bool``
"""
path = '/servers/%s/action/' % (node.id)
response = self._perform_action(path=path, action='close_vnc',
method='POST')
return response.status == httplib.ACCEPTED
# Drive extension methods
def ex_list_library_drives(self):
"""
Return a list of all the available library drives (pre-installed and
installation CDs).
:rtype: ``list`` of :class:`.CloudSigmaDrive` objects
"""
response = self.connection.request(action='/libdrives/').object
drives = [self._to_drive(data=item) for item in response['objects']]
return drives
def ex_list_user_drives(self):
"""
Return a list of all the available user's drives.
:rtype: ``list`` of :class:`.CloudSigmaDrive` objects
"""
response = self.connection.request(action='/drives/detail/').object
drives = [self._to_drive(data=item) for item in response['objects']]
return drives
def ex_create_drive(self, name, size, media='disk', ex_avoid=None):
"""
Create a new drive.
:param name: Drive name.
:type name: ``str``
:param size: Drive size in bytes.
:type size: ``int``
:param media: Drive media type (cdrom, disk).
:type media: ``str``
:param ex_avoid: A list of other drive uuids to avoid when
creating this drive. If provided, drive will
attempt to be created on a different
physical infrastructure from other drives
specified using this argument. (optional)
:type ex_avoid: ``list``
:return: Created drive object.
:rtype: :class:`.CloudSigmaDrive`
"""
params = {}
data = {
'name': name,
'size': size,
'media': media
}
if ex_avoid:
params['avoid'] = ','.join(ex_avoid)
action = '/drives/'
response = self.connection.request(action=action, method='POST',
params=params, data=data).object
drive = self._to_drive(data=response['objects'][0])
return drive
def ex_clone_drive(self, drive, name=None, ex_avoid=None):
"""
Clone a library or a standard drive.
:param drive: Drive to clone.
:type drive: :class:`libcloud.compute.base.NodeImage` or
:class:`.CloudSigmaDrive`
:param name: Optional name for the cloned drive.
:type name: ``str``
:param ex_avoid: A list of other drive uuids to avoid when
creating this drive. If provided, drive will
attempt to be created on a different
physical infrastructure from other drives
specified using this argument. (optional)
:type ex_avoid: ``list``
:return: New cloned drive.
:rtype: :class:`.CloudSigmaDrive`
"""
params = {}
data = {}
if ex_avoid:
params['avoid'] = ','.join(ex_avoid)
if name:
data['name'] = name
path = '/drives/%s/action/' % (drive.id)
response = self._perform_action(path=path, action='clone',
params=params, data=data,
method='POST')
drive = self._to_drive(data=response.object['objects'][0])
return drive
def ex_resize_drive(self, drive, size):
"""
Resize a drive.
:param drive: Drive to resize.
:param size: New drive size in bytes.
:type size: ``int``
:return: Drive object which is being resized.
:rtype: :class:`.CloudSigmaDrive`
"""
path = '/drives/%s/action/' % (drive.id)
data = {'name': drive.name, 'size': size, 'media': 'disk'}
response = self._perform_action(path=path, action='resize',
method='POST', data=data)
drive = self._to_drive(data=response.object['objects'][0])
return drive
    def ex_attach_drive(self, node):
        """
        Attach a drive to the provided node.

        NOTE(review): not implemented yet - this is a placeholder stub.
        """
        # TODO: implement by updating the server's "drives" list via the API.
        pass
def ex_get_drive(self, drive_id):
"""
Retrieve information about a single drive.
:param drive_id: ID of the drive to retrieve.
:type drive_id: ``str``
:return: Drive object.
:rtype: :class:`.CloudSigmaDrive`
"""
action = '/drives/%s/' % (drive_id)
response = self.connection.request(action=action).object
drive = self._to_drive(data=response)
return drive
# Firewall policies extension methods
def ex_list_firewall_policies(self):
"""
List firewall policies.
:rtype: ``list`` of :class:`.CloudSigmaFirewallPolicy`
"""
action = '/fwpolicies/detail/'
response = self.connection.request(action=action, method='GET').object
policies = [self._to_firewall_policy(data=item) for item
in response['objects']]
return policies
def ex_create_firewall_policy(self, name, rules=None):
"""
Create a firewall policy.
:param name: Policy name.
:type name: ``str``
:param rules: List of firewall policy rules to associate with this
policy. (optional)
:type rules: ``list`` of ``dict``
:return: Created firewall policy object.
:rtype: :class:`.CloudSigmaFirewallPolicy`
"""
data = {}
obj = {}
obj['name'] = name
if rules:
obj['rules'] = rules
data['objects'] = [obj]
action = '/fwpolicies/'
response = self.connection.request(action=action, method='POST',
data=data).object
policy = self._to_firewall_policy(data=response['objects'][0])
return policy
def ex_attach_firewall_policy(self, policy, node, nic_mac=None):
"""
Attach firewall policy to a public NIC interface on the server.
:param policy: Firewall policy to attach.
:type policy: :class:`.CloudSigmaFirewallPolicy`
:param node: Node to attach policy to.
:type node: :class:`libcloud.compute.base.Node`
:param nic_mac: Optional MAC address of the NIC to add the policy to.
If not specified, first public interface is used
instead.
:type nic_mac: ``str``
:return: Node object to which the policy was attached to.
:rtype: :class:`libcloud.compute.base.Node`
"""
nics = copy.deepcopy(node.extra.get('nics', []))
if nic_mac:
nic = [n for n in nics if n['mac'] == nic_mac]
else:
nic = nics
if len(nic) == 0:
raise ValueError('Cannot find the NIC interface to attach '
'a policy to')
nic = nic[0]
nic['firewall_policy'] = policy.id
params = {'nics': nics}
node = self.ex_edit_node(node=node, params=params)
return node
def ex_delete_firewall_policy(self, policy):
"""
Delete a firewall policy.
:param policy: Policy to delete to.
:type policy: :class:`.CloudSigmaFirewallPolicy`
:return: ``True`` on success, ``False`` otherwise.
:rtype: ``bool``
"""
action = '/fwpolicies/%s/' % (policy.id)
response = self.connection.request(action=action, method='DELETE')
return response.status == httplib.NO_CONTENT
# Availability groups extension methods
def ex_list_servers_availability_groups(self):
"""
Return which running servers share the same physical compute host.
:return: A list of server UUIDs which share the same physical compute
host. Servers which share the same host will be stored under
the same list index.
:rtype: ``list`` of ``list``
"""
action = '/servers/availability_groups/'
response = self.connection.request(action=action, method='GET')
return response.object
def ex_list_drives_availability_groups(self):
"""
Return which drives share the same physical storage host.
:return: A list of drive UUIDs which share the same physical storage
host. Drives which share the same host will be stored under
the same list index.
:rtype: ``list`` of ``list``
"""
action = '/drives/availability_groups/'
response = self.connection.request(action=action, method='GET')
return response.object
# Tag extension methods
def ex_list_tags(self):
"""
List all the available tags.
:rtype: ``list`` of :class:`.CloudSigmaTag` objects
"""
action = '/tags/detail/'
response = self.connection.request(action=action, method='GET').object
tags = [self._to_tag(data=item) for item in response['objects']]
return tags
def ex_get_tag(self, tag_id):
"""
Retrieve a single tag.
:param tag_id: ID of the tag to retrieve.
:type tag_id: ``str``
:rtype: ``list`` of :class:`.CloudSigmaTag` objects
"""
action = '/tags/%s/' % (tag_id)
response = self.connection.request(action=action, method='GET').object
tag = self._to_tag(data=response)
return tag
def ex_create_tag(self, name, resource_uuids=None):
"""
Create a tag.
:param name: Tag name.
:type name: ``str``
:param resource_uuids: Optional list of resource UUIDs to assign this
tag go.
:type resource_uuids: ``list`` of ``str``
:return: Created tag object.
:rtype: :class:`.CloudSigmaTag`
"""
data = {}
data['objects'] = [
{
'name': name
}
]
if resource_uuids:
data['resources'] = resource_uuids
action = '/tags/'
response = self.connection.request(action=action, method='POST',
data=data).object
tag = self._to_tag(data=response['objects'][0])
return tag
    def ex_tag_resource(self, resource, tag):
        """
        Associate tag with the provided resource.

        :param resource: Resource to associate a tag with.
        :type resource: :class:`libcloud.compute.base.Node` or
                        :class:`.CloudSigmaDrive`

        :param tag: Tag to associate with the resources.
        :type tag: :class:`.CloudSigmaTag`

        :return: Updated tag object.
        :rtype: :class:`.CloudSigmaTag`
        """
        if not hasattr(resource, 'id'):
            raise ValueError('Resource doesn\'t have id attribute')
        # Delegate to the bulk method with a single-element list.
        return self.ex_tag_resources(resources=[resource], tag=tag)
def ex_tag_resources(self, resources, tag):
"""
Associate tag with the provided resources.
:param resources: Resources to associate a tag with.
:type resources: ``list`` of :class:`libcloud.compute.base.Node` or
:class:`.CloudSigmaDrive`
:param tag: Tag to associate with the resources.
:type tag: :class:`.CloudSigmaTag`
:return: Updated tag object.
:rtype: :class:`.CloudSigmaTag`
"""
resources = tag.resources[:]
for resource in resources:
if not hasattr(resource, 'id'):
raise ValueError('Resource doesn\'t have id attribute')
resources.append(resource.id)
resources = list(set(resources))
data = {
'name': tag.name,
'resources': resources
}
action = '/tags/%s/' % (tag.id)
response = self.connection.request(action=action, method='PUT',
data=data).object
tag = self._to_tag(data=response)
return tag
def ex_delete_tag(self, tag):
"""
Delete a tag.
:param tag: Tag to delete.
:type tag: :class:`.CloudSigmaTag`
:return: ``True`` on success, ``False`` otherwise.
:rtype: ``bool``
"""
action = '/tags/%s/' % (tag.id)
response = self.connection.request(action=action, method='DELETE')
return response.status == httplib.NO_CONTENT
# Account extension methods
def ex_get_balance(self):
"""
Retrueve account balance information.
:return: Dictionary with two items ("balance" and "currency").
:rtype: ``dict``
"""
action = '/balance/'
response = self.connection.request(action=action, method='GET')
return response.object
def ex_get_pricing(self):
"""
Retrive pricing information that are applicable to the cloud.
:return: Dictionary with pricing information.
:rtype: ``dict``
"""
action = '/pricing/'
response = self.connection.request(action=action, method='GET')
return response.object
def ex_get_usage(self):
"""
Retrieve account current usage information.
:return: Dictionary with two items ("balance" and "usage").
:rtype: ``dict``
"""
action = '/currentusage/'
response = self.connection.request(action=action, method='GET')
return response.object
def ex_list_subscriptions(self, status='all', resources=None):
"""
List subscriptions for this account.
:param status: Only return subscriptions with the provided status
(optional).
:type status: ``str``
:param resources: Only return subscriptions for the provided resources
(optional).
:type resources: ``list``
:rtype: ``list``
"""
params = {}
if status:
params['status'] = status
if resources:
params['resource'] = ','.join(resources)
response = self.connection.request(action='/subscriptions/',
params=params).object
subscriptions = self._to_subscriptions(data=response)
return subscriptions
def ex_toggle_subscription_auto_renew(self, subscription):
"""
Toggle subscription auto renew status.
:param subscription: Subscription to toggle the auto renew flag for.
:type subscription: :class:`.CloudSigmaSubscription`
:return: ``True`` on success, ``False`` otherwise.
:rtype: ``bool``
"""
path = '/subscriptions/%s/action/' % (subscription.id)
response = self._perform_action(path=path, action='auto_renew',
method='POST')
return response.status == httplib.OK
def ex_create_subscription(self, amount, period, resource,
auto_renew=False):
"""
Create a new subscription.
:param amount: Subscription amount. For example, in dssd case this
would be disk size in gigabytes.
:type amount: ``int``
:param period: Subscription period. For example: 30 days, 1 week, 1
month, ...
:type period: ``str``
:param resource: Resource the purchase the subscription for.
:type resource: ``str``
:param auto_renew: True to automatically renew the subscription.
:type auto_renew: ``bool``
"""
data = [
{
'amount': amount,
'period': period,
'auto_renew': auto_renew,
'resource': resource
}
]
response = self.connection.request(action='/subscriptions/',
data=data, method='POST')
data = response.object['objects'][0]
subscription = self._to_subscription(data=data)
return subscription
# Misc extension methods
def ex_list_capabilities(self):
"""
Retrieve all the basic and sensible limits of the API.
:rtype: ``dict``
"""
action = '/capabilities/'
response = self.connection.request(action=action,
method='GET')
capabilities = response.object
return capabilities
def _parse_ips_from_nic(self, nic):
"""
Parse private and public IP addresses from the provided network
interface object.
:param nic: NIC object.
:type nic: ``dict``
:return: (public_ips, private_ips) tuple.
:rtype: ``tuple``
"""
public_ips, private_ips = [], []
ipv4_conf = nic['ip_v4_conf']
ipv6_conf = nic['ip_v6_conf']
ip_v4 = ipv4_conf['ip'] if ipv4_conf else None
ip_v6 = ipv6_conf['ip'] if ipv6_conf else None
ipv4 = ip_v4['uuid'] if ip_v4 else None
ipv6 = ip_v4['uuid'] if ip_v6 else None
ips = []
if ipv4:
ips.append(ipv4)
if ipv6:
ips.append(ipv6)
runtime = nic['runtime']
ip_v4 = runtime['ip_v4'] if nic['runtime'] else None
ip_v6 = runtime['ip_v6'] if nic['runtime'] else None
ipv4 = ip_v4['uuid'] if ip_v4 else None
ipv6 = ip_v4['uuid'] if ip_v6 else None
if ipv4:
ips.append(ipv4)
if ipv6:
ips.append(ipv6)
ips = set(ips)
for ip in ips:
if is_private_subnet(ip):
private_ips.append(ip)
else:
public_ips.append(ip)
return public_ips, private_ips
def _to_node(self, data):
extra_keys = ['cpu', 'mem', 'nics', 'vnc_password', 'meta']
id = data['uuid']
name = data['name']
state = self.NODE_STATE_MAP.get(data['status'], NodeState.UNKNOWN)
public_ips = []
private_ips = []
extra = self._extract_values(obj=data, keys=extra_keys)
for nic in data['nics']:
_public_ips, _private_ips = self._parse_ips_from_nic(nic=nic)
public_ips.extend(_public_ips)
private_ips.extend(_private_ips)
node = Node(id=id, name=name, state=state, public_ips=public_ips,
private_ips=private_ips, driver=self, extra=extra)
return node
def _to_image(self, data):
extra_keys = ['description', 'arch', 'image_type', 'os', 'licenses',
'media', 'meta']
id = data['uuid']
name = data['name']
extra = self._extract_values(obj=data, keys=extra_keys)
image = NodeImage(id=id, name=name, driver=self, extra=extra)
return image
def _to_drive(self, data):
id = data['uuid']
name = data['name']
size = data['size']
media = data['media']
status = data['status']
extra = {}
drive = CloudSigmaDrive(id=id, name=name, size=size, media=media,
status=status, driver=self, extra=extra)
return drive
def _to_tag(self, data):
resources = data['resources']
resources = [resource['uuid'] for resource in resources]
tag = CloudSigmaTag(id=data['uuid'], name=data['name'],
resources=resources)
return tag
def _to_subscriptions(self, data):
subscriptions = []
for item in data['objects']:
subscription = self._to_subscription(data=item)
subscriptions.append(subscription)
return subscriptions
def _to_subscription(self, data):
start_time = parse_date(data['start_time'])
end_time = parse_date(data['end_time'])
obj_uuid = data['subscribed_object']
subscription = CloudSigmaSubscription(id=data['id'],
resource=data['resource'],
amount=int(data['amount']),
period=data['period'],
status=data['status'],
price=data['price'],
start_time=start_time,
end_time=end_time,
auto_renew=data['auto_renew'],
subscribed_object=obj_uuid)
return subscription
def _to_firewall_policy(self, data):
rules = []
for item in data.get('rules', []):
rule = CloudSigmaFirewallPolicyRule(action=item['action'],
direction=item['direction'],
ip_proto=item['ip_proto'],
src_ip=item['src_ip'],
src_port=item['src_port'],
dst_ip=item['dst_ip'],
dst_port=item['dst_port'],
comment=item['comment'])
rules.append(rule)
policy = CloudSigmaFirewallPolicy(id=data['uuid'], name=data['name'],
rules=rules)
return policy
def _perform_action(self, path, action, method='POST', params=None,
data=None):
"""
Perform API action and return response object.
"""
if params:
params = params.copy()
else:
params = {}
params['do'] = action
response = self.connection.request(action=path, method=method,
params=params, data=data)
return response
def _is_installation_cd(self, image):
"""
Detect if the provided image is an installation CD.
:rtype: ``bool``
"""
if isinstance(image, CloudSigmaDrive) and image.media == 'cdrom':
return True
return False
def _extract_values(self, obj, keys):
"""
Extract values from a dictionary and return a new dictionary with
extracted values.
:param obj: Dictionary to extract values from.
:type obj: ``dict``
:param keys: Keys to extract.
:type keys: ``list``
:return: Dictionary with extracted values.
:rtype: ``dict``
"""
result = {}
for key in keys:
result[key] = obj[key]
return result
def _wait_for_drive_state_transition(self, drive, state,
timeout=DRIVE_TRANSITION_TIMEOUT):
"""
Wait for a drive to transition to the provided state.
Note: This function blocks and periodically calls "GET drive" endpoint
to check if the drive has already transitioned to the desired state.
:param drive: Drive to wait for.
:type drive: :class:`.CloudSigmaDrive`
:param state: Desired drive state.
:type state: ``str``
:param timeout: How long to wait for the transition (in seconds) before
timing out.
:type timeout: ``int``
:return: Drive object.
:rtype: :class:`.CloudSigmaDrive`
"""
start_time = time.time()
while drive.status != state:
drive = self.ex_get_drive(drive_id=drive.id)
if drive.status == state:
break
current_time = time.time()
delta = (current_time - start_time)
if delta >= timeout:
msg = ('Timed out while waiting for drive transition '
'(timeout=%s seconds)' % (timeout))
raise Exception(msg)
time.sleep(self.DRIVE_TRANSITION_SLEEP_INTERVAL)
return drive
def _ex_connection_class_kwargs(self):
"""
Return the host value based on the user supplied region.
"""
kwargs = {}
if not self._host_argument_set:
kwargs['host'] = API_ENDPOINTS_2_0[self.region]['host']
return kwargs
|
apache-2.0
|
imincik/pkg-qgis-1.8
|
python/plugins/fTools/tools/doRegPoints.py
|
2
|
7266
|
# -*- coding: utf-8 -*-
#-----------------------------------------------------------
#
# fTools
# Copyright (C) 2008-2011 Carson Farmer
# EMAIL: carson.farmer (at) gmail.com
# WEB : http://www.ftools.ca/fTools.html
#
# A collection of data management and analysis tools for vector data
#
#-----------------------------------------------------------
#
# licensed under the terms of GNU GPL 2
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#---------------------------------------------------------------------
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import ftools_utils
from qgis.core import *
from random import *
from math import *
from ui_frmRegPoints import Ui_Dialog
class Dialog(QDialog, Ui_Dialog):
    def __init__(self, iface):
        # iface: QGIS interface instance, used to access the main window and
        # the map canvas.
        QDialog.__init__(self, iface.mainWindow())
        self.iface = iface
        self.setupUi(self)
        # Restrict the extent line edits to numeric (double) input only.
        self.xMin.setValidator(QDoubleValidator(self.xMin))
        self.xMax.setValidator(QDoubleValidator(self.xMax))
        self.yMin.setValidator(QDoubleValidator(self.yMin))
        self.yMax.setValidator(QDoubleValidator(self.yMax))
        # Old-style PyQt4 signal connection for the output-file browse button.
        QObject.connect(self.toolOut, SIGNAL("clicked()"), self.outFile)
        self.setWindowTitle( self.tr("Regular points") )
        self.buttonOk = self.buttonBox_2.button( QDialogButtonBox.Ok )
        self.progressBar.setValue(0)
        self.mapCanvas = self.iface.mapCanvas()
        # Fill the input-layer combo box with the currently loaded layers.
        self.populateLayers()
def populateLayers( self ):
layers = ftools_utils.getLayerNames("all")
self.inShape.clear()
self.inShape.addItems(layers)
def accept(self):
self.buttonOk.setEnabled( False )
if not self.rdoCoordinates.isChecked() and self.inShape.currentText() == "":
QMessageBox.information(self, self.tr("Generate Regular Points"), self.tr("Please specify input layer"))
elif self.rdoCoordinates.isChecked() and (self.xMin.text() == "" or self.xMax.text() == "" or self.yMin.text() == "" or self.yMax.text() == ""):
QMessageBox.information(self, self.tr("Generate Regular Points"), self.tr("Please properly specify extent coordinates"))
elif self.outShape.text() == "":
QMessageBox.information(self, self.tr("Generate Regular Points"), self.tr("Please specify output shapefile"))
else:
inName = self.inShape.currentText()
outPath = self.outShape.text()
self.outShape.clear()
if outPath.contains("\\"):
outName = outPath.right((outPath.length() - outPath.lastIndexOf("\\")) - 1)
else:
outName = outPath.right((outPath.length() - outPath.lastIndexOf("/")) - 1)
if outName.endsWith(".shp"):
outName = outName.left(outName.length() - 4)
if self.rdoSpacing.isChecked(): value = self.spnSpacing.value()
else: value = self.spnNumber.value()
if self.chkRandom.isChecked(): offset = True
else: offset = False
if self.rdoBoundary.isChecked():
mLayer = ftools_utils.getMapLayerByName(unicode(inName))
boundBox = mLayer.extent()
crs = mLayer.crs()
else:
boundBox = QgsRectangle(float(self.xMin.text()), float(self.yMin.text()), float(self.xMax.text()), float(self.yMax.text()))
crs = self.mapCanvas.mapRenderer().destinationSrs()
print crs.isValid()
if not crs.isValid(): crs = None
self.regularize(boundBox, outPath, offset, value, self.rdoSpacing.isChecked(), self.spnInset.value(), crs)
addToTOC = QMessageBox.question(self, self.tr("Generate Regular Points"), self.tr("Created output point shapefile:\n%1\n\nWould you like to add the new layer to the TOC?").arg( outPath ), QMessageBox.Yes, QMessageBox.No, QMessageBox.NoButton)
if addToTOC == QMessageBox.Yes:
self.vlayer = QgsVectorLayer(outPath, unicode(outName), "ogr")
QgsMapLayerRegistry.instance().addMapLayer(self.vlayer)
self.populateLayers()
self.progressBar.setValue(0)
self.buttonOk.setEnabled( True )
def outFile(self):
self.outShape.clear()
( self.shapefileName, self.encoding ) = ftools_utils.saveDialog( self )
if self.shapefileName is None or self.encoding is None:
return
self.outShape.setText( QString( self.shapefileName ) )
# Generate list of random points
def simpleRandom(self, n, bound, xmin, xmax, ymin, ymax):
seed()
points = []
i = 1
while i <= n:
pGeom = QgsGeometry().fromPoint(QgsPoint(xmin + (xmax-xmin) * random(), ymin + (ymax-ymin) * random()))
if pGeom.intersects(bound):
points.append(pGeom)
i = i + 1
return points
def regularize(self, bound, outPath, offset, value, gridType, inset, crs):
area = bound.width() * bound.height()
if offset:
seed()
if gridType:
pointSpacing = value
else:
# Calculate grid spacing
pointSpacing = sqrt(area / value)
outFeat = QgsFeature()
fields = { 0 : QgsField("ID", QVariant.Int) }
check = QFile(self.shapefileName)
if check.exists():
if not QgsVectorFileWriter.deleteShapeFile(self.shapefileName):
return
writer = QgsVectorFileWriter(self.shapefileName, self.encoding, fields, QGis.WKBPoint, crs)
#writer = QgsVectorFileWriter(unicode(outPath), "CP1250", fields, QGis.WKBPoint, None)
idVar = 0
count = 10.00
add = 90.00 / (area / pointSpacing)
y = bound.yMaximum() - inset
while y >= bound.yMinimum():
x = bound.xMinimum() + inset
while x <= bound.xMaximum():
if offset:
pGeom = QgsGeometry().fromPoint(QgsPoint(uniform(x - (pointSpacing / 2.0), x + (pointSpacing / 2.0)),
uniform(y - (pointSpacing / 2.0), y + (pointSpacing / 2.0))))
else:
pGeom = QgsGeometry().fromPoint(QgsPoint(x, y))
if pGeom.intersects(bound):
outFeat.setGeometry(pGeom)
outFeat.addAttribute(0, QVariant(idVar))
writer.addFeature(outFeat)
idVar = idVar + 1
x = x + pointSpacing
count = count + add
self.progressBar.setValue(count)
y = y - pointSpacing
del writer
|
gpl-2.0
|
menardorama/ReadyNAS-Add-ons
|
headphones-1.0.0/files/apps/headphones/headphones/config.py
|
6
|
19704
|
import headphones.logger
import itertools
import os
import re
from configobj import ConfigObj
def bool_int(value):
    """
    Casts a config value into a 0 or 1
    """
    # Strings spelling "no"/"off"/"false" (or empty / "0") count as false;
    # anything else falls through to ordinary Python truthiness.
    falsey = ('', '0', 'false', 'f', 'no', 'n', 'off')
    if isinstance(value, basestring) and value.lower() in falsey:
        return 0
    return int(bool(value))
# Master table of every supported setting. Each entry maps the attribute
# name (all caps, as used via Config.__getattr__/__setattr__) to a tuple of
# (type-caster, INI section, default value); Config._define() unpacks it.
_CONFIG_DEFINITIONS = {
    'ADD_ALBUM_ART': (int, 'General', 0),
    'ADVANCEDENCODER': (str, 'General', ''),
    'ALBUM_ART_FORMAT': (str, 'General', 'folder'),
    # This is used in importer.py to determine how complete an album needs to
    # be - to be considered "downloaded". Percentage from 0-100
    'ALBUM_COMPLETION_PCT': (int, 'Advanced', 80),
    'API_ENABLED': (int, 'General', 0),
    'API_KEY': (str, 'General', ''),
    'AUTOWANT_ALL': (int, 'General', 0),
    'AUTOWANT_MANUALLY_ADDED': (int, 'General', 1),
    'AUTOWANT_UPCOMING': (int, 'General', 1),
    'AUTO_ADD_ARTISTS': (int, 'General', 1),
    'BITRATE': (int, 'General', 192),
    'BLACKHOLE': (int, 'General', 0),
    'BLACKHOLE_DIR': (str, 'General', ''),
    'BOXCAR_ENABLED': (int, 'Boxcar', 0),
    'BOXCAR_ONSNATCH': (int, 'Boxcar', 0),
    'BOXCAR_TOKEN': (str, 'Boxcar', ''),
    'CACHE_DIR': (str, 'General', ''),
    'CACHE_SIZEMB': (int, 'Advanced', 32),
    'CHECK_GITHUB': (int, 'General', 1),
    'CHECK_GITHUB_INTERVAL': (int, 'General', 360),
    'CHECK_GITHUB_ON_STARTUP': (int, 'General', 1),
    'CLEANUP_FILES': (int, 'General', 0),
    'CONFIG_VERSION': (str, 'General', '0'),
    'CORRECT_METADATA': (int, 'General', 0),
    'CUE_SPLIT': (int, 'General', 1),
    'CUE_SPLIT_FLAC_PATH': (str, 'General', ''),
    'CUE_SPLIT_SHNTOOL_PATH': (str, 'General', ''),
    'CUSTOMAUTH': (int, 'General', 0),
    'CUSTOMHOST': (str, 'General', 'localhost'),
    'CUSTOMPASS': (str, 'General', ''),
    'CUSTOMPORT': (int, 'General', 5000),
    'CUSTOMSLEEP': (int, 'General', 1),
    'CUSTOMUSER': (str, 'General', ''),
    'DELETE_LOSSLESS_FILES': (int, 'General', 1),
    'DESTINATION_DIR': (str, 'General', ''),
    'DETECT_BITRATE': (int, 'General', 0),
    'DO_NOT_PROCESS_UNMATCHED': (int, 'General', 0),
    'DOWNLOAD_DIR': (str, 'General', ''),
    'DOWNLOAD_SCAN_INTERVAL': (int, 'General', 5),
    'DOWNLOAD_TORRENT_DIR': (str, 'General', ''),
    'DO_NOT_OVERRIDE_GIT_BRANCH': (int, 'General', 0),
    'EMAIL_ENABLED': (int, 'Email', 0),
    'EMAIL_FROM': (str, 'Email', ''),
    'EMAIL_TO': (str, 'Email', ''),
    'EMAIL_SMTP_SERVER': (str, 'Email', ''),
    'EMAIL_SMTP_USER': (str, 'Email', ''),
    'EMAIL_SMTP_PASSWORD': (str, 'Email', ''),
    'EMAIL_SMTP_PORT': (int, 'Email', 25),
    'EMAIL_SSL': (int, 'Email', 0),
    'EMAIL_TLS': (int, 'Email', 0),
    'EMAIL_ONSNATCH': (int, 'Email', 0),
    'EMBED_ALBUM_ART': (int, 'General', 0),
    'EMBED_LYRICS': (int, 'General', 0),
    'ENABLE_HTTPS': (int, 'General', 0),
    'ENCODER': (str, 'General', 'ffmpeg'),
    'ENCODERFOLDER': (str, 'General', ''),
    'ENCODERLOSSLESS': (int, 'General', 1),
    'ENCODEROUTPUTFORMAT': (str, 'General', 'mp3'),
    'ENCODERQUALITY': (int, 'General', 2),
    'ENCODERVBRCBR': (str, 'General', 'cbr'),
    'ENCODER_MULTICORE': (int, 'General', 0),
    'ENCODER_MULTICORE_COUNT': (int, 'General', 0),
    'ENCODER_PATH': (str, 'General', ''),
    'EXTRAS': (str, 'General', ''),
    # Flattened (host, apikey, enabled) triples; see get_extra_newznabs().
    'EXTRA_NEWZNABS': (list, 'Newznab', ''),
    'EXTRA_TORZNABS': (list, 'Torznab', ''),
    'FILE_FORMAT': (str, 'General', 'Track Artist - Album [Year] - Title'),
    'FILE_PERMISSIONS': (str, 'General', '0644'),
    'FILE_UNDERSCORES': (int, 'General', 0),
    'FOLDER_FORMAT': (str, 'General', 'Artist/Album [Year]'),
    'FOLDER_PERMISSIONS': (str, 'General', '0755'),
    'FREEZE_DB': (int, 'General', 0),
    'GIT_BRANCH': (str, 'General', 'master'),
    'GIT_PATH': (str, 'General', ''),
    'GIT_USER': (str, 'General', 'rembo10'),
    'GROWL_ENABLED': (int, 'Growl', 0),
    'GROWL_HOST': (str, 'Growl', ''),
    'GROWL_ONSNATCH': (int, 'Growl', 0),
    'GROWL_PASSWORD': (str, 'Growl', ''),
    'HEADPHONES_INDEXER': (bool_int, 'General', False),
    'HPPASS': (str, 'General', ''),
    'HPUSER': (str, 'General', ''),
    'HTTPS_CERT': (str, 'General', ''),
    'HTTPS_KEY': (str, 'General', ''),
    'HTTP_HOST': (str, 'General', 'localhost'),
    'HTTP_PASSWORD': (str, 'General', ''),
    'HTTP_PORT': (int, 'General', 8181),
    'HTTP_PROXY': (int, 'General', 0),
    'HTTP_ROOT': (str, 'General', '/'),
    'HTTP_USERNAME': (str, 'General', ''),
    'IDTAG': (int, 'Beets', 0),
    'IGNORE_CLEAN_RELEASES': (int, 'General', 0),
    'IGNORED_WORDS': (str, 'General', ''),
    'IGNORED_FOLDERS': (list, 'Advanced', []),
    'IGNORED_FILES': (list, 'Advanced', []),
    'INCLUDE_EXTRAS': (int, 'General', 0),
    'INTERFACE': (str, 'General', 'default'),
    'JOURNAL_MODE': (str, 'Advanced', 'wal'),
    'KAT': (int, 'Kat', 0),
    'KAT_PROXY_URL': (str, 'Kat', ''),
    'KAT_RATIO': (str, 'Kat', ''),
    'KEEP_NFO': (int, 'General', 0),
    'KEEP_TORRENT_FILES': (int, 'General', 0),
    'LASTFM_USERNAME': (str, 'General', ''),
    'LAUNCH_BROWSER': (int, 'General', 1),
    'LIBRARYSCAN': (int, 'General', 1),
    'LIBRARYSCAN_INTERVAL': (int, 'General', 300),
    'LMS_ENABLED': (int, 'LMS', 0),
    'LMS_HOST': (str, 'LMS', ''),
    'LOG_DIR': (str, 'General', ''),
    'LOSSLESS_BITRATE_FROM': (int, 'General', 0),
    'LOSSLESS_BITRATE_TO': (int, 'General', 0),
    'LOSSLESS_DESTINATION_DIR': (str, 'General', ''),
    'MB_IGNORE_AGE': (int, 'General', 365),
    'MININOVA': (int, 'Mininova', 0),
    'MININOVA_RATIO': (str, 'Mininova', ''),
    'MIRROR': (str, 'General', 'musicbrainz.org'),
    'MOVE_FILES': (int, 'General', 0),
    'MPC_ENABLED': (bool_int, 'MPC', False),
    'MUSIC_DIR': (str, 'General', ''),
    'MUSIC_ENCODER': (int, 'General', 0),
    'NEWZNAB': (int, 'Newznab', 0),
    'NEWZNAB_APIKEY': (str, 'Newznab', ''),
    'NEWZNAB_ENABLED': (int, 'Newznab', 1),
    'NEWZNAB_HOST': (str, 'Newznab', ''),
    'NMA_APIKEY': (str, 'NMA', ''),
    'NMA_ENABLED': (int, 'NMA', 0),
    'NMA_ONSNATCH': (int, 'NMA', 0),
    'NMA_PRIORITY': (int, 'NMA', 0),
    'NUMBEROFSEEDERS': (str, 'General', '10'),
    'NZBGET_CATEGORY': (str, 'NZBget', ''),
    'NZBGET_HOST': (str, 'NZBget', ''),
    'NZBGET_PASSWORD': (str, 'NZBget', ''),
    'NZBGET_PRIORITY': (int, 'NZBget', 0),
    'NZBGET_USERNAME': (str, 'NZBget', 'nzbget'),
    'NZBSORG': (int, 'NZBsorg', 0),
    'NZBSORG_HASH': (str, 'NZBsorg', ''),
    'NZBSORG_UID': (str, 'NZBsorg', ''),
    'NZB_DOWNLOADER': (int, 'General', 0),
    'OFFICIAL_RELEASES_ONLY': (int, 'General', 0),
    'OMGWTFNZBS': (int, 'omgwtfnzbs', 0),
    'OMGWTFNZBS_APIKEY': (str, 'omgwtfnzbs', ''),
    'OMGWTFNZBS_UID': (str, 'omgwtfnzbs', ''),
    'OPEN_MAGNET_LINKS': (int, 'General', 0),  # 0: Ignore, 1: Open, 2: Convert
    'MAGNET_LINKS': (int, 'General', 0),
    'OSX_NOTIFY_APP': (str, 'OSX_Notify', '/Applications/Headphones'),
    'OSX_NOTIFY_ENABLED': (int, 'OSX_Notify', 0),
    'OSX_NOTIFY_ONSNATCH': (int, 'OSX_Notify', 0),
    'PIRATEBAY': (int, 'Piratebay', 0),
    'PIRATEBAY_PROXY_URL': (str, 'Piratebay', ''),
    'PIRATEBAY_RATIO': (str, 'Piratebay', ''),
    'OLDPIRATEBAY': (int, 'Old Piratebay', 0),
    'OLDPIRATEBAY_URL': (str, 'Old Piratebay', ''),
    'OLDPIRATEBAY_RATIO': (str, 'Old Piratebay', ''),
    'PLEX_CLIENT_HOST': (str, 'Plex', ''),
    'PLEX_ENABLED': (int, 'Plex', 0),
    'PLEX_NOTIFY': (int, 'Plex', 0),
    'PLEX_PASSWORD': (str, 'Plex', ''),
    'PLEX_SERVER_HOST': (str, 'Plex', ''),
    'PLEX_UPDATE': (int, 'Plex', 0),
    'PLEX_USERNAME': (str, 'Plex', ''),
    'PLEX_TOKEN': (str, 'Plex', ''),
    'PREFERRED_BITRATE': (str, 'General', ''),
    'PREFERRED_BITRATE_ALLOW_LOSSLESS': (int, 'General', 0),
    'PREFERRED_BITRATE_HIGH_BUFFER': (int, 'General', 0),
    'PREFERRED_BITRATE_LOW_BUFFER': (int, 'General', 0),
    'PREFERRED_QUALITY': (int, 'General', 0),
    'PREFERRED_WORDS': (str, 'General', ''),
    'PREFER_TORRENTS': (int, 'General', 0),
    'PROWL_ENABLED': (int, 'Prowl', 0),
    'PROWL_KEYS': (str, 'Prowl', ''),
    'PROWL_ONSNATCH': (int, 'Prowl', 0),
    'PROWL_PRIORITY': (int, 'Prowl', 0),
    'PUSHALOT_APIKEY': (str, 'Pushalot', ''),
    'PUSHALOT_ENABLED': (int, 'Pushalot', 0),
    'PUSHALOT_ONSNATCH': (int, 'Pushalot', 0),
    'PUSHBULLET_APIKEY': (str, 'PushBullet', ''),
    'PUSHBULLET_DEVICEID': (str, 'PushBullet', ''),
    'PUSHBULLET_ENABLED': (int, 'PushBullet', 0),
    'PUSHBULLET_ONSNATCH': (int, 'PushBullet', 0),
    'PUSHOVER_APITOKEN': (str, 'Pushover', ''),
    'PUSHOVER_ENABLED': (int, 'Pushover', 0),
    'PUSHOVER_KEYS': (str, 'Pushover', ''),
    'PUSHOVER_ONSNATCH': (int, 'Pushover', 0),
    'PUSHOVER_PRIORITY': (int, 'Pushover', 0),
    'RENAME_FILES': (int, 'General', 0),
    'REPLACE_EXISTING_FOLDERS': (int, 'General', 0),
    'KEEP_ORIGINAL_FOLDER': (int, 'General', 0),
    'REQUIRED_WORDS': (str, 'General', ''),
    'RUTRACKER': (int, 'Rutracker', 0),
    'RUTRACKER_PASSWORD': (str, 'Rutracker', ''),
    'RUTRACKER_RATIO': (str, 'Rutracker', ''),
    'RUTRACKER_USER': (str, 'Rutracker', ''),
    'SAB_APIKEY': (str, 'SABnzbd', ''),
    'SAB_CATEGORY': (str, 'SABnzbd', ''),
    'SAB_HOST': (str, 'SABnzbd', ''),
    'SAB_PASSWORD': (str, 'SABnzbd', ''),
    'SAB_USERNAME': (str, 'SABnzbd', ''),
    'SAMPLINGFREQUENCY': (int, 'General', 44100),
    'SEARCH_INTERVAL': (int, 'General', 1440),
    'SONGKICK_APIKEY': (str, 'Songkick', 'nd1We7dFW2RqxPw8'),
    'SONGKICK_ENABLED': (int, 'Songkick', 1),
    'SONGKICK_FILTER_ENABLED': (int, 'Songkick', 0),
    'SONGKICK_LOCATION': (str, 'Songkick', ''),
    'STRIKE': (int, 'Strike', 0),
    'STRIKE_RATIO': (str, 'Strike', ''),
    'SUBSONIC_ENABLED': (int, 'Subsonic', 0),
    'SUBSONIC_HOST': (str, 'Subsonic', ''),
    'SUBSONIC_PASSWORD': (str, 'Subsonic', ''),
    'SUBSONIC_USERNAME': (str, 'Subsonic', ''),
    'SYNOINDEX_ENABLED': (int, 'Synoindex', 0),
    'TORRENTBLACKHOLE_DIR': (str, 'General', ''),
    'TORRENT_DOWNLOADER': (int, 'General', 0),
    'TORRENT_REMOVAL_INTERVAL': (int, 'General', 720),
    'TORZNAB': (int, 'Torznab', 0),
    'TORZNAB_APIKEY': (str, 'Torznab', ''),
    'TORZNAB_ENABLED': (int, 'Torznab', 1),
    'TORZNAB_HOST': (str, 'Torznab', ''),
    'TRANSMISSION_HOST': (str, 'Transmission', ''),
    'TRANSMISSION_PASSWORD': (str, 'Transmission', ''),
    'TRANSMISSION_USERNAME': (str, 'Transmission', ''),
    'TWITTER_ENABLED': (int, 'Twitter', 0),
    'TWITTER_ONSNATCH': (int, 'Twitter', 0),
    'TWITTER_PASSWORD': (str, 'Twitter', ''),
    'TWITTER_PREFIX': (str, 'Twitter', 'Headphones'),
    'TWITTER_USERNAME': (str, 'Twitter', ''),
    'UPDATE_DB_INTERVAL': (int, 'General', 24),
    'USENET_RETENTION': (int, 'General', '1500'),
    'UTORRENT_HOST': (str, 'uTorrent', ''),
    'UTORRENT_LABEL': (str, 'uTorrent', ''),
    'UTORRENT_PASSWORD': (str, 'uTorrent', ''),
    'UTORRENT_USERNAME': (str, 'uTorrent', ''),
    'VERIFY_SSL_CERT': (bool_int, 'Advanced', 1),
    'WAIT_UNTIL_RELEASE_DATE' : (int, 'General', 0),
    'WAFFLES': (int, 'Waffles', 0),
    'WAFFLES_PASSKEY': (str, 'Waffles', ''),
    'WAFFLES_RATIO': (str, 'Waffles', ''),
    'WAFFLES_UID': (str, 'Waffles', ''),
    'WHATCD': (int, 'What.cd', 0),
    'WHATCD_PASSWORD': (str, 'What.cd', ''),
    'WHATCD_RATIO': (str, 'What.cd', ''),
    'WHATCD_USERNAME': (str, 'What.cd', ''),
    'XBMC_ENABLED': (int, 'XBMC', 0),
    'XBMC_HOST': (str, 'XBMC', ''),
    'XBMC_NOTIFY': (int, 'XBMC', 0),
    'XBMC_PASSWORD': (str, 'XBMC', ''),
    'XBMC_UPDATE': (int, 'XBMC', 0),
    'XBMC_USERNAME': (str, 'XBMC', ''),
    'XLDPROFILE': (str, 'General', '')
}
# pylint:disable=R0902
# it might be nice to refactor for fewer instance variables
class Config(object):
    """ Wraps access to particular values in a config file.

    All-caps attribute access (e.g. ``cfg.HTTP_PORT``) is routed through
    __getattr__/__setattr__ into the backing ConfigObj using the
    definitions in _CONFIG_DEFINITIONS; everything else behaves like a
    normal attribute.
    """

    def __init__(self, config_file):
        """ Initialize the config with values from a file """
        self._config_file = config_file
        self._config = ConfigObj(self._config_file, encoding='utf-8')
        # Cast every known setting to its declared type (or its default).
        for key in _CONFIG_DEFINITIONS.keys():
            self.check_setting(key)
        self.ENCODER_MULTICORE_COUNT = max(0, self.ENCODER_MULTICORE_COUNT)
        self._upgrade()

    def _define(self, name):
        """ Resolve a setting name to (KEY, type, section, ini_key, default). """
        key = name.upper()
        ini_key = name.lower()
        definition = _CONFIG_DEFINITIONS[key]
        # Definitions are (type, section, default) or (type, section, _, default).
        if len(definition) == 3:
            definition_type, section, default = definition
        else:
            definition_type, section, _, default = definition
        return key, definition_type, section, ini_key, default

    def check_section(self, section):
        """ Check if INI section exists, if not create it """
        if section not in self._config:
            self._config[section] = {}
            return True
        else:
            return False

    def check_setting(self, key):
        """ Cast any value in the config to the right type or use the default """
        key, definition_type, section, ini_key, default = self._define(key)
        self.check_section(section)
        try:
            my_val = definition_type(self._config[section][ini_key])
        except Exception:
            # Missing key or bad value: fall back to the typed default.
            my_val = definition_type(default)
        self._config[section][ini_key] = my_val
        return my_val

    def write(self):
        """ Make a copy of the stored config and write it to the configured file """
        new_config = ConfigObj(encoding="UTF-8")
        new_config.filename = self._config_file

        # first copy over everything from the old config, even if it is not
        # correctly defined to keep from losing data
        for key, subkeys in self._config.items():
            if key not in new_config:
                new_config[key] = {}
            for subkey, value in subkeys.items():
                new_config[key][subkey] = value

        # next make sure that everything we expect to have defined is so
        for key in _CONFIG_DEFINITIONS.keys():
            key, definition_type, section, ini_key, default = self._define(key)
            self.check_setting(key)
            if section not in new_config:
                new_config[section] = {}
            new_config[section][ini_key] = self._config[section][ini_key]

        # Write it to file
        headphones.logger.info("Writing configuration to file")
        try:
            new_config.write()
        except IOError as e:
            headphones.logger.error("Error writing configuration file: %s", e)

    def get_extra_newznabs(self):
        """ Return the extra newznab tuples.

        EXTRA_NEWZNABS is a flat list of (host, apikey, enabled) triples;
        regroup it into 3-tuples. (itertools.izip: Python 2.)
        """
        extra_newznabs = list(
            itertools.izip(*[itertools.islice(self.EXTRA_NEWZNABS, i, None, 3)
                             for i in range(3)])
        )
        return extra_newznabs

    def clear_extra_newznabs(self):
        """ Forget about the configured extra newznabs """
        self.EXTRA_NEWZNABS = []

    def add_extra_newznab(self, newznab):
        """ Add a new extra newznab (a (host, apikey, enabled) triple). """
        extra_newznabs = self.EXTRA_NEWZNABS
        for item in newznab:
            extra_newznabs.append(item)
        self.EXTRA_NEWZNABS = extra_newznabs

    def get_extra_torznabs(self):
        """ Return the extra torznab tuples (same flat-triple layout as newznabs). """
        extra_torznabs = list(
            itertools.izip(*[itertools.islice(self.EXTRA_TORZNABS, i, None, 3)
                             for i in range(3)])
        )
        return extra_torznabs

    def clear_extra_torznabs(self):
        """ Forget about the configured extra torznabs """
        self.EXTRA_TORZNABS = []

    def add_extra_torznab(self, torznab):
        """ Add a new extra torznab """
        extra_torznabs = self.EXTRA_TORZNABS
        for item in torznab:
            extra_torznabs.append(item)
        self.EXTRA_TORZNABS = extra_torznabs

    def __getattr__(self, name):
        """
        Returns something from the ini unless it is a real property
        of the configuration object or is not all caps.
        """
        if not re.match(r'[A-Z_]+$', name):
            # BUGFIX: object defines no __getattr__, so the previous
            # super().__getattr__(name) call raised a confusing
            # "'super' object has no attribute '__getattr__'" error.
            # __getattribute__ re-runs normal lookup and raises a proper
            # AttributeError(name) for genuinely missing attributes.
            return super(Config, self).__getattribute__(name)
        else:
            return self.check_setting(name)

    def __setattr__(self, name, value):
        """
        Maps all-caps properties to ini values unless they exist on the
        configuration object.
        """
        if not re.match(r'[A-Z_]+$', name):
            super(Config, self).__setattr__(name, value)
            return value
        else:
            key, definition_type, section, ini_key, default = self._define(name)
            self._config[section][ini_key] = definition_type(value)
            return self._config[section][ini_key]

    def process_kwargs(self, kwargs):
        """
        Given a big bunch of key value pairs, apply them to the ini.
        """
        for name, value in kwargs.items():
            key, definition_type, section, ini_key, default = self._define(name)
            self._config[section][ini_key] = definition_type(value)

    def _upgrade(self):
        """ Update folder formats in the config & bump up config version.

        Each branch migrates one schema version and bumps CONFIG_VERSION so
        that it runs at most once.
        """
        if self.CONFIG_VERSION == '0':
            from headphones.helpers import replace_all
            file_values = {
                'tracknumber': 'Track',
                'title': 'Title',
                'artist': 'Artist',
                'album': 'Album',
                'year': 'Year'
            }
            folder_values = {
                'artist': 'Artist',
                'album': 'Album',
                'year': 'Year',
                'releasetype': 'Type',
                'first': 'First',
                'lowerfirst': 'first'
            }
            self.FILE_FORMAT = replace_all(self.FILE_FORMAT, file_values)
            self.FOLDER_FORMAT = replace_all(self.FOLDER_FORMAT, folder_values)

            self.CONFIG_VERSION = '1'

        if self.CONFIG_VERSION == '1':
            from headphones.helpers import replace_all
            file_values = {
                'Track': '$Track',
                'Title': '$Title',
                'Artist': '$Artist',
                'Album': '$Album',
                'Year': '$Year',
                'track': '$track',
                'title': '$title',
                'artist': '$artist',
                'album': '$album',
                'year': '$year'
            }
            folder_values = {
                'Artist': '$Artist',
                'Album': '$Album',
                'Year': '$Year',
                'Type': '$Type',
                'First': '$First',
                'artist': '$artist',
                'album': '$album',
                'year': '$year',
                'type': '$type',
                'first': '$first'
            }
            self.FILE_FORMAT = replace_all(self.FILE_FORMAT, file_values)
            self.FOLDER_FORMAT = replace_all(self.FOLDER_FORMAT, folder_values)
            self.CONFIG_VERSION = '2'

        if self.CONFIG_VERSION == '2':
            # Update the config to use direct path to the encoder rather than the encoder folder
            if self.ENCODERFOLDER:
                self.ENCODER_PATH = os.path.join(self.ENCODERFOLDER, self.ENCODER)
            self.CONFIG_VERSION = '3'

        if self.CONFIG_VERSION == '3':
            # Update the BLACKHOLE option to the NZB_DOWNLOADER format
            if self.BLACKHOLE:
                self.NZB_DOWNLOADER = 2
            self.CONFIG_VERSION = '4'

        # Enable Headphones Indexer if they have a VIP account
        if self.CONFIG_VERSION == '4':
            if self.HPUSER and self.HPPASS:
                self.HEADPHONES_INDEXER = True
            self.CONFIG_VERSION = '5'

        if self.CONFIG_VERSION == '5':
            if self.OPEN_MAGNET_LINKS:
                self.MAGNET_LINKS = 2
            # BUGFIX: previously re-assigned '5', so this migration re-ran
            # on every startup; bump to '6' like every other branch.
            self.CONFIG_VERSION = '6'
|
gpl-2.0
|
will-Do/tp-libvirt_v2v
|
libguestfs/tests/guestfish_lvm.py
|
7
|
25965
|
from autotest.client.shared import error, utils
from virttest import utils_test, utils_misc, data_dir
from virttest.tests import unattended_install
import logging
import shutil
import os
import re
def prepare_image(params):
    """
    (1) Create a image
    (2) Create file system on the image
    """
    image = utils_test.libguestfs.preprocess_image(params)
    params["image_path"] = image
    if not image:
        raise error.TestFail("Image could not be created for some reason.")

    gf = utils_test.libguestfs.GuestfishTools(params)
    status, output = gf.create_fs()
    # The session is closed on both the success and the failure path.
    gf.close_session()
    if status is False:
        raise error.TestFail(output)
def create_lvm(gf, mode, pv_name="/dev/sda", vg_name="VG", lv_name="LV", size=100):
    """Create one LVM layer (PV, VG or LV) selected by *mode* and return
    the corresponding guestfish listing; fail the test when the created
    object does not show up. An unknown mode is only logged."""
    if mode == 'pvcreate':
        gf.part_init(pv_name, "msdos")
        gf.pvcreate(pv_name)
        created = gf.pvs().stdout.strip()
        fail_msg = "create PV failed"
    elif mode == 'vgcreate':
        gf.vgcreate(vg_name, pv_name)
        created = gf.vgs().stdout.strip()
        fail_msg = "create VG failed"
    elif mode == 'lvcreate':
        gf.lvcreate(lv_name, vg_name, size)
        created = gf.lvs().stdout.strip()
        fail_msg = "create LV failed"
    else:
        logging.info("mode should be 'pvcreate','vgcreate' or 'lvcreate'")
        return
    if not created:
        gf.close_session()
        raise error.TestFail(fail_msg)
    return created
def test_is_lv(vm, params):
    """
    Test command is-lv
    """
    source = params.get("gf_add_ref", "disk")
    ro = params.get("gf_add_readonly") == "yes"
    gf = utils_test.libguestfs.GuestfishTools(params)
    if source == "domain":
        gf.add_domain(params.get("main_vm"), readonly=ro)
    elif source == "disk":
        gf.add_drive_opts(params.get("image_path"), readonly=ro)
    gf.run()

    # A plain partition must not be reported as a logical volume.
    device = gf.list_partitions().stdout.strip()
    if gf.is_lv(device).stdout.strip() != "false":
        gf.close_session()
        raise error.TestFail("It should be a physical device")

    # After building the pv/vg/lv chain, the LV must be reported as one.
    for step in ('pvcreate', 'vgcreate', 'lvcreate'):
        create_lvm(gf, step)
    device = gf.lvs().stdout.strip()
    if gf.is_lv(device).stdout.strip() != "true":
        gf.close_session()
        raise error.TestFail("It should be a lvm device")
    gf.close_session()
def test_lvcreate(vm, params):
    """
    Test command lvcreate

    Creates a PV/VG/LV chain and checks that both lvs and lvs-full
    report the expected LV name.
    """
    add_ref = params.get("gf_add_ref", "disk")
    readonly = "yes" == params.get("gf_add_readonly")

    gf = utils_test.libguestfs.GuestfishTools(params)
    if add_ref == "disk":
        image_path = params.get("image_path")
        gf.add_drive_opts(image_path, readonly=readonly)
    elif add_ref == "domain":
        vm_name = params.get("main_vm")
        gf.add_domain(vm_name, readonly=readonly)
    gf.run()

    # (removed an unused pv_name lookup that was never referenced)
    vg_name = "myvg"
    lv_name = "mylv"
    create_lvm(gf, 'pvcreate')
    create_lvm(gf, 'vgcreate', vg_name=vg_name)
    create_lvm(gf, 'lvcreate', vg_name=vg_name, lv_name=lv_name)

    part_name = "/dev/%s/%s" % (vg_name, lv_name)
    result = gf.lvs().stdout.strip()
    if result != part_name:
        gf.close_session()
        raise error.TestFail("lv name is not match")

    result = gf.lvs_full().stdout.strip()
    # Raw string keeps the \s escape unambiguous.
    result = re.search(r"lv_name:\s+(\S+)", result).groups()[0]
    if result != lv_name:
        gf.close_session()
        raise error.TestFail("lv name is not match")
    gf.close_session()
def test_lvm_canonical_lv_name(vm, params):
    """
    Test command lvm-canonical-lv-name

    The /dev/mapper/VG-LV alias must canonicalise back to /dev/VG/LV.
    """
    add_ref = params.get("gf_add_ref", "disk")
    readonly = "yes" == params.get("gf_add_readonly")

    gf = utils_test.libguestfs.GuestfishTools(params)
    if add_ref == "disk":
        image_path = params.get("image_path")
        gf.add_drive_opts(image_path, readonly=readonly)
    elif add_ref == "domain":
        vm_name = params.get("main_vm")
        gf.add_domain(vm_name, readonly=readonly)
    gf.run()

    # (removed an unused pv_name lookup that was never referenced)
    create_lvm(gf, 'pvcreate')
    create_lvm(gf, 'vgcreate')
    create_lvm(gf, 'lvcreate')

    real_name = gf.lvs().stdout.strip()
    vg_name, lv_name = real_name.split("/")[-2:]
    test_name = "/dev/mapper/%s-%s" % (vg_name, lv_name)
    result = gf.lvm_canonical_lv_name(test_name).stdout.strip()
    logging.debug(result)
    if result != real_name:
        gf.close_session()
        # (message typo fixed: was "Return name is uncorrect")
        raise error.TestFail("Returned name is incorrect")
    gf.close_session()
def test_lvremove(vm, params):
    """
    Test command lvremove
    """
    source = params.get("gf_add_ref", "disk")
    ro = params.get("gf_add_readonly") == "yes"
    gf = utils_test.libguestfs.GuestfishTools(params)
    if source == "domain":
        gf.add_domain(params.get("main_vm"), readonly=ro)
    elif source == "disk":
        gf.add_drive_opts(params.get("image_path"), readonly=ro)
    gf.run()

    for step in ('pvcreate', 'vgcreate', 'lvcreate'):
        create_lvm(gf, step)

    lv_path = gf.lvs().stdout.strip()
    logging.debug(lv_path)
    if lv_path:
        gf.lvremove(lv_path)
        # The LV listing must be empty after removal.
        if gf.lvs().stdout.strip():
            gf.close_session()
            raise error.TestFail("LV can't be removed")
    gf.close_session()
def test_lvm_remove_all(vm, params):
    """
    Test command lvm-remove-all
    """
    source = params.get("gf_add_ref", "disk")
    ro = params.get("gf_add_readonly") == "yes"
    gf = utils_test.libguestfs.GuestfishTools(params)
    if source == "domain":
        gf.add_domain(params.get("main_vm"), readonly=ro)
    elif source == "disk":
        gf.add_drive_opts(params.get("image_path"), readonly=ro)
    gf.run()

    for step in ('pvcreate', 'vgcreate', 'lvcreate'):
        create_lvm(gf, step)

    def snapshot():
        # Current (pv, vg, lv) listings as stripped strings.
        return (gf.pvs().stdout.strip(),
                gf.vgs().stdout.strip(),
                gf.lvs().stdout.strip())

    pv, vg, lv = snapshot()
    logging.debug("pv: %s\n vg:%s\n lv:%s\n" % (pv, vg, lv))
    if pv and vg and lv:
        gf.lvm_remove_all()
        pv, vg, lv = snapshot()
        logging.debug("pv: %s\n vg:%s\n lv:%s\n" % (pv, vg, lv))
        if pv or vg or lv:
            gf.close_session()
            raise error.TestFail("lvm-remove-all failed")
    gf.close_session()
def test_lvrename(vm, params):
    """
    Test command lvrename

    Renames the created LV and checks the new name shows up in lvs.
    """
    add_ref = params.get("gf_add_ref", "disk")
    readonly = "yes" == params.get("gf_add_readonly")

    gf = utils_test.libguestfs.GuestfishTools(params)
    if add_ref == "disk":
        image_path = params.get("image_path")
        gf.add_drive_opts(image_path, readonly=readonly)
    elif add_ref == "domain":
        vm_name = params.get("main_vm")
        gf.add_domain(vm_name, readonly=readonly)
    gf.run()

    create_lvm(gf, 'pvcreate')
    create_lvm(gf, 'vgcreate')
    create_lvm(gf, 'lvcreate')

    # BUGFIX: new_lv_name was previously assigned only inside the `if ret:`
    # branch, so an empty initial lvs listing raised NameError in the check
    # below instead of failing the test cleanly.
    new_lv_name = "newlv"
    ret = gf.lvs().stdout.strip()
    logging.debug(ret)
    if ret:
        gf.lvrename(ret, new_lv_name)
        ret = gf.lvs().stdout.strip()
    if new_lv_name not in ret:
        gf.close_session()
        raise error.TestFail("LV can't be renamed")
    gf.close_session()
def test_lvresize(vm, params):
    """
    Test command lvresize

    Grows the freshly created LV to 200M and verifies via lvs-full that
    the reported size actually increased.
    """
    add_ref = params.get("gf_add_ref", "disk")
    readonly = "yes" == params.get("gf_add_readonly")

    gf = utils_test.libguestfs.GuestfishTools(params)
    if add_ref == "disk":
        image_path = params.get("image_path")
        gf.add_drive_opts(image_path, readonly=readonly)
    elif add_ref == "domain":
        vm_name = params.get("main_vm")
        gf.add_domain(vm_name, readonly=readonly)
    gf.run()

    create_lvm(gf, 'pvcreate')
    create_lvm(gf, 'vgcreate')
    create_lvm(gf, 'lvcreate')

    ret = gf.lvs_full().stdout.strip()
    lv = gf.lvs().stdout.strip()
    # BUGFIX: sizes were previously compared as strings, which is wrong
    # whenever the two values have a different number of digits. lvs-full
    # reports lv_size as an integer byte count, so compare numerically.
    old_size = int(re.search(r"lv_size:\s+(\S+)", ret).groups()[0])

    ret = gf.lvresize(lv, 200)
    if ret.exit_status:
        gf.close_session()
        raise error.TestFail("lvresize execute failed")

    ret = gf.lvs_full().stdout.strip()
    new_size = int(re.search(r"lv_size:\s+(\S+)", ret).groups()[0])
    logging.debug("old_size is %s, new_size is %s" % (old_size, new_size))
    if new_size <= old_size:
        gf.close_session()
        raise error.TestFail("lvresize failed")
    gf.close_session()
def test_lvresize_free(vm, params):
    """
    Test command lvresize-free

    Expands the LV to 100% of the VG's free space and checks that the
    LV size then matches the VG size.
    """
    add_ref = params.get("gf_add_ref", "disk")
    readonly = "yes" == params.get("gf_add_readonly")

    gf = utils_test.libguestfs.GuestfishTools(params)
    if add_ref == "disk":
        image_path = params.get("image_path")
        gf.add_drive_opts(image_path, readonly=readonly)
    elif add_ref == "domain":
        vm_name = params.get("main_vm")
        gf.add_domain(vm_name, readonly=readonly)
    gf.run()

    create_lvm(gf, 'pvcreate')
    create_lvm(gf, 'vgcreate')
    create_lvm(gf, 'lvcreate', size=200)

    lv = gf.lvs().stdout.strip()
    ret = gf.lvs_full().stdout.strip()
    # BUGFIX: compare the byte counts numerically rather than as raw
    # strings, so a formatting difference between lvs-full and vgs-full
    # output cannot cause a false failure.
    old_size = int(re.search(r"lv_size:\s+(\S+)", ret).groups()[0])
    ret = gf.vgs_full().stdout.strip()
    max_size = int(re.search(r"vg_size:\s+(\S+)", ret).groups()[0])

    ret = gf.lvresize_free(lv, 100)
    if ret.exit_status:
        gf.close_session()
        raise error.TestFail("lvresize-free execute failed")

    ret = gf.lvs_full().stdout.strip()
    new_size = int(re.search(r"lv_size:\s+(\S+)", ret).groups()[0])
    logging.debug("old_size is %s, new_size is %s" % (old_size, new_size))
    if new_size != max_size:
        gf.close_session()
        raise error.TestFail("lv_size should be %s" % max_size)
    gf.close_session()
def test_lvm_set_filter(vm, params):
    """
    Test command lvm-set-filter and lvm-clear-filter
    """
    source = params.get("gf_add_ref", "disk")
    ro = params.get("gf_add_readonly") == "yes"
    gf = utils_test.libguestfs.GuestfishTools(params)
    if source == "domain":
        gf.add_domain(params.get("main_vm"), readonly=ro)
    elif source == "disk":
        gf.add_drive_opts(params.get("image_path"), readonly=ro)
    gf.run()

    for step in ('pvcreate', 'vgcreate', 'lvcreate'):
        create_lvm(gf, step)

    lv_path = gf.lvs().stdout.strip()
    if not lv_path:
        gf.close_session()
        raise error.TestFail("LV should be listed")

    # Once a filter is set on the device, lvs must hide the LV.
    gf.lvm_set_filter(lv_path)
    if gf.lvs().stdout.strip():
        gf.close_session()
        raise error.TestFail("LV should not be listed")

    # Clearing the filter makes the LV visible again.
    gf.lvm_clear_filter()
    if not gf.lvs().stdout.strip():
        gf.close_session()
        raise error.TestFail("LV should be listed")
    gf.close_session()
def test_lvuuid(vm, params):
    """
    Test command lvuuid
    """
    source = params.get("gf_add_ref", "disk")
    ro = params.get("gf_add_readonly") == "yes"
    gf = utils_test.libguestfs.GuestfishTools(params)
    if source == "domain":
        gf.add_domain(params.get("main_vm"), readonly=ro)
    elif source == "disk":
        gf.add_drive_opts(params.get("image_path"), readonly=ro)
    gf.run()

    for step in ('pvcreate', 'vgcreate', 'lvcreate'):
        create_lvm(gf, step)

    lv_path = gf.lvs().stdout.strip()
    # lvuuid prints the UUID with dashes; strip them before comparing
    # against the lvs-full report.
    uuid = gf.lvuuid(lv_path).stdout.strip().replace("-", "")
    logging.debug("uuid from lvuuid is %s" % uuid)
    full_listing = gf.lvs_full().stdout.strip()
    expected = re.search(r"lv_uuid:\s+(\S+)", full_listing).groups()[0]
    logging.debug("uuid from lvs-full is %s" % expected)
    if uuid != expected:
        gf.close_session()
        raise error.TestFail("lv uuid is not match")
    gf.close_session()
def test_vgcreate(vm, params):
    """
    Test command vgcreate

    Creates a VG with a known name and checks that both vgs and
    vgs-full report it.
    """
    add_ref = params.get("gf_add_ref", "disk")
    readonly = "yes" == params.get("gf_add_readonly")

    gf = utils_test.libguestfs.GuestfishTools(params)
    if add_ref == "disk":
        image_path = params.get("image_path")
        gf.add_drive_opts(image_path, readonly=readonly)
    elif add_ref == "domain":
        vm_name = params.get("main_vm")
        gf.add_domain(vm_name, readonly=readonly)
    gf.run()

    # (removed an unused pv_name lookup that was never referenced)
    vg_name = "myvg"
    create_lvm(gf, 'pvcreate')
    create_lvm(gf, 'vgcreate', vg_name=vg_name)

    result = gf.vgs().stdout.strip()
    if result != vg_name:
        gf.close_session()
        raise error.TestFail("vg name is not match")

    ret = gf.vgs_full().stdout.strip()
    # Raw string keeps the \s escape unambiguous.
    result = re.search(r"vg_name:\s+(\S+)", ret).groups()[0]
    if result != vg_name:
        gf.close_session()
        raise error.TestFail("vg name is not match")
    gf.close_session()
def test_vgremove(vm, params):
    """
    Test command vgremove.

    Create a PV/VG/LV stack, remove the VG with 'vgremove', and verify
    that 'vgs' no longer lists any volume group.

    :param vm: VM object under test (not started by this case)
    :param params: test parameter dictionary
    :raise error.TestFail: if the VG is still listed after removal
    """
    add_ref = params.get("gf_add_ref", "disk")
    readonly = "yes" == params.get("gf_add_readonly")
    gf = utils_test.libguestfs.GuestfishTools(params)
    # Attach the target either as a raw disk image or as a libvirt domain.
    if add_ref == "disk":
        image_path = params.get("image_path")
        gf.add_drive_opts(image_path, readonly=readonly)
    elif add_ref == "domain":
        vm_name = params.get("main_vm")
        gf.add_domain(vm_name, readonly=readonly)
    gf.run()
    create_lvm(gf, 'pvcreate')
    create_lvm(gf, 'vgcreate')
    create_lvm(gf, 'lvcreate')
    ret = gf.vgs().stdout.strip()
    logging.debug(ret)
    # Remove the VG only if one was actually created.
    if ret:
        gf.vgremove(ret)
    ret = gf.vgs().stdout.strip()
    if ret:
        gf.close_session()
        raise error.TestFail("VG can't be removed")
    gf.close_session()
def test_vgrename(vm, params):
    """
    Test command vgrename.

    Create a PV/VG/LV stack, rename the VG to "newvg", and verify that
    'vgs' lists the new name.

    :param vm: VM object under test (not started by this case)
    :param params: test parameter dictionary
    :raise error.TestFail: if the renamed VG is not listed
    """
    add_ref = params.get("gf_add_ref", "disk")
    readonly = "yes" == params.get("gf_add_readonly")
    gf = utils_test.libguestfs.GuestfishTools(params)
    # Attach the target either as a raw disk image or as a libvirt domain.
    if add_ref == "disk":
        image_path = params.get("image_path")
        gf.add_drive_opts(image_path, readonly=readonly)
    elif add_ref == "domain":
        vm_name = params.get("main_vm")
        gf.add_domain(vm_name, readonly=readonly)
    gf.run()
    create_lvm(gf, 'pvcreate')
    create_lvm(gf, 'vgcreate')
    create_lvm(gf, 'lvcreate')
    ret = gf.vgs().stdout.strip()
    logging.debug(ret)
    # Rename only if a VG was actually created.
    if ret:
        new_vg_name = "newvg"
        gf.vgrename(ret, new_vg_name)
    ret = gf.vgs().stdout.strip()
    if new_vg_name not in ret:
        gf.close_session()
        raise error.TestFail("VG can't be renamed")
    gf.close_session()
def test_vgscan(vm, params):
    """
    Test command vgscan.

    Create a PV/VG/LV stack and check that 'vgscan' exits successfully.

    :param vm: VM object under test (not started by this case)
    :param params: test parameter dictionary
    :raise error.TestFail: if vgscan returns a non-zero exit status
    """
    add_ref = params.get("gf_add_ref", "disk")
    readonly = "yes" == params.get("gf_add_readonly")
    gf = utils_test.libguestfs.GuestfishTools(params)
    # Attach the target either as a raw disk image or as a libvirt domain.
    if add_ref == "disk":
        image_path = params.get("image_path")
        gf.add_drive_opts(image_path, readonly=readonly)
    elif add_ref == "domain":
        vm_name = params.get("main_vm")
        gf.add_domain(vm_name, readonly=readonly)
    gf.run()
    create_lvm(gf, 'pvcreate')
    create_lvm(gf, 'vgcreate')
    create_lvm(gf, 'lvcreate')
    result = gf.vgscan()
    # Only the exit status is checked; output content is not validated.
    if result.exit_status:
        gf.close_session()
        raise error.TestFail("vgscan execute failed")
    gf.close_session()
def test_vguuid(vm, params):
    """
    Test command vguuid.

    Create a PV/VG/LV stack, then verify that the UUID reported by
    'vguuid' matches the vg_uuid field of 'vgs-full'.

    :param vm: VM object under test (not started by this case)
    :param params: test parameter dictionary
    :raise error.TestFail: if the two UUIDs differ
    """
    add_ref = params.get("gf_add_ref", "disk")
    readonly = "yes" == params.get("gf_add_readonly")
    gf = utils_test.libguestfs.GuestfishTools(params)
    # Attach the target either as a raw disk image or as a libvirt domain.
    if add_ref == "disk":
        image_path = params.get("image_path")
        gf.add_drive_opts(image_path, readonly=readonly)
    elif add_ref == "domain":
        vm_name = params.get("main_vm")
        gf.add_domain(vm_name, readonly=readonly)
    gf.run()
    create_lvm(gf, 'pvcreate')
    create_lvm(gf, 'vgcreate')
    create_lvm(gf, 'lvcreate')
    vg_name = gf.vgs().stdout.strip()
    uuid = gf.vguuid(vg_name).stdout.strip()
    # Normalize: drop dashes so both sources can be compared directly.
    uuid = re.sub("-", "", uuid)
    logging.debug("uuid from vguuid is %s" % uuid)
    ret = gf.vgs_full().stdout.strip()
    result = re.search(r"vg_uuid:\s+(\S+)", ret).groups()[0]
    # Fixed: the original message wrongly said "lvs-full" here.
    logging.debug("uuid from vgs-full is %s" % result)
    if uuid != result:
        gf.close_session()
        raise error.TestFail("vg uuid is not match")
    gf.close_session()
def test_vg_activate(vm, params):
    """
    Test command vg-activate.

    Create a PV/VG/LV stack, deactivate the VG (its device node must
    disappear from /dev) and reactivate it (the node must come back).

    :param vm: VM object under test (not started by this case)
    :param params: test parameter dictionary
    :raise error.TestFail: if /dev does not reflect the activation state
    """
    add_ref = params.get("gf_add_ref", "disk")
    readonly = "yes" == params.get("gf_add_readonly")
    gf = utils_test.libguestfs.GuestfishTools(params)
    # Attach the target either as a raw disk image or as a libvirt domain.
    if add_ref == "disk":
        image_path = params.get("image_path")
        gf.add_drive_opts(image_path, readonly=readonly)
    elif add_ref == "domain":
        vm_name = params.get("main_vm")
        gf.add_domain(vm_name, readonly=readonly)
    gf.run()
    create_lvm(gf, 'pvcreate')
    create_lvm(gf, 'vgcreate')
    create_lvm(gf, 'lvcreate')
    vg_name = gf.vgs().stdout.strip()
    # The VG must initially be visible in the appliance's /dev.
    result = gf.debug("ls", "/dev").stdout.strip()
    if vg_name not in result:
        gf.close_session()
        raise error.TestFail("Can not find %s in /dev" % vg_name)
    # Deactivate (flag 0): the device entry should disappear.
    gf.vg_activate(0, vg_name)
    result = gf.debug("ls", "/dev").stdout.strip()
    if vg_name in result:
        gf.close_session()
        raise error.TestFail("Find %s in /dev, it shouldn't be" % vg_name)
    # Reactivate (flag 1): the device entry should reappear.
    gf.vg_activate(1, vg_name)
    result = gf.debug("ls", "/dev").stdout.strip()
    if vg_name not in result:
        gf.close_session()
        raise error.TestFail("Can not find %s in /dev" % vg_name)
    gf.close_session()
def test_vg_activate_all(vm, params):
    """
    Test command vg-activate-all.

    Same as test_vg_activate, but toggling activation of all VGs at
    once instead of naming one.

    :param vm: VM object under test (not started by this case)
    :param params: test parameter dictionary
    :raise error.TestFail: if /dev does not reflect the activation state
    """
    add_ref = params.get("gf_add_ref", "disk")
    readonly = "yes" == params.get("gf_add_readonly")
    gf = utils_test.libguestfs.GuestfishTools(params)
    # Attach the target either as a raw disk image or as a libvirt domain.
    if add_ref == "disk":
        image_path = params.get("image_path")
        gf.add_drive_opts(image_path, readonly=readonly)
    elif add_ref == "domain":
        vm_name = params.get("main_vm")
        gf.add_domain(vm_name, readonly=readonly)
    gf.run()
    create_lvm(gf, 'pvcreate')
    create_lvm(gf, 'vgcreate')
    create_lvm(gf, 'lvcreate')
    vg_name = gf.vgs().stdout.strip()
    # The VG must initially be visible in the appliance's /dev.
    result = gf.debug("ls", "/dev").stdout.strip()
    if vg_name not in result:
        gf.close_session()
        raise error.TestFail("Can not find %s in /dev" % vg_name)
    # Deactivate every VG (flag 0): the device entry should disappear.
    gf.vg_activate_all(0)
    result = gf.debug("ls", "/dev").stdout.strip()
    if vg_name in result:
        gf.close_session()
        raise error.TestFail("Find %s in /dev, it shouldn't be" % vg_name)
    # Reactivate every VG (flag 1): the device entry should reappear.
    gf.vg_activate_all(1)
    result = gf.debug("ls", "/dev").stdout.strip()
    if vg_name not in result:
        gf.close_session()
        raise error.TestFail("Can not find %s in /dev" % vg_name)
    gf.close_session()
def test_vglvuuids(vm, params):
    """
    Test command vglvuuids.

    Create a PV/VG/LV stack, then verify that the LV UUID listed by
    'vglvuuids' for the VG matches the one reported by 'lvuuid'.

    :param vm: VM object under test (not started by this case)
    :param params: test parameter dictionary
    :raise error.TestFail: if the two UUIDs differ
    """
    add_ref = params.get("gf_add_ref", "disk")
    readonly = "yes" == params.get("gf_add_readonly")
    gf = utils_test.libguestfs.GuestfishTools(params)
    # Attach the target either as a raw disk image or as a libvirt domain.
    if add_ref == "disk":
        image_path = params.get("image_path")
        gf.add_drive_opts(image_path, readonly=readonly)
    elif add_ref == "domain":
        vm_name = params.get("main_vm")
        gf.add_domain(vm_name, readonly=readonly)
    gf.run()
    create_lvm(gf, 'pvcreate')
    create_lvm(gf, 'vgcreate')
    create_lvm(gf, 'lvcreate')
    lv_name = gf.lvs().stdout.strip()
    uuid = gf.lvuuid(lv_name).stdout.strip()
    # 'VG' is presumably the group name created by create_lvm's default —
    # TODO confirm against the helper's definition.
    result = gf.vglvuuids('VG').stdout.strip()
    if uuid != result:
        gf.close_session()
        raise error.TestFail("lv uuid is not match")
    gf.close_session()
def test_vgpvuuids(vm, params):
    """
    Test command vgpvuuids.

    Create a PV/VG/LV stack, then verify that the PV UUID listed by
    'vgpvuuids' for the VG matches the one reported by 'pvuuid'.

    :param vm: VM object under test (not started by this case)
    :param params: test parameter dictionary
    :raise error.TestFail: if the two UUIDs differ
    """
    add_ref = params.get("gf_add_ref", "disk")
    readonly = "yes" == params.get("gf_add_readonly")
    gf = utils_test.libguestfs.GuestfishTools(params)
    # Attach the target either as a raw disk image or as a libvirt domain.
    if add_ref == "disk":
        image_path = params.get("image_path")
        gf.add_drive_opts(image_path, readonly=readonly)
    elif add_ref == "domain":
        vm_name = params.get("main_vm")
        gf.add_domain(vm_name, readonly=readonly)
    gf.run()
    create_lvm(gf, 'pvcreate')
    create_lvm(gf, 'vgcreate')
    create_lvm(gf, 'lvcreate')
    pv_name = gf.pvs().stdout.strip()
    uuid = gf.pvuuid(pv_name).stdout.strip()
    # 'VG' is presumably the group name created by create_lvm's default —
    # TODO confirm against the helper's definition.
    result = gf.vgpvuuids('VG').stdout.strip()
    if uuid != result:
        gf.close_session()
        raise error.TestFail("pv uuid is not match")
    gf.close_session()
def test_pvcreate(vm, params):
    """
    Test command pvcreate.

    Create a PV and verify that 'pvs' and 'pvs-full' agree on the PV
    name, and that it is the expected /dev/sda device.

    :param vm: VM object under test (not started by this case)
    :param params: test parameter dictionary
    :raise error.TestFail: if the PV name is inconsistent or unexpected
    """
    add_ref = params.get("gf_add_ref", "disk")
    readonly = "yes" == params.get("gf_add_readonly")
    gf = utils_test.libguestfs.GuestfishTools(params)
    # Attach the target either as a raw disk image or as a libvirt domain.
    if add_ref == "disk":
        image_path = params.get("image_path")
        gf.add_drive_opts(image_path, readonly=readonly)
    elif add_ref == "domain":
        vm_name = params.get("main_vm")
        gf.add_domain(vm_name, readonly=readonly)
    gf.run()
    create_lvm(gf, 'pvcreate')
    pv_name = gf.pvs().stdout.strip()
    result = gf.pvs_full().stdout.strip()
    result = re.search(r"pv_name:\s+(\S+)", result).groups()[0]
    # BUG FIX: the original used a chained comparison
    # (result != pv_name != "/dev/sda"), which is an implicit AND and
    # passed whenever pvs and pvs-full merely agreed with each other,
    # even on a wrong device.  Fail if either check fails.
    if result != pv_name or pv_name != "/dev/sda":
        gf.close_session()
        raise error.TestFail("pv name is not match")
    gf.close_session()
def test_pvremove(vm, params):
    """
    Test command pvremove.

    Create a PV on /dev/sda, remove it with 'pvremove', and verify
    that 'pvs' no longer lists any physical volume.

    :param vm: VM object under test (not started by this case)
    :param params: test parameter dictionary
    :raise error.TestFail: if the PV name is wrong or removal fails
    """
    add_ref = params.get("gf_add_ref", "disk")
    readonly = "yes" == params.get("gf_add_readonly")
    gf = utils_test.libguestfs.GuestfishTools(params)
    # Attach the target either as a raw disk image or as a libvirt domain.
    if add_ref == "disk":
        image_path = params.get("image_path")
        gf.add_drive_opts(image_path, readonly=readonly)
    elif add_ref == "domain":
        vm_name = params.get("main_vm")
        gf.add_domain(vm_name, readonly=readonly)
    gf.run()
    create_lvm(gf, 'pvcreate')
    pv_name = gf.pvs().stdout.strip()
    if pv_name != "/dev/sda":
        gf.close_session()
        raise error.TestFail("pv name is not match")
    gf.pvremove('/dev/sda')
    pv_name = gf.pvs().stdout.strip()
    # After removal pvs must print nothing.
    if pv_name:
        gf.close_session()
        raise error.TestFail("remove pv failed")
    gf.close_session()
def test_pvresize(vm, params):
    """
    Test command pvresize and pvresize-size.

    Shrink the PV with 'pvresize-size' and check the reported size,
    then grow it back to the full device with 'pvresize' and check
    the original size is restored.

    :param vm: VM object under test (not started by this case)
    :param params: test parameter dictionary
    :raise error.TestFail: if pvs-full does not report the expected size
    """
    add_ref = params.get("gf_add_ref", "disk")
    readonly = "yes" == params.get("gf_add_readonly")
    gf = utils_test.libguestfs.GuestfishTools(params)
    # Attach the target either as a raw disk image or as a libvirt domain.
    if add_ref == "disk":
        image_path = params.get("image_path")
        gf.add_drive_opts(image_path, readonly=readonly)
    elif add_ref == "domain":
        vm_name = params.get("main_vm")
        gf.add_domain(vm_name, readonly=readonly)
    gf.run()
    create_lvm(gf, 'pvcreate')
    result = gf.pvs_full().stdout.strip()
    pv_size = re.search(r"pv_size:\s+(\S+)", result).groups()[0]
    # Shrink target: drop the last digit of the current size (~1/10th).
    # Presumably any value smaller than the device is acceptable here.
    new_size = pv_size[:-1]
    gf.pvresize_size("/dev/sda", new_size)
    result = gf.pvs_full().stdout.strip()
    get_size = re.search(r"pv_size:\s+(\S+)", result).groups()[0]
    if get_size != new_size:
        gf.close_session()
        raise error.TestFail("Can not get correct size via pvresize-size")
    # Plain pvresize grows the PV back to the whole device.
    gf.pvresize("/dev/sda")
    result = gf.pvs_full().stdout.strip()
    get_size = re.search(r"pv_size:\s+(\S+)", result).groups()[0]
    if get_size != pv_size:
        gf.close_session()
        # Fixed: the original message wrongly blamed "pvresize-size" here.
        raise error.TestFail("Can not get correct size via pvresize")
    gf.close_session()
def test_pvuuid(vm, params):
    """
    Test command pvuuid.

    Create a PV, then verify that the UUID reported by 'pvuuid' matches
    the pv_uuid field of 'pvs-full'.

    :param vm: VM object under test (not started by this case)
    :param params: test parameter dictionary
    :raise error.TestFail: if the two UUIDs differ
    """
    add_ref = params.get("gf_add_ref", "disk")
    readonly = "yes" == params.get("gf_add_readonly")
    gf = utils_test.libguestfs.GuestfishTools(params)
    # Attach the target either as a raw disk image or as a libvirt domain.
    if add_ref == "disk":
        image_path = params.get("image_path")
        gf.add_drive_opts(image_path, readonly=readonly)
    elif add_ref == "domain":
        vm_name = params.get("main_vm")
        gf.add_domain(vm_name, readonly=readonly)
    gf.run()
    create_lvm(gf, 'pvcreate')
    pv_name = gf.pvs().stdout.strip()
    uuid = gf.pvuuid(pv_name).stdout.strip()
    # Normalize: drop dashes so both sources can be compared directly.
    uuid = re.sub("-", "", uuid)
    logging.debug("uuid from pvuuid is %s" % uuid)
    ret = gf.pvs_full().stdout.strip()
    result = re.search("pv_uuid:\s+(\S+)", ret).groups()[0]
    logging.debug("uuid from pvs-full is %s" % result)
    if uuid != result:
        gf.close_session()
        raise error.TestFail("pv uuid is not match")
    gf.close_session()
def run(test, params, env):
    """
    Test of built-in lvm related commands in guestfish.
    1) Get parameters for test
    2) Set options for commands
    3) Run key commands:
       a.add disk or domain with readonly or not
       b.launch
       c.mount root device
    4) Write a file to help result checking
    5) Check result
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    # The guest must be shut down: guestfish opens the disk image directly.
    if vm.is_alive():
        vm.destroy()
    operation = params.get("guestfish_function")
    # Dispatch to the matching test_<operation> function in this module.
    testcase = globals()["test_%s" % operation]
    partition_types = params.get("partition_types")
    fs_types = params.get("fs_types")
    image_formats = params.get("image_formats")
    # Run the selected case for every image-format / partition-type combo.
    for image_format in re.findall("\w+", image_formats):
        params["image_format"] = image_format
        for partition_type in re.findall("\w+", partition_types):
            params["partition_type"] = partition_type
            prepare_image(params)
            testcase(vm, params)
|
gpl-2.0
|
godfather1103/WeiboRobot
|
python27/1.0/lib/unittest/loader.py
|
61
|
13501
|
"""Loading unittests."""
import os
import re
import sys
import traceback
import types
from functools import cmp_to_key as _CmpToKey
from fnmatch import fnmatch
from . import case, suite
__unittest = True
# what about .pyc or .pyo (etc)
# we would need to avoid loading the same tests multiple times
# from '.py', '.pyc' *and* '.pyo'
VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE)
def _make_failed_import_test(name, suiteClass):
    """Build a suite holding one failing test that reports an import error."""
    # Capture the traceback of the import failure currently being handled.
    message = ('Failed to import test module: %s\n%s'
               % (name, traceback.format_exc()))
    return _make_failed_test(
        'ModuleImportFailure', name, ImportError(message), suiteClass)
def _make_failed_load_tests(name, exception, suiteClass):
    """Build a suite with one failing test that re-raises a load_tests error."""
    return _make_failed_test(
        'LoadTestsFailure', name, exception, suiteClass)
def _make_failed_test(classname, methodname, exception, suiteClass):
    """Synthesize a TestCase subclass whose single method raises *exception*."""
    def testFailure(self):
        raise exception
    # Build the failing class on the fly, exposing the error as *methodname*.
    failing_class = type(classname, (case.TestCase,), {methodname: testFailure})
    return suiteClass((failing_class(methodname),))
class TestLoader(object):
    """
    This class is responsible for loading tests according to various criteria
    and returning them wrapped in a TestSuite
    """
    # Method-name prefix that marks a method as a test.
    testMethodPrefix = 'test'
    # Python 2 comparison function used to order test method names.
    sortTestMethodsUsing = cmp
    suiteClass = suite.TestSuite
    # Set by discover(); root directory tests are importable from.
    _top_level_dir = None
    def loadTestsFromTestCase(self, testCaseClass):
        """Return a suite of all tests cases contained in testCaseClass"""
        if issubclass(testCaseClass, suite.TestSuite):
            raise TypeError("Test cases should not be derived from TestSuite." \
                                " Maybe you meant to derive from TestCase?")
        testCaseNames = self.getTestCaseNames(testCaseClass)
        # Fall back to the legacy single-test 'runTest' convention.
        if not testCaseNames and hasattr(testCaseClass, 'runTest'):
            testCaseNames = ['runTest']
        loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
        return loaded_suite
    def loadTestsFromModule(self, module, use_load_tests=True):
        """Return a suite of all tests cases contained in the given module"""
        tests = []
        for name in dir(module):
            obj = getattr(module, name)
            if isinstance(obj, type) and issubclass(obj, case.TestCase):
                tests.append(self.loadTestsFromTestCase(obj))
        # The load_tests protocol lets a module customize its own suite.
        load_tests = getattr(module, 'load_tests', None)
        tests = self.suiteClass(tests)
        if use_load_tests and load_tests is not None:
            try:
                return load_tests(self, tests, None)
            except Exception, e:
                return _make_failed_load_tests(module.__name__, e,
                                               self.suiteClass)
        return tests
    def loadTestsFromName(self, name, module=None):
        """Return a suite of all tests cases given a string specifier.
        The name may resolve either to a module, a test case class, a
        test method within a test case class, or a callable object which
        returns a TestCase or TestSuite instance.
        The method optionally resolves the names relative to a given module.
        """
        parts = name.split('.')
        if module is None:
            # Import the longest importable dotted prefix of the name.
            parts_copy = parts[:]
            while parts_copy:
                try:
                    module = __import__('.'.join(parts_copy))
                    break
                except ImportError:
                    del parts_copy[-1]
                    if not parts_copy:
                        raise
            parts = parts[1:]
        obj = module
        # Walk the remaining attribute path down from the module.
        for part in parts:
            parent, obj = obj, getattr(obj, part)
        if isinstance(obj, types.ModuleType):
            return self.loadTestsFromModule(obj)
        elif isinstance(obj, type) and issubclass(obj, case.TestCase):
            return self.loadTestsFromTestCase(obj)
        elif (isinstance(obj, types.UnboundMethodType) and
              isinstance(parent, type) and
              issubclass(parent, case.TestCase)):
            name = parts[-1]
            inst = parent(name)
            return self.suiteClass([inst])
        elif isinstance(obj, suite.TestSuite):
            return obj
        elif hasattr(obj, '__call__'):
            test = obj()
            if isinstance(test, suite.TestSuite):
                return test
            elif isinstance(test, case.TestCase):
                return self.suiteClass([test])
            else:
                raise TypeError("calling %s returned %s, not a test" %
                                (obj, test))
        else:
            raise TypeError("don't know how to make test from: %s" % obj)
    def loadTestsFromNames(self, names, module=None):
        """Return a suite of all tests cases found using the given sequence
        of string specifiers. See 'loadTestsFromName()'.
        """
        suites = [self.loadTestsFromName(name, module) for name in names]
        return self.suiteClass(suites)
    def getTestCaseNames(self, testCaseClass):
        """Return a sorted sequence of method names found within testCaseClass
        """
        def isTestMethod(attrname, testCaseClass=testCaseClass,
                         prefix=self.testMethodPrefix):
            return attrname.startswith(prefix) and \
                hasattr(getattr(testCaseClass, attrname), '__call__')
        testFnNames = filter(isTestMethod, dir(testCaseClass))
        if self.sortTestMethodsUsing:
            testFnNames.sort(key=_CmpToKey(self.sortTestMethodsUsing))
        return testFnNames
    def discover(self, start_dir, pattern='test*.py', top_level_dir=None):
        """Find and return all test modules from the specified start
        directory, recursing into subdirectories to find them. Only test files
        that match the pattern will be loaded. (Using shell style pattern
        matching.)
        All test modules must be importable from the top level of the project.
        If the start directory is not the top level directory then the top
        level directory must be specified separately.
        If a test package name (directory with '__init__.py') matches the
        pattern then the package will be checked for a 'load_tests' function. If
        this exists then it will be called with loader, tests, pattern.
        If load_tests exists then discovery does *not* recurse into the package,
        load_tests is responsible for loading all tests in the package.
        The pattern is deliberately not stored as a loader attribute so that
        packages can continue discovery themselves. top_level_dir is stored so
        load_tests does not need to pass this argument in to loader.discover().
        """
        set_implicit_top = False
        if top_level_dir is None and self._top_level_dir is not None:
            # make top_level_dir optional if called from load_tests in a package
            top_level_dir = self._top_level_dir
        elif top_level_dir is None:
            set_implicit_top = True
            top_level_dir = start_dir
        top_level_dir = os.path.abspath(top_level_dir)
        if not top_level_dir in sys.path:
            # all test modules must be importable from the top level directory
            # should we *unconditionally* put the start directory in first
            # in sys.path to minimise likelihood of conflicts between installed
            # modules and development versions?
            sys.path.insert(0, top_level_dir)
        self._top_level_dir = top_level_dir
        is_not_importable = False
        if os.path.isdir(os.path.abspath(start_dir)):
            start_dir = os.path.abspath(start_dir)
            if start_dir != top_level_dir:
                is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py'))
        else:
            # support for discovery from dotted module names
            try:
                __import__(start_dir)
            except ImportError:
                is_not_importable = True
            else:
                the_module = sys.modules[start_dir]
                top_part = start_dir.split('.')[0]
                start_dir = os.path.abspath(os.path.dirname((the_module.__file__)))
                if set_implicit_top:
                    self._top_level_dir = self._get_directory_containing_module(top_part)
                    sys.path.remove(top_level_dir)
        if is_not_importable:
            raise ImportError('Start directory is not importable: %r' % start_dir)
        tests = list(self._find_tests(start_dir, pattern))
        return self.suiteClass(tests)
    def _get_directory_containing_module(self, module_name):
        # Resolve the directory that makes *module_name* importable.
        module = sys.modules[module_name]
        full_path = os.path.abspath(module.__file__)
        if os.path.basename(full_path).lower().startswith('__init__.py'):
            return os.path.dirname(os.path.dirname(full_path))
        else:
            # here we have been given a module rather than a package - so
            # all we can do is search the *same* directory the module is in
            # should an exception be raised instead
            return os.path.dirname(full_path)
    def _get_name_from_path(self, path):
        # Convert a filesystem path under _top_level_dir to a dotted name.
        path = os.path.splitext(os.path.normpath(path))[0]
        _relpath = os.path.relpath(path, self._top_level_dir)
        assert not os.path.isabs(_relpath), "Path must be within the project"
        assert not _relpath.startswith('..'), "Path must be within the project"
        name = _relpath.replace(os.path.sep, '.')
        return name
    def _get_module_from_name(self, name):
        __import__(name)
        return sys.modules[name]
    def _match_path(self, path, full_path, pattern):
        # override this method to use alternative matching strategy
        return fnmatch(path, pattern)
    def _find_tests(self, start_dir, pattern):
        """Used by discovery. Yields test suites it loads."""
        paths = os.listdir(start_dir)
        for path in paths:
            full_path = os.path.join(start_dir, path)
            if os.path.isfile(full_path):
                if not VALID_MODULE_NAME.match(path):
                    # valid Python identifiers only
                    continue
                if not self._match_path(path, full_path, pattern):
                    continue
                # if the test file matches, load it
                name = self._get_name_from_path(full_path)
                try:
                    module = self._get_module_from_name(name)
                except:
                    yield _make_failed_import_test(name, self.suiteClass)
                else:
                    # Guard against picking up a same-named module installed
                    # elsewhere on sys.path instead of the discovered file.
                    mod_file = os.path.abspath(getattr(module, '__file__', full_path))
                    realpath = os.path.splitext(os.path.realpath(mod_file))[0]
                    fullpath_noext = os.path.splitext(os.path.realpath(full_path))[0]
                    if realpath.lower() != fullpath_noext.lower():
                        module_dir = os.path.dirname(realpath)
                        mod_name = os.path.splitext(os.path.basename(full_path))[0]
                        expected_dir = os.path.dirname(full_path)
                        msg = ("%r module incorrectly imported from %r. Expected %r. "
                               "Is this module globally installed?")
                        raise ImportError(msg % (mod_name, module_dir, expected_dir))
                    yield self.loadTestsFromModule(module)
            elif os.path.isdir(full_path):
                if not os.path.isfile(os.path.join(full_path, '__init__.py')):
                    continue
                load_tests = None
                tests = None
                if fnmatch(path, pattern):
                    # only check load_tests if the package directory itself matches the filter
                    name = self._get_name_from_path(full_path)
                    package = self._get_module_from_name(name)
                    load_tests = getattr(package, 'load_tests', None)
                    tests = self.loadTestsFromModule(package, use_load_tests=False)
                if load_tests is None:
                    if tests is not None:
                        # tests loaded from package file
                        yield tests
                    # recurse into the package
                    for test in self._find_tests(full_path, pattern):
                        yield test
                else:
                    try:
                        yield load_tests(self, tests, pattern)
                    except Exception, e:
                        yield _make_failed_load_tests(package.__name__, e,
                                                      self.suiteClass)
# Shared module-level loader instance used by the convenience helpers below.
defaultTestLoader = TestLoader()
def _makeLoader(prefix, sortUsing, suiteClass=None):
    """Return a fresh TestLoader configured with the given prefix/sort/suite."""
    loader = TestLoader()
    loader.testMethodPrefix = prefix
    loader.sortTestMethodsUsing = sortUsing
    # Keep the loader's default suite class unless an override is supplied.
    if suiteClass:
        loader.suiteClass = suiteClass
    return loader
def getTestCaseNames(testCaseClass, prefix, sortUsing=cmp):
    """Legacy helper: sorted test method names of *testCaseClass*."""
    return _makeLoader(prefix, sortUsing).getTestCaseNames(testCaseClass)
def makeSuite(testCaseClass, prefix='test', sortUsing=cmp,
              suiteClass=suite.TestSuite):
    """Legacy helper: build a suite from one TestCase class."""
    return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromTestCase(testCaseClass)
def findTestCases(module, prefix='test', sortUsing=cmp,
                  suiteClass=suite.TestSuite):
    """Legacy helper: build a suite from all TestCase classes in *module*."""
    return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromModule(module)
|
gpl-3.0
|
aaronorosen/horizon-congress
|
openstack_dashboard/dashboards/project/networks/ports/tables.py
|
4
|
2995
|
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django import template
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from openstack_dashboard import api
def get_fixed_ips(port):
    """Render the port's fixed IPs using the _port_ips.html template."""
    template_name = 'project/networks/ports/_port_ips.html'
    context = {"ips": port.fixed_ips}
    return template.loader.render_to_string(template_name, context)
def get_attached(port):
    """Return the text shown in the 'Attached Device' column for *port*."""
    owner = port['device_owner']
    if owner:
        return owner
    # No owner recorded: report plain attached/detached based on device_id.
    return _('Attached') if port['device_id'] else _('Detached')
class UpdatePort(tables.LinkAction):
    """Row action opening the 'Edit Port' modal for a port."""
    name = "update"
    verbose_name = _("Edit Port")
    url = "horizon:project:networks:editport"
    classes = ("ajax-modal",)
    icon = "pencil"
    # Neutron policy rule checked before showing the action.
    policy_rules = (("network", "update_port"),)
    def get_policy_target(self, request, datum=None):
        """Return the project scope the policy check runs against."""
        project_id = None
        if datum:
            project_id = getattr(datum, 'tenant_id', None)
        return {"project_id": project_id}
    def get_link_url(self, port):
        """Build the edit URL from the table's network id and the port id."""
        network_id = self.table.kwargs['network_id']
        return reverse(self.url, args=(network_id, port.id))
class PortsTable(tables.DataTable):
    """Table of Neutron ports shown on the network detail page."""
    name = tables.Column("name",
                         verbose_name=_("Name"),
                         link="horizon:project:networks:ports:detail")
    fixed_ips = tables.Column(get_fixed_ips, verbose_name=_("Fixed IPs"))
    attached = tables.Column(get_attached, verbose_name=_("Attached Device"))
    status = tables.Column("status", verbose_name=_("Status"))
    admin_state = tables.Column("admin_state",
                                verbose_name=_("Admin State"))
    mac_state = tables.Column("mac_state", empty_value=api.neutron.OFF_STATE,
                              verbose_name=_("MAC Learning State"))
    def get_object_display(self, port):
        """Identify rows by port id in messages."""
        return port.id
    class Meta:
        name = "ports"
        verbose_name = _("Ports")
        row_actions = (UpdatePort,)
    def __init__(self, request, data=None, needs_form_wrapper=None, **kwargs):
        super(PortsTable, self).__init__(request, data=data,
                                         needs_form_wrapper=needs_form_wrapper,
                                         **kwargs)
        # Hide the MAC learning column when Neutron lacks the extension.
        if not api.neutron.is_extension_supported(request, 'mac-learning'):
            del self.columns['mac_state']
|
apache-2.0
|
evidation-health/bokeh
|
bokeh/sphinxext/collapsible_code_block.py
|
43
|
3128
|
""" Display code blocks in collapsible sections when outputting
to HTML.
Usage
-----
This directive takes a heading to use for the collapsible code block::
.. collapsible-code-block:: python
:heading: Some Code
from __future__ import print_function
print("Hello, Bokeh!")
Options
-------
This directive is identical to the standard ``code-block`` directive
that Sphinx supplies, with the addition of one new option:
heading : string
A heading to put for the collapsible block. Clicking the heading
    expands or collapses the block
Examples
--------
The inline example code above produces the following output:
----
.. collapsible-code-block:: python
:heading: Some Code
from __future__ import print_function
print("Hello, Bokeh!")
"""
from __future__ import absolute_import
from docutils import nodes
from docutils.parsers.rst.directives import unchanged
from os.path import basename
import jinja2
from sphinx.directives.code import CodeBlock
PROLOGUE_TEMPLATE = jinja2.Template(u"""
<div class="panel-group" id="accordion" role="tablist" aria-multiselectable="true">
<div class="panel panel-default">
<div class="panel-heading" role="tab" id="heading-{{ id }}">
<h4 class="panel-title">
<a class="collapsed" data-toggle="collapse" data-parent="#accordion" href="#collapse-{{ id }}" aria-expanded="false" aria-controls="collapse-{{ id }}">
{{ heading }}
</a>
</h4>
</div>
<div id="collapse-{{ id }}" class="panel-collapse collapse" role="tabpanel" aria-labelledby="heading-{{ id }}">
<div class="panel-body">
""")
EPILOGUE_TEMPLATE = jinja2.Template(u"""
</div>
</div>
</div>
</div>
""")
class collapsible_code_block(nodes.General, nodes.Element):
    """Docutils node wrapping a code block in collapsible HTML markup."""
    pass
class CollapsibleCodeBlock(CodeBlock):
    """``collapsible-code-block`` directive: a code-block with a clickable
    heading that expands/collapses the rendered block in HTML output."""
    # BUG FIX: copy the inherited spec before extending it. The original
    # assigned CodeBlock.option_spec directly and then called update(),
    # mutating the dict shared with the stock ``code-block`` directive.
    option_spec = dict(CodeBlock.option_spec)
    option_spec.update(heading=unchanged)
    def run(self):
        env = self.state.document.settings.env
        rst_source = self.state_machine.node.document['source']
        rst_filename = basename(rst_source)
        # Build a document-unique, id-safe anchor for the collapse targets.
        target_id = "%s.ccb-%d" % (rst_filename, env.new_serialno('bokeh-plot'))
        target_id = target_id.replace(".", "-")
        target_node = nodes.target('', '', ids=[target_id])
        node = collapsible_code_block()
        node['target_id'] = target_id
        node['heading'] = self.options.get('heading', "Code")
        # Delegate the actual highlighting to the parent directive and
        # wrap its first (and only) node in our collapsible container.
        cb = CodeBlock.run(self)
        node.setup_child(cb[0])
        node.children.append(cb[0])
        return [target_node, node]
def html_visit_collapsible_code_block(self, node):
    """Emit the opening accordion markup before the code block's HTML."""
    self.body.append(
        PROLOGUE_TEMPLATE.render(
            id=node['target_id'],
            heading=node['heading']
        )
    )
def html_depart_collapsible_code_block(self, node):
    """Emit the closing accordion markup after the code block's HTML."""
    self.body.append(EPILOGUE_TEMPLATE.render())
def setup(app):
    """Sphinx extension entry point: register the node and the directive."""
    app.add_node(
        collapsible_code_block,
        html=(
            html_visit_collapsible_code_block,
            html_depart_collapsible_code_block
        )
    )
    app.add_directive('collapsible-code-block', CollapsibleCodeBlock)
|
bsd-3-clause
|
rozifus/TeamStrong13_4
|
por/obstacle.py
|
1
|
1320
|
# everything to do with the rubies
import pyglet
import settings
import entity
import utils
from utils import Vec2d, Point, Rect
class Obstacle(entity.Entity):
    """
    In case we want more than one type?
    Base obstacle: colliding with it kills the player.
    """
    # Sprite used when rendering this obstacle.
    IMAGE = settings.ANVIL_IMAGE
    def collided(self, game):
        # Hitting a plain obstacle ends the run.
        game.die()
class InfiniteHeightObstacle(Obstacle):
    """Obstacle treated as an infinitely tall vertical line at its x."""
    # Latch: once crossed, never report a collision again.
    _collided = False
    def collides_with(self, other):
        """
        Make our own height infinite. We only need to check if
        the other entity straddles us.
        """
        if self.name == 'start' or self._collided:
            # don't collide with the first spawn point. It screws up the numbering.
            return False
        # ~
        # `
        # o
        # |
        # |
        # |
        # x[ ]+width
        # Crossed once the other entity's x passes our own.
        self._collided = collided = self.gp.x < other.gp.x
        return collided
class EndLevel(InfiniteHeightObstacle):
    """
    Hit this, and it's all over.
    """
    # Finish-post sprite.
    IMAGE = settings.POST_IMAGE
    def collided(self, game):
        # Crossing the post completes the level instead of killing.
        game.finish()
class Spawn(InfiniteHeightObstacle):
    """
    Hit this and postgres is saved.
    """
    IMAGE = settings.SPAWN_IMAGE
    def collided(self, game):
        # remove the just passed spawn point.
        game.spawn_points.pop(0)
|
mit
|
djw8605/campus-factory
|
python-lib/GlideinWMS/condorMonitor.py
|
7
|
19973
|
#
# Project:
# glideinWMS
#
# File Version:
# $Id: condorMonitor.py,v 1.10.8.1.2.2.6.1 2010/09/22 03:08:53 sfiligoi Exp $
#
# Description:
# This module implements classes to query the condor daemons
# and manipulate the results
# Please notice that it also converts \" into "
#
# Author:
# Igor Sfiligoi (Aug 30th 2006)
#
import GlideinWMS.condorExe as condorExe
import GlideinWMS.condorSecurity as condorSecurity
import os,string
import copy
import xml.parsers.expat
#
# Configuration
#
# Set path to condor binaries
def set_path(new_condor_bin_path):
    """Set the module-global path used to locate the condor binaries."""
    global condor_bin_path
    condor_bin_path=new_condor_bin_path
#
# Condor monitoring classes
#
# Generic, you most probably don't want to use these
class AbstractQuery: # pure virtual, just to have a minimum set of methods defined
    """Abstract condor query interface; subclasses implement the methods."""
    # returns the data, will not modify self
    def fetch(self,constraint=None,format_list=None):
        raise RuntimeError,"Fetch not implemented"
    # will fetch in self.stored_data
    def load(self,constraint=None,format_list=None):
        raise RuntimeError,"Load not implemented"
    # constraint_func is a boolean function, with only one argument (data el)
    # same output as fetch, but limited to constraint_func(el)==True
    #
    # if constraint_func==None, return all the data
    def fetchStored(self,constraint_func=None):
        raise RuntimeError,"fetchStored not implemented"
class StoredQuery(AbstractQuery): # still virtual, only fetchStored defined
    """Partial implementation filtering previously loaded self.stored_data."""
    def fetchStored(self,constraint_func=None):
        return applyConstraint(self.stored_data,constraint_func)
#
# format_list is a list of
# (attr_name, attr_type)
# where attr_type is one of
# "s" - string
# "i" - integer
# "r" - real (float)
# "b" - bool
#
#
# security_obj, if defined, should be a child of condorSecurity.ProtoRequest
class QueryExe(StoredQuery): # first fully implemented one, execute commands
def __init__(self,exe_name,resource_str,group_attribute,pool_name=None,security_obj=None):
self.exe_name=exe_name
self.resource_str=resource_str
self.group_attribute=group_attribute
self.pool_name=pool_name
if pool_name==None:
self.pool_str=""
else:
self.pool_str="-pool %s"%pool_name
if security_obj!=None:
if security_obj.has_saved_state():
raise RuntimeError, "Cannot use a security object which has saved state."
self.security_obj=copy.deepcopy(security_obj)
else:
self.security_obj=condorSecurity.ProtoRequest()
def require_integrity(self,requested_integrity): # if none, dont change, else forse that one
if requested_integrity==None:
condor_val=None
elif requested_integrity:
condor_val="REQUIRED"
else:
# if not required, still should not fail if the other side requires it
condor_val='OPTIONAL'
self.security_obj.set('CLIENT','INTEGRITY',condor_val)
def get_requested_integrity(self):
condor_val = self.security_obj.get('CLIENT','INTEGRITY')
if condor_val==None:
return None
return (condor_val=='REQUIRED')
def require_encryption(self,requested_encryption): # if none, dont change, else forse that one
if requested_encryption==None:
condor_val=None
elif requested_encryption:
condor_val="REQUIRED"
else:
# if not required, still should not fail if the other side requires it
condor_val='OPTIONAL'
self.security_obj.set('CLIENT','ENCRYPTION',condor_val)
def get_requested_encryption(self):
condor_val = self.security_obj.get('CLIENT','ENCRYPTION')
if condor_val==None:
return None
return (condor_val=='REQUIRED')
def fetch(self,constraint=None,format_list=None):
if constraint==None:
constraint_str=""
else:
constraint_str="-constraint '%s'"%constraint
full_xml=(format_list==None)
if format_list!=None:
format_arr=["-format '<c>' ClusterId"] #clusterid is always there, so this will always be printed out
for format_el in format_list:
attr_name,attr_type=format_el
attr_format={'s':'%s','i':'%i','r':'%f','b':'%i'}[attr_type]
format_arr.append('-format \'<a n="%s"><%s>%s</%s></a>\' %s'%(attr_name,attr_type,attr_format,attr_type,attr_name))
format_arr.append("-format '</c>' ClusterId") #clusterid is always there, so this will always be printed out
format_str=string.join(format_arr," ")
# set environment for security settings
self.security_obj.save_state()
self.security_obj.enforce_requests()
if full_xml:
xml_data=condorExe.exe_cmd(self.exe_name,"%s -xml %s %s"%(self.resource_str,self.pool_str,constraint_str));
else:
xml_data=condorExe.exe_cmd(self.exe_name,"%s %s %s %s"%(self.resource_str,format_str,self.pool_str,constraint_str));
xml_data=['<?xml version="1.0"?><classads>']+xml_data+["</classads>"]
# restore old values
self.security_obj.restore_state()
list_data=xml2list(xml_data)
del xml_data
dict_data=list2dict(list_data,self.group_attribute)
return dict_data
    def load(self,constraint=None,format_list=None):
        """Fetch the query results and cache them in self.stored_data
        (see fetch() for the parameters)."""
        self.stored_data=self.fetch(constraint,format_list)
#
# Fully usable query functions
#
# condor_q
class CondorQ(QueryExe):
    """Fully usable query around the condor_q executable; results are
    grouped by the (ClusterId, ProcId) pair."""
    def __init__(self, schedd_name=None, pool_name=None, security_obj=None):
        self.schedd_name = schedd_name
        schedd_str = "" if schedd_name is None else "-name %s" % schedd_name
        QueryExe.__init__(self, "condor_q", schedd_str, ["ClusterId", "ProcId"], pool_name, security_obj)
    def fetch(self, constraint=None, format_list=None):
        if format_list is not None:
            # the grouping keys must always be part of the requested format
            format_list = complete_format_list(format_list, [("ClusterId", 'i'), ("ProcId", 'i')])
        return QueryExe.fetch(self, constraint=constraint, format_list=format_list)
# condor_q, where we have only one ProcId x ClusterId
class CondorQLite(QueryExe):
    """condor_q query for the case where there is only one ProcId per
    ClusterId; results are grouped by ClusterId alone."""
    def __init__(self, schedd_name=None, pool_name=None, security_obj=None):
        self.schedd_name = schedd_name
        schedd_str = "" if schedd_name is None else "-name %s" % schedd_name
        QueryExe.__init__(self, "condor_q", schedd_str, "ClusterId", pool_name, security_obj)
    def fetch(self, constraint=None, format_list=None):
        if format_list is not None:
            # the grouping key must always be part of the requested format
            format_list = complete_format_list(format_list, [("ClusterId", 'i')])
        return QueryExe.fetch(self, constraint=constraint, format_list=format_list)
# condor_status
class CondorStatus(QueryExe):
    """Fully usable query around the condor_status executable; results are
    grouped by the Name attribute."""
    def __init__(self, subsystem_name=None, pool_name=None, security_obj=None):
        subsystem_str = "" if subsystem_name is None else "-%s" % subsystem_name
        QueryExe.__init__(self, "condor_status", subsystem_str, "Name", pool_name, security_obj)
    def fetch(self, constraint=None, format_list=None):
        if format_list is not None:
            # the grouping key must always be part of the requested format
            format_list = complete_format_list(format_list, [("Name", 's')])
        return QueryExe.fetch(self, constraint=constraint, format_list=format_list)
#
# Subquery classes
#
# Generic, you most probably don't want to use this
class BaseSubQuery(StoredQuery):
    """Generic subquery: applies subquery_func to the result of another
    query. You most probably want SubQuery or Group instead of this.

    query         -- object exposing fetch()/fetchStored()
    subquery_func -- one-argument function taking the fetched data
                     dictionary and returning the transformed dictionary
    """
    def __init__(self, query, subquery_func):
        self.query = query
        self.subquery_func = subquery_func
    def fetch(self, constraint=None):
        indata = self.query.fetch(constraint)
        # Bug fix: subquery_func takes the data dictionary only; it was
        # being called here with a spurious extra self argument, unlike
        # load() below and unlike the one-arg lambdas SubQuery/Group pass.
        return self.subquery_func(indata)
    #
    # NOTE: You need to call load on the SubQuery object to use fetchStored
    #       and had query.load issued before
    #
    def load(self, constraint=None):
        indata = self.query.fetchStored(constraint)
        self.stored_data = self.subquery_func(indata)
#
# Fully usable subquery functions
#
class SubQuery(BaseSubQuery):
    """Subquery that keeps only the classads satisfying constraint_func."""
    def __init__(self, query, constraint_func=None):
        def _filter(data):
            # delegate the actual filtering to applyConstraint
            return applyConstraint(data, constraint_func)
        BaseSubQuery.__init__(self, query, _filter)
class Group(BaseSubQuery):
    """Subquery that groups the classads of another query.

    group_key_func  -- key extraction function:
                       takes a classad dictionary, returns the group key
    group_data_func -- summary function:
                       takes a list of classad dictionaries and returns
                       a summary classad dictionary
    """
    def __init__(self, query, group_key_func, group_data_func):
        def _group(data):
            # delegate the grouping/summarizing to doGroup
            return doGroup(data, group_key_func, group_data_func)
        BaseSubQuery.__init__(self, query, _group)
#
# Summarizing classes
#
class Summarize:
    """Summarize the results of a query by counting or listing hash buckets.

    hash_func -- hashing function:
                 takes a classad dictionary and returns the hash value;
                 if None, the element is not counted;
                 if a list, every element adds one nesting level.
    """
    def __init__(self, query, hash_func=lambda x: 1):
        self.query = query
        self.hash_func = hash_func
    def count(self, constraint=None, hash_func=None):
        """Fetch the query and return a dictionary of counts keyed on the
        hash values (nested dictionaries when the hash returns lists).
        constraint is forwarded to query.fetch(); hash_func, when not
        None, overrides the one given at construction time."""
        return fetch2count(self.query.fetch(constraint), self.getHash(hash_func))
    def countStored(self, constraint_func=None, hash_func=None):
        """Same output as count, but using the data pre-stored in query."""
        return fetch2count(self.query.fetchStored(constraint_func), self.getHash(hash_func))
    def list(self, constraint=None, hash_func=None):
        """Fetch the query and return a dictionary of lists of keys, keyed
        on the hash values (nested dictionaries when the hash returns
        lists). Parameters as for count."""
        return fetch2list(self.query.fetch(constraint), self.getHash(hash_func))
    def listStored(self, constraint_func=None, hash_func=None):
        """Same output as list, but using the data pre-stored in query."""
        return fetch2list(self.query.fetchStored(constraint_func), self.getHash(hash_func))
    ### Internal
    def getHash(self, hash_func):
        # fall back to the constructor-provided hash function
        return self.hash_func if hash_func is None else hash_func
class SummarizeMulti:
    """Summarize several queries at once, adding their counts together.

    queries   -- iterable of query objects
    hash_func -- see Summarize
    """
    def __init__(self,queries,hash_func=lambda x:1):
        self.counts=[]
        for query in queries:
            # NOTE(review): Count is not defined in this chunk; presumably
            # a Summarize-like per-query counter defined elsewhere in the
            # module -- verify it exists
            self.counts.append(Count(query,hash_func))
        self.hash_func=hash_func
    # see Count for description
    def count(self,constraint=None,hash_func=None):
        # merge the per-query count dictionaries into one
        out={}
        for c in self.counts:
            data=c.count(constraint,hash_func)
            addDict(out,data)
        return out
    # see Count for description
    def countStored(self,constraint_func=None,hash_func=None):
        # same as count, but from the pre-stored data of each query
        out={}
        for c in self.counts:
            data=c.countStored(constraint_func,hash_func)
            addDict(out,data)
        return out
############################################################
#
# P R I V A T E, do not use
#
############################################################
# check that req_format_els are present in in_format_list, and if not add them
# return a new format_list
def complete_format_list(in_format_list, req_format_els):
    """Return a copy of in_format_list extended with any of the
    (attr_name, attr_type) pairs in req_format_els whose attribute name
    is not already present. The input list is not modified."""
    out_format_list = in_format_list[0:]
    known_names = [fmt_el[0] for fmt_el in in_format_list]
    for req_el in req_format_els:
        if req_el[0] not in known_names:
            out_format_list.append(req_el)
    return out_format_list
#
# Convert Condor XML to list
#
# For Example:
#
#<?xml version="1.0"?>
#<!DOCTYPE classads SYSTEM "classads.dtd">
#<classads>
#<c>
# <a n="MyType"><s>Job</s></a>
# <a n="TargetType"><s>Machine</s></a>
# <a n="AutoClusterId"><i>0</i></a>
# <a n="ExitBySignal"><b v="f"/></a>
# <a n="TransferOutputRemaps"><un/></a>
# <a n="WhenToTransferOutput"><s>ON_EXIT</s></a>
#</c>
#<c>
# <a n="MyType"><s>Job</s></a>
# <a n="TargetType"><s>Machine</s></a>
# <a n="AutoClusterId"><i>0</i></a>
# <a n="OnExitRemove"><b v="t"/></a>
# <a n="x509userproxysubject"><s>/DC=gov/DC=fnal/O=Fermilab/OU=People/CN=Igor Sfiligoi/UID=sfiligoi</s></a>
#</c>
#</classads>
#
# 3 xml2list XML handler functions
def xml2list_start_element(name, attrs):
    """Expat StartElementHandler for xml2list: open a classad ('c'),
    an attribute ('a') or record the value type of the current attribute.
    NOTE: Python 2 only (dict.has_key, 'raise Type, msg' syntax)."""
    global xml2list_data,xml2list_inclassad,xml2list_inattr,xml2list_intype
    if name=="c":
        xml2list_inclassad = {}
    elif name=="a":
        # new attribute; default type is string, value accumulated in 'val'
        xml2list_inattr={"name":attrs["n"],"val":""}
        xml2list_intype="s"
    elif name=="i":
        xml2list_intype="i"
    elif name=="r":
        xml2list_intype="r"
    elif name=="b":
        xml2list_intype="b"
        if attrs.has_key('v'):
            xml2list_inattr["val"] = (attrs["v"] in ('T','t','1'))
        else:
            xml2list_inattr["val"] = None # extended syntax... value in text area
    elif name=="un":
        # 'undefined' classad value
        xml2list_intype="un"
        xml2list_inattr["val"] = None
    elif name in ("s","e"):
        pass # nothing to do
    elif name=="classads":
        pass # top element, nothing to do
    else:
        raise TypeError,"Unsupported type: %s"%name
def xml2list_end_element(name):
    """Expat EndElementHandler for xml2list: close the current classad or
    attribute, storing it in the enclosing structure."""
    global xml2list_data,xml2list_inclassad,xml2list_inattr,xml2list_intype
    if name=="c":
        # classad complete; append it to the output list
        xml2list_data.append(xml2list_inclassad)
        xml2list_inclassad = None
    elif name=="a":
        # attribute complete; store it in the current classad
        xml2list_inclassad[xml2list_inattr["name"]]=xml2list_inattr["val"]
        xml2list_inattr = None
    elif name in ("i","b","un","r"):
        # typed value closed; revert to the default string type
        xml2list_intype="s"
    elif name in ("s","e"):
        pass # nothing to do
    elif name=="classads":
        pass # top element, nothing to do
    else:
        raise TypeError,"Unexpected type: %s"%name
def xml2list_char_data(data):
    """Expat CharacterDataHandler for xml2list: convert the text of the
    current attribute according to the recorded type."""
    global xml2list_data,xml2list_inclassad,xml2list_inattr,xml2list_intype
    if xml2list_inattr==None:
        return # only process when in attribute
    if xml2list_intype=="i":
        xml2list_inattr["val"]= int(data)
    elif xml2list_intype=="r":
        xml2list_inattr["val"]= float(data)
    elif xml2list_intype=="b":
        if xml2list_inattr["val"]!=None:
            pass #nothing to do, value was in attribute
        else:
            xml2list_inattr["val"]=(data[0] in ('T','t','1'))
    elif xml2list_intype=="un":
        pass #nothing to do, value was in attribute
    else:
        # string type: strings may arrive in several chunks, so append;
        # also undo the condor escaping of double quotes
        unescaped_data=string.replace(data,'\\"','"')
        xml2list_inattr["val"]+= unescaped_data
def xml2list(xml_data):
    """Convert condor XML output (a list of text lines) into a list of
    classad dictionaries, using the three expat handlers above.
    NOTE: not reentrant -- the parser state is kept in module globals."""
    global xml2list_data,xml2list_inclassad,xml2list_inattr,xml2list_intype
    xml2list_data=[]
    xml2list_inclassad=None
    xml2list_inattr=None
    xml2list_intype=None
    p = xml.parsers.expat.ParserCreate()
    p.StartElementHandler = xml2list_start_element
    p.EndElementHandler = xml2list_end_element
    p.CharacterDataHandler = xml2list_char_data
    found_xml=-1
    for line in range(len(xml_data)):
        # look for the xml header
        if xml_data[line][:5]=="<?xml":
            found_xml=line
            break
    if found_xml>=0:
        try:
            p.Parse(string.join(xml_data[found_xml:]),1)
        except TypeError, e:
            raise RuntimeError, "Failed to parse XML data, TypeError: %s"%e
        except:
            raise RuntimeError, "Failed to parse XML data, generic error"
    # else no xml, so return an empty list
    return xml2list_data
#
# Convert a list to a dictionary
#
def list2dict(list_data, attr_name):
    """Convert a list of classad dictionaries into a dictionary of
    dictionaries keyed on attr_name.

    attr_name may be a single attribute name or a list/tuple of names,
    in which case the key is the tuple of the corresponding values.
    The key attributes themselves are stripped from the stored elements.
    """
    multi_key = type(attr_name) in (list, tuple)
    if multi_key:
        attr_list = attr_name
    else:
        attr_list = [attr_name]
    dict_data = {}
    for el in list_data:
        if multi_key:
            key = tuple([el[an] for an in attr_name])
        else:
            key = el[attr_name]
        # keep every attribute except the key ones
        stripped = {}
        for a in el:
            if a not in attr_list:
                stripped[a] = el[a]
        dict_data[key] = stripped
    return dict_data
def applyConstraint(data, constraint_func):
    """Return the subset of data (a dict of classads) whose elements
    satisfy constraint_func(el); with no constraint_func, return data
    unchanged."""
    if constraint_func is None:
        return data
    outdata = {}
    for key in data.keys():
        el = data[key]
        if constraint_func(el):
            outdata[key] = el
    return outdata
def doGroup(indata, group_key_func, group_data_func):
    """Group the elements of indata by key and summarize each group.

    indata          -- dictionary of classad dictionaries
    group_key_func  -- takes a classad dictionary, returns the group key
    group_data_func -- takes a list of classad dictionaries, returns the
                       summary classad dictionary of the group
    Returns {group_key: summary classad}.
    """
    gdata = {}
    for k in indata.keys():
        inel = indata[k]
        # dict.has_key was removed in Python 3; setdefault is the portable
        # (and Python 2 compatible) equivalent of the test-then-append idiom
        gdata.setdefault(group_key_func(inel), []).append(inel)
    outdata = {}
    for gkey in gdata.keys():
        outdata[gkey] = group_data_func(gdata[gkey])
    return outdata
#
# Inputs
# data - data from a fetch()
# hash_func - Hashing function
# One argument: classad dictionary
# Returns: hash value
# if None, will not be counted
# if a list, all elements will be used
#
# Returns a dictionary of hash values
# Elements are counts (or more dictionaries if hash returns lists)
#
def fetch2count(data, hash_func):
    """Count the elements of data, bucketed by hash_func.

    data      -- dictionary of classad dictionaries, as returned by fetch()
    hash_func -- one-argument hashing function:
                 returns None to skip the element, a plain value to count
                 it under that key, or a list to build one level of nested
                 dictionaries per list element
    Returns a dictionary of counts (nested if the hash returns lists).
    """
    count = {}
    for k in data.keys():
        el = data[k]
        hid = hash_func(el)
        if hid is None:
            continue  # hash tells us it does not want to count this
        # cel will point to the real counter
        cel = count
        # a list hash means nested dictionaries, one level per element
        if type(hid) == type([]):
            for h in hid[:-1]:
                # dict.has_key was removed in Python 3; 'in' is the portable
                # (and Python 2 compatible) equivalent
                if h not in cel:
                    cel[h] = {}
                cel = cel[h]
            hid = hid[-1]
        cel[hid] = cel.get(hid, 0) + 1
    return count
#
# Inputs
# data - data from a fetch()
# hash_func - Hashing function
# One argument: classad dictionary
# Returns: hash value
# if None, will not be counted
# if a list, all elements will be used
#
# Returns a dictionary of hash values
# Elements are lists of keys (or more dictionaries if hash returns lists)
#
def fetch2list(data, hash_func):
    """List the keys of data, bucketed by hash_func.

    data      -- dictionary of classad dictionaries, as returned by fetch()
    hash_func -- one-argument hashing function:
                 returns None to skip the element, a plain value to list
                 its key under that bucket, or a list to build one level of
                 nested dictionaries per list element
    Returns a dictionary of lists of keys (nested if the hash returns
    lists).
    """
    out_list = {}  # renamed from 'list': do not shadow the builtin
    for k in data.keys():
        el = data[k]
        hid = hash_func(el)
        if hid is None:
            continue  # hash tells us it does not want to list this
        # lel will point to the dictionary holding the real list
        lel = out_list
        # a list hash means nested dictionaries, one level per element
        if type(hid) == type([]):
            for h in hid[:-1]:
                if h not in lel:
                    lel[h] = {}
                lel = lel[h]
            hid = hid[-1]
        if hid in lel:
            # Bug fix: the original did 'lel[hid].append[k]' -- subscripting
            # the bound method (a TypeError at runtime) and storing its
            # result; the intent is to append the key to the existing list.
            lel[hid].append(k)
        else:
            lel[hid] = [k]
    return out_list
#
# Recursivelly add two dictionaries
# Do it in place, using the first one
#
def addDict(base_dict, new_dict):
    """Recursively add new_dict into base_dict, in place.

    Keys missing from base_dict are copied over; for keys present in
    both, dictionaries are merged recursively and any other values are
    added with +=.
    """
    for k in new_dict.keys():
        new_el = new_dict[k]
        # dict.has_key was removed in Python 3; 'not in' is the portable
        # (and Python 2 compatible) equivalent
        if k not in base_dict:
            # nothing there?, just copy
            base_dict[k] = new_el
        elif type(new_el) == type({}):
            # another dictionary, recurse
            addDict(base_dict[k], new_el)
        else:
            base_dict[k] += new_el
|
apache-2.0
|
tjhei/burnman
|
burnman/perplex.py
|
1
|
14384
|
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2017 by the BurnMan team, released under the GNU
# GPL v2 or later.
from __future__ import absolute_import
from __future__ import print_function
import warnings
from subprocess import Popen, PIPE, STDOUT
from os import rename
import numpy as np
from scipy.interpolate import interp2d, griddata
from .material import Material, material_property
from . import eos
from .tools import copy_documentation
def create_perplex_table(werami_path, project_name, outfile, n_pressures, n_temperatures, pressure_range=None, temperature_range=None):
    '''
    This function uses PerpleX's werami software to output a table file containing the following material properties.
    2 - Density (kg/m3)
    4 - Expansivity (1/K, for volume)
    5 - Compressibility (1/bar, for volume)
    10 - Adiabatic bulk modulus (bar)
    11 - Adiabatic shear modulus (bar)
    12 - Sound velocity (km/s)
    13 - P-wave velocity (Vp, km/s)
    14 - S-wave velocity (Vs, km/s)
    17 - Entropy (J/K/kg)
    18 - Enthalpy (J/kg)
    19 - Heat Capacity (J/K/kg)
    22 - Molar Volume (J/bar)

    werami_path  -- path to the werami executable
    project_name -- name of the PerpleX project to sample
    outfile      -- name of the table file to create
    n_pressures, n_temperatures -- grid resolution
    pressure_range, temperature_range -- optional (min, max) pairs in Pa
        and K; when either is omitted the project's own range is kept
    '''
    print('Working on creating {0}x{1} P-T table file using werami. Please wait.\n'.format(n_pressures, n_temperatures))
    try:
        # werami expects pressures in bar, hence the 1.e5 conversion
        str2 = 'y\n{0} {1}\n{2} {3}\n'.format(pressure_range[0]/1.e5, pressure_range[1]/1.e5,
                                              temperature_range[0], temperature_range[1])
    except (TypeError, IndexError):  # a range is None/incomplete
        print('Keeping P-T range the same as the original project range.\n')
        str2 = 'n\n'
    # scripted answers to werami's interactive prompts; the numbers select
    # the properties listed in the docstring above
    stdin='{0:s}\n2\n' \
        '2\nn\n' \
        '4\nn\n' \
        '5\nn\n' \
        '10\nn\n' \
        '11\nn\n' \
        '12\nn\n' \
        '13\nn\n' \
        '14\nn\n' \
        '17\nn\n' \
        '18\nn\n' \
        '19\nn\n' \
        '22\nn\n' \
        '0\n' \
        '{1:s}' \
        '{2:d} {3:d}\n' \
        '0'.format(project_name, str2, n_pressures, n_temperatures)
    p = Popen(werami_path, stdout=PIPE, stdin=PIPE, stderr=STDOUT)
    # Bug fix: under Python 3 the pipes are binary, so passing a str to
    # communicate() raises TypeError; encode on the way in and decode on
    # the way out (a no-op for this ASCII text under Python 2).
    stdout = p.communicate(input=stdin.encode('utf-8'))[0].decode('utf-8')
    print(stdout)
    # werami reports the generated file name on a line of its own
    out = [s for s in stdout.split('\n') if "Output has been written to the" in s][0].split()[-1]
    rename(out, outfile)
    print('Output file renamed to {0:s}'.format(outfile))
    print('Processing complete')
class PerplexMaterial(Material):
    """
    This is the base class for a PerpleX material. States of the material
    can only be queried after setting the pressure and temperature
    using set_state().
    Instances of this class are initialised with
    a 2D PerpleX tab file. This file should be in the standard format
    (as output by werami), and should have columns with the following names:
    'rho,kg/m3', 'alpha,1/K', 'beta,1/bar', 'Ks,bar', 'Gs,bar', 'v0,km/s',
    'vp,km/s', 'vs,km/s', 's,J/K/kg', 'h,J/kg', 'cp,J/K/kg', 'V,J/bar/mol'.
    The order of these names is not important.
    Properties of the material are determined by linear interpolation from
    the PerpleX grid. They are all returned in SI units on a molar basis,
    even though the PerpleX tab file is not in these units.
    This class is available as ``burnman.PerplexMaterial``.
    """
    def __init__(self, tab_file):
        # 'name' doubles as the path of the originating tab file
        self.params = {'name': tab_file}
        self._property_interpolators, self.params['molar_mass'], self.bounds = self._read_2D_perplex_file(tab_file)
        Material.__init__(self)
    def _read_2D_perplex_file(self, filename):
        """Parse a 2D PerpleX tab file and return
        (property_interpolators, molar_mass, [[Pmin, Pmax], [Tmin, Tmax]])
        with all interpolators in SI units on a molar basis."""
        with open(filename, 'r') as f:
            datastream = f.read()
        lines = [line.strip().split()
                 for line in datastream.split('\n') if line.strip()]
        if lines[2][0] != '2':
            raise Exception('This is not a 2D PerpleX table')
        # (minimum, grid spacing, number of points) per independent variable
        bounds = [(float(lines[4][0]), float(lines[5][0]), int(lines[6][0])),
                  (float(lines[8][0]), float(lines[9][0]), int(lines[10][0]))]
        if lines[3][0] == 'P(bar)' and lines[7][0] == 'T(K)':
            Pmin, Pint, nP = bounds[0]
            Tmin, Tint, nT = bounds[1]
        elif lines[3][0] == 'T(K)' and lines[7][0] == 'P(bar)':
            Pmin, Pint, nP = bounds[1]
            Tmin, Tint, nT = bounds[0]
        else:
            raise Exception('This file does not have a recognised PerpleX structure.\n'
                            'Are the independent variables P(bar) and T(K)?')
        Pmin = Pmin*1.e5 # bar to Pa
        Pint = Pint*1.e5 # bar to Pa
        Pmax = Pmin + Pint*(nP-1.)
        Tmax = Tmin + Tint*(nT-1.)
        pressures = np.linspace(Pmin, Pmax, nP)
        temperatures = np.linspace(Tmin, Tmax, nT)
        n_properties = int(lines[11][0])
        property_list = lines[12]
        # property_table[i][j][k] returns the kth property at the ith pressure and jth temperature
        table = np.array([[float(string) for string in line] for line in lines[13:13+nP*nT]])
        if lines[3][0] == 'P(bar)':
            property_table = np.swapaxes(table.reshape(nT, nP, n_properties), 0, 1)
        else:
            property_table = table.reshape(nP, nT, n_properties)
        ordered_property_list = ['rho,kg/m3',
                                 'alpha,1/K',
                                 'beta,1/bar',
                                 'Ks,bar',
                                 'Gs,bar',
                                 'v0,km/s',
                                 'vp,km/s',
                                 'vs,km/s',
                                 's,J/K/kg',
                                 'h,J/kg',
                                 'cp,J/K/kg',
                                 'V,J/bar/mol']
        # NOTE(review): the loop below pairs p_indices[i] with
        # ordered_property_list[i]; this assumes the file's columns appear
        # in the same relative order as ordered_property_list -- verify
        p_indices = [i for i, p in enumerate(property_list) for ordered_p in ordered_property_list if p == ordered_p]
        properties = {}
        for i, p_idx in enumerate(p_indices):
            # Fill in NaNs as long as they aren't in the corners of the P-T grid
            a = np.array(property_table[:,:,[p_idx]][:,:,0])
            x, y = np.indices(a.shape)
            a[np.isnan(a)] = griddata((x[~np.isnan(a)], y[~np.isnan(a)]), # points we know
                                      a[~np.isnan(a)], # values we know
                                      (x[np.isnan(a)], y[np.isnan(a)]))
            properties[ordered_property_list[i]] = a
        densities = properties['rho,kg/m3']
        volumes = 1.e-5 * properties['V,J/bar/mol']
        # molar mass from density*molar volume, averaged over the grid
        molar_masses = densities*volumes
        molar_mass = np.mean(molar_masses)
        # convert bar-based and km/s-based columns to SI, molar quantities
        property_interpolators = {'rho': interp2d(pressures, temperatures, densities.T, bounds_error=True),
                                  'alpha': interp2d(pressures, temperatures, properties['alpha,1/K'].T, bounds_error=True),
                                  'K_T': interp2d(pressures, temperatures, 1.e5 / properties['beta,1/bar'].T, bounds_error=True),
                                  'K_S': interp2d(pressures, temperatures, 1.e5 * properties['Ks,bar'].T, bounds_error=True),
                                  'G_S': interp2d(pressures, temperatures, 1.e5 * properties['Gs,bar'].T, bounds_error=True),
                                  'bulk_sound_velocity': interp2d(pressures, temperatures, 1.e3*properties['v0,km/s'].T, bounds_error=True),
                                  'p_wave_velocity': interp2d(pressures, temperatures, 1.e3*properties['vp,km/s'].T, bounds_error=True),
                                  's_wave_velocity': interp2d(pressures, temperatures, 1.e3*properties['vs,km/s'].T, bounds_error=True),
                                  'S': interp2d(pressures, temperatures, properties['s,J/K/kg'].T*molar_masses.T, bounds_error=True),
                                  'H': interp2d(pressures, temperatures, properties['h,J/kg'].T*molar_masses.T, bounds_error=True),
                                  'C_p': interp2d(pressures, temperatures, properties['cp,J/K/kg'].T*molar_masses.T, bounds_error=True),
                                  'V': interp2d(pressures, temperatures, volumes.T, bounds_error=True)}
        bounds = [[Pmin, Pmax], [Tmin, Tmax]]
        return property_interpolators, molar_mass, bounds
    @copy_documentation(Material.set_state)
    def set_state(self, pressure, temperature):
        # refuse to extrapolate outside the tabulated P-T grid
        if not np.logical_and(np.all(self.bounds[0][0] <= pressure),
                              np.all(pressure <= self.bounds[0][1])):
            raise ValueError("The set_state pressure ({0:.4f}) is outside the bounds of this rock ({1:.4f}-{2:.4f} GPa)".format(pressure,
                                                                                                                               self.bounds[0][0]/1.e9,
                                                                                                                               self.bounds[0][1]/1.e9))
        if not np.logical_and(np.all(self.bounds[1][0] <= temperature),
                              np.all(temperature <= self.bounds[1][1])):
            raise ValueError("The set_state temperature ({0:.1f}) is outside the bounds of this rock ({1:.1f}-{2:.1f} K)".format(temperature,
                                                                                                                                self.bounds[1][0],
                                                                                                                                self.bounds[1][1]))
        Material.set_state(self, pressure, temperature)
    """
    Properties by linear interpolation of Perple_X output
    """
    @material_property
    @copy_documentation(Material.molar_volume)
    def molar_volume(self):
        return self._property_interpolators['V'](self.pressure, self.temperature)[0]
    @material_property
    @copy_documentation(Material.molar_enthalpy)
    def molar_enthalpy(self):
        return self._property_interpolators['H'](self.pressure, self.temperature)[0]
    @material_property
    @copy_documentation(Material.molar_entropy)
    def molar_entropy(self):
        return self._property_interpolators['S'](self.pressure, self.temperature)[0]
    @material_property
    @copy_documentation(Material.isothermal_bulk_modulus)
    def isothermal_bulk_modulus(self):
        return self._property_interpolators['K_T'](self.pressure, self.temperature)[0]
    @material_property
    @copy_documentation(Material.adiabatic_bulk_modulus)
    def adiabatic_bulk_modulus(self):
        return self._property_interpolators['K_S'](self.pressure, self.temperature)[0]
    @material_property
    @copy_documentation(Material.molar_heat_capacity_p)
    def molar_heat_capacity_p(self):
        return self._property_interpolators['C_p'](self.pressure, self.temperature)[0]
    @material_property
    @copy_documentation(Material.thermal_expansivity)
    def thermal_expansivity(self):
        return self._property_interpolators['alpha'](self.pressure, self.temperature)[0]
    @material_property
    @copy_documentation(Material.shear_modulus)
    def shear_modulus(self):
        return self._property_interpolators['G_S'](self.pressure, self.temperature)[0]
    @material_property
    @copy_documentation(Material.p_wave_velocity)
    def p_wave_velocity(self):
        return self._property_interpolators['p_wave_velocity'](self.pressure, self.temperature)[0]
    @material_property
    @copy_documentation(Material.bulk_sound_velocity)
    def bulk_sound_velocity(self):
        return self._property_interpolators['bulk_sound_velocity'](self.pressure, self.temperature)[0]
    @material_property
    @copy_documentation(Material.shear_wave_velocity)
    def shear_wave_velocity(self):
        return self._property_interpolators['s_wave_velocity'](self.pressure, self.temperature)[0]
    """
    Properties from mineral parameters,
    Legendre transformations
    or Maxwell relations
    """
    @material_property
    @copy_documentation(Material.molar_gibbs)
    def molar_gibbs(self):
        return self.molar_enthalpy - self.temperature*self.molar_entropy
    @material_property
    @copy_documentation(Material.molar_mass)
    def molar_mass(self):
        if 'molar_mass' in self.params:
            return self.params['molar_mass']
        else:
            raise ValueError(
                "No molar_mass parameter for mineral " + self.to_string + ".")
    @material_property
    @copy_documentation(Material.density)
    def density(self):
        return self._property_interpolators['rho'](self.pressure, self.temperature)[0]
    @material_property
    @copy_documentation(Material.molar_internal_energy)
    def molar_internal_energy(self):
        return self.molar_gibbs - self.pressure * self.molar_volume + self.temperature * self.molar_entropy
    @material_property
    @copy_documentation(Material.molar_helmholtz)
    def molar_helmholtz(self):
        return self.molar_gibbs - self.pressure * self.molar_volume
    @material_property
    @copy_documentation(Material.isothermal_compressibility)
    def isothermal_compressibility(self):
        return 1. / self.isothermal_bulk_modulus
    @material_property
    @copy_documentation(Material.adiabatic_compressibility)
    def adiabatic_compressibility(self):
        return 1. / self.adiabatic_bulk_modulus
    @material_property
    @copy_documentation(Material.molar_heat_capacity_v)
    def molar_heat_capacity_v(self):
        return self.molar_heat_capacity_p - self.molar_volume * self.temperature \
            * self.thermal_expansivity * self.thermal_expansivity \
            * self.isothermal_bulk_modulus
    @material_property
    @copy_documentation(Material.grueneisen_parameter)
    def grueneisen_parameter(self):
        return ( self.thermal_expansivity *
                 self.molar_volume *
                 self.adiabatic_bulk_modulus /
                 self.molar_heat_capacity_p )
|
gpl-2.0
|
cnsoft/kbengine-cocos2dx
|
kbe/src/lib/python/Lib/test/regex_tests.py
|
59
|
8992
|
# Regex test suite and benchmark suite v1.5a2
# The 3 possible outcomes for each pattern
# Outcome codes used by the benchmark/test tables below
[SUCCEED, FAIL, SYNTAX_ERROR] = range(3)
# Benchmark suite (needs expansion)
#
# The benchmark suite does not test correctness, just speed. The
# first element of each tuple is the regex pattern; the second is a
# string to match it against. The benchmarking code will embed the
# second string inside several sizes of padding, to test how regex
# matching performs on large strings.
benchmarks = [
('Python', 'Python'), # Simple text literal
('.*Python', 'Python'), # Bad text literal
('.*Python.*', 'Python'), # Worse text literal
('.*\\(Python\\)', 'Python'), # Bad text literal with grouping
# NOTE(review): the unescaped '(' below is a literal paren in the old
# regex syntax; it looks like a stray character for a plain
# alternation pattern -- verify against the upstream test suite
('(Python\\|Perl\\|Tcl', 'Perl'), # Alternation
('\\(Python\\|Perl\\|Tcl\\)', 'Perl'), # Grouped alternation
('\\(Python\\)\\1', 'PythonPython'), # Backreference
# ('\\([0a-z][a-z]*,\\)+', 'a5,b7,c9,'), # Disable the fastmap optimization
('\\([a-z][a-z0-9]*,\\)+', 'a5,b7,c9,') # A few sets
]
# Test suite (for verifying correctness)
#
# The test suite is a list of 5- or 3-tuples. The 5 parts of a
# complete tuple are:
# element 0: a string containing the pattern
# 1: the string to match against the pattern
# 2: the expected result (SUCCEED, FAIL, SYNTAX_ERROR)
# 3: a string that will be eval()'ed to produce a test string.
# This is an arbitrary Python expression; the available
# variables are "found" (the whole match), and "g1", "g2", ...
# up to "g10" contain the contents of each group, or the
# string 'None' if the group wasn't given a value.
# 4: The expected result of evaluating the expression.
# If the two don't match, an error is reported.
#
# If the regex isn't expected to work, the latter two elements can be omitted.
tests = [
('abc', 'abc', SUCCEED,
'found', 'abc'),
('abc', 'xbc', FAIL),
('abc', 'axc', FAIL),
('abc', 'abx', FAIL),
('abc', 'xabcy', SUCCEED,
'found', 'abc'),
('abc', 'ababc', SUCCEED,
'found', 'abc'),
('ab*c', 'abc', SUCCEED,
'found', 'abc'),
('ab*bc', 'abc', SUCCEED,
'found', 'abc'),
('ab*bc', 'abbc', SUCCEED,
'found', 'abbc'),
('ab*bc', 'abbbbc', SUCCEED,
'found', 'abbbbc'),
('ab+bc', 'abbc', SUCCEED,
'found', 'abbc'),
('ab+bc', 'abc', FAIL),
('ab+bc', 'abq', FAIL),
('ab+bc', 'abbbbc', SUCCEED,
'found', 'abbbbc'),
('ab?bc', 'abbc', SUCCEED,
'found', 'abbc'),
('ab?bc', 'abc', SUCCEED,
'found', 'abc'),
('ab?bc', 'abbbbc', FAIL),
('ab?c', 'abc', SUCCEED,
'found', 'abc'),
('^abc$', 'abc', SUCCEED,
'found', 'abc'),
('^abc$', 'abcc', FAIL),
('^abc', 'abcc', SUCCEED,
'found', 'abc'),
('^abc$', 'aabc', FAIL),
('abc$', 'aabc', SUCCEED,
'found', 'abc'),
('^', 'abc', SUCCEED,
'found+"-"', '-'),
('$', 'abc', SUCCEED,
'found+"-"', '-'),
('a.c', 'abc', SUCCEED,
'found', 'abc'),
('a.c', 'axc', SUCCEED,
'found', 'axc'),
('a.*c', 'axyzc', SUCCEED,
'found', 'axyzc'),
('a.*c', 'axyzd', FAIL),
('a[bc]d', 'abc', FAIL),
('a[bc]d', 'abd', SUCCEED,
'found', 'abd'),
('a[b-d]e', 'abd', FAIL),
('a[b-d]e', 'ace', SUCCEED,
'found', 'ace'),
('a[b-d]', 'aac', SUCCEED,
'found', 'ac'),
('a[-b]', 'a-', SUCCEED,
'found', 'a-'),
('a[b-]', 'a-', SUCCEED,
'found', 'a-'),
('a[]b', '-', SYNTAX_ERROR),
('a[', '-', SYNTAX_ERROR),
('a\\', '-', SYNTAX_ERROR),
('abc\\)', '-', SYNTAX_ERROR),
('\\(abc', '-', SYNTAX_ERROR),
('a]', 'a]', SUCCEED,
'found', 'a]'),
('a[]]b', 'a]b', SUCCEED,
'found', 'a]b'),
('a[^bc]d', 'aed', SUCCEED,
'found', 'aed'),
('a[^bc]d', 'abd', FAIL),
('a[^-b]c', 'adc', SUCCEED,
'found', 'adc'),
('a[^-b]c', 'a-c', FAIL),
('a[^]b]c', 'a]c', FAIL),
('a[^]b]c', 'adc', SUCCEED,
'found', 'adc'),
('\\ba\\b', 'a-', SUCCEED,
'"-"', '-'),
('\\ba\\b', '-a', SUCCEED,
'"-"', '-'),
('\\ba\\b', '-a-', SUCCEED,
'"-"', '-'),
('\\by\\b', 'xy', FAIL),
('\\by\\b', 'yz', FAIL),
('\\by\\b', 'xyz', FAIL),
('ab\\|cd', 'abc', SUCCEED,
'found', 'ab'),
('ab\\|cd', 'abcd', SUCCEED,
'found', 'ab'),
('\\(\\)ef', 'def', SUCCEED,
'found+"-"+g1', 'ef-'),
('$b', 'b', FAIL),
('a(b', 'a(b', SUCCEED,
'found+"-"+g1', 'a(b-None'),
('a(*b', 'ab', SUCCEED,
'found', 'ab'),
('a(*b', 'a((b', SUCCEED,
'found', 'a((b'),
('a\\\\b', 'a\\b', SUCCEED,
'found', 'a\\b'),
('\\(\\(a\\)\\)', 'abc', SUCCEED,
'found+"-"+g1+"-"+g2', 'a-a-a'),
('\\(a\\)b\\(c\\)', 'abc', SUCCEED,
'found+"-"+g1+"-"+g2', 'abc-a-c'),
('a+b+c', 'aabbabc', SUCCEED,
'found', 'abc'),
('\\(a+\\|b\\)*', 'ab', SUCCEED,
'found+"-"+g1', 'ab-b'),
('\\(a+\\|b\\)+', 'ab', SUCCEED,
'found+"-"+g1', 'ab-b'),
('\\(a+\\|b\\)?', 'ab', SUCCEED,
'found+"-"+g1', 'a-a'),
('\\)\\(', '-', SYNTAX_ERROR),
('[^ab]*', 'cde', SUCCEED,
'found', 'cde'),
('abc', '', FAIL),
('a*', '', SUCCEED,
'found', ''),
('a\\|b\\|c\\|d\\|e', 'e', SUCCEED,
'found', 'e'),
('\\(a\\|b\\|c\\|d\\|e\\)f', 'ef', SUCCEED,
'found+"-"+g1', 'ef-e'),
('abcd*efg', 'abcdefg', SUCCEED,
'found', 'abcdefg'),
('ab*', 'xabyabbbz', SUCCEED,
'found', 'ab'),
('ab*', 'xayabbbz', SUCCEED,
'found', 'a'),
('\\(ab\\|cd\\)e', 'abcde', SUCCEED,
'found+"-"+g1', 'cde-cd'),
('[abhgefdc]ij', 'hij', SUCCEED,
'found', 'hij'),
('^\\(ab\\|cd\\)e', 'abcde', FAIL,
'xg1y', 'xy'),
('\\(abc\\|\\)ef', 'abcdef', SUCCEED,
'found+"-"+g1', 'ef-'),
('\\(a\\|b\\)c*d', 'abcd', SUCCEED,
'found+"-"+g1', 'bcd-b'),
('\\(ab\\|ab*\\)bc', 'abc', SUCCEED,
'found+"-"+g1', 'abc-a'),
('a\\([bc]*\\)c*', 'abc', SUCCEED,
'found+"-"+g1', 'abc-bc'),
('a\\([bc]*\\)\\(c*d\\)', 'abcd', SUCCEED,
'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a\\([bc]+\\)\\(c*d\\)', 'abcd', SUCCEED,
'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a\\([bc]*\\)\\(c+d\\)', 'abcd', SUCCEED,
'found+"-"+g1+"-"+g2', 'abcd-b-cd'),
('a[bcd]*dcdcde', 'adcdcde', SUCCEED,
'found', 'adcdcde'),
('a[bcd]+dcdcde', 'adcdcde', FAIL),
('\\(ab\\|a\\)b*c', 'abc', SUCCEED,
'found+"-"+g1', 'abc-ab'),
('\\(\\(a\\)\\(b\\)c\\)\\(d\\)', 'abcd', SUCCEED,
'g1+"-"+g2+"-"+g3+"-"+g4', 'abc-a-b-d'),
('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', SUCCEED,
'found', 'alpha'),
('^a\\(bc+\\|b[eh]\\)g\\|.h$', 'abh', SUCCEED,
'found+"-"+g1', 'bh-None'),
('\\(bc+d$\\|ef*g.\\|h?i\\(j\\|k\\)\\)', 'effgz', SUCCEED,
'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('\\(bc+d$\\|ef*g.\\|h?i\\(j\\|k\\)\\)', 'ij', SUCCEED,
'found+"-"+g1+"-"+g2', 'ij-ij-j'),
('\\(bc+d$\\|ef*g.\\|h?i\\(j\\|k\\)\\)', 'effg', FAIL),
('\\(bc+d$\\|ef*g.\\|h?i\\(j\\|k\\)\\)', 'bcdd', FAIL),
('\\(bc+d$\\|ef*g.\\|h?i\\(j\\|k\\)\\)', 'reffgz', SUCCEED,
'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('\\(\\(\\(\\(\\(\\(\\(\\(\\(a\\)\\)\\)\\)\\)\\)\\)\\)\\)', 'a', SUCCEED,
'found', 'a'),
('multiple words of text', 'uh-uh', FAIL),
('multiple words', 'multiple words, yeah', SUCCEED,
'found', 'multiple words'),
('\\(.*\\)c\\(.*\\)', 'abcde', SUCCEED,
'found+"-"+g1+"-"+g2', 'abcde-ab-de'),
('(\\(.*\\), \\(.*\\))', '(a, b)', SUCCEED,
'g2+"-"+g1', 'b-a'),
('[k]', 'ab', FAIL),
('a[-]?c', 'ac', SUCCEED,
'found', 'ac'),
('\\(abc\\)\\1', 'abcabc', SUCCEED,
'g1', 'abc'),
('\\([a-c]*\\)\\1', 'abcabc', SUCCEED,
'g1', 'abc'),
('^\\(.+\\)?B', 'AB', SUCCEED,
'g1', 'A'),
('\\(a+\\).\\1$', 'aaaaa', SUCCEED,
'found+"-"+g1', 'aaaaa-aa'),
('^\\(a+\\).\\1$', 'aaaa', FAIL),
('\\(abc\\)\\1', 'abcabc', SUCCEED,
'found+"-"+g1', 'abcabc-abc'),
('\\([a-c]+\\)\\1', 'abcabc', SUCCEED,
'found+"-"+g1', 'abcabc-abc'),
('\\(a\\)\\1', 'aa', SUCCEED,
'found+"-"+g1', 'aa-a'),
('\\(a+\\)\\1', 'aa', SUCCEED,
'found+"-"+g1', 'aa-a'),
('\\(a+\\)+\\1', 'aa', SUCCEED,
'found+"-"+g1', 'aa-a'),
('\\(a\\).+\\1', 'aba', SUCCEED,
'found+"-"+g1', 'aba-a'),
('\\(a\\)ba*\\1', 'aba', SUCCEED,
'found+"-"+g1', 'aba-a'),
('\\(aa\\|a\\)a\\1$', 'aaa', SUCCEED,
'found+"-"+g1', 'aaa-a'),
('\\(a\\|aa\\)a\\1$', 'aaa', SUCCEED,
'found+"-"+g1', 'aaa-a'),
('\\(a+\\)a\\1$', 'aaa', SUCCEED,
'found+"-"+g1', 'aaa-a'),
('\\([abc]*\\)\\1', 'abcabc', SUCCEED,
'found+"-"+g1', 'abcabc-abc'),
('\\(a\\)\\(b\\)c\\|ab', 'ab', SUCCEED,
'found+"-"+g1+"-"+g2', 'ab-None-None'),
('\\(a\\)+x', 'aaax', SUCCEED,
'found+"-"+g1', 'aaax-a'),
('\\([ac]\\)+x', 'aacx', SUCCEED,
'found+"-"+g1', 'aacx-c'),
('\\([^/]*/\\)*sub1/', 'd:msgs/tdir/sub1/trial/away.cpp', SUCCEED,
'found+"-"+g1', 'd:msgs/tdir/sub1/-tdir/'),
('\\([^.]*\\)\\.\\([^:]*\\):[T ]+\\(.*\\)', 'track1.title:TBlah blah blah', SUCCEED,
'found+"-"+g1+"-"+g2+"-"+g3', 'track1.title:TBlah blah blah-track1-title-Blah blah blah'),
('\\([^N]*N\\)+', 'abNNxyzN', SUCCEED,
'found+"-"+g1', 'abNNxyzN-xyzN'),
('\\([^N]*N\\)+', 'abNNxyz', SUCCEED,
'found+"-"+g1', 'abNN-N'),
('\\([abc]*\\)x', 'abcx', SUCCEED,
'found+"-"+g1', 'abcx-abc'),
('\\([abc]*\\)x', 'abc', FAIL),
('\\([xyz]*\\)x', 'abcx', SUCCEED,
'found+"-"+g1', 'x-'),
('\\(a\\)+b\\|aac', 'aac', SUCCEED,
'found+"-"+g1', 'aac-None'),
('\<a', 'a', SUCCEED, 'found', 'a'),
('\<a', '!', FAIL),
('a\<b', 'ab', FAIL),
('a\>', 'ab', FAIL),
('a\>', 'a!', SUCCEED, 'found', 'a'),
('a\>', 'a', SUCCEED, 'found', 'a'),
]
|
lgpl-3.0
|
40223231/2015-cdb-g4-final-test-by-6-22
|
static/Brython3.1.1-20150328-091302/Lib/contextlib.py
|
737
|
8788
|
"""Utilities for with-statement contexts. See PEP 343."""
import sys
from collections import deque
from functools import wraps
__all__ = ["contextmanager", "closing", "ContextDecorator", "ExitStack"]
class ContextDecorator(object):
    "A base class or mixin that enables context managers to work as decorators."
    def _recreate_cm(self):
        """Return a recreated instance of self.
        Allows an otherwise one-shot context manager like
        _GeneratorContextManager to support use as
        a decorator via implicit recreation.
        This is a private interface just for _GeneratorContextManager.
        See issue #11647 for details.
        """
        return self
    def __call__(self, func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # recreate the CM on every call so one-shot managers work too
            cm = self._recreate_cm()
            with cm:
                return func(*args, **kwargs)
        return wrapper
class _GeneratorContextManager(ContextDecorator):
    """Helper for @contextmanager decorator.

    Wraps a single-yield generator: code before the yield runs in
    __enter__, code after it in __exit__.
    """
    def __init__(self, func, *args, **kwds):
        # Start the generator now; keep the factory arguments so the CM
        # can be recreated for decorator use (see _recreate_cm).
        self.gen = func(*args, **kwds)
        self.func, self.args, self.kwds = func, args, kwds
    def _recreate_cm(self):
        # _GCM instances are one-shot context managers, so the
        # CM must be recreated each time a decorated function is
        # called
        return self.__class__(self.func, *self.args, **self.kwds)
    def __enter__(self):
        try:
            return next(self.gen)
        except StopIteration:
            # The generator finished without yielding: nothing to enter.
            raise RuntimeError("generator didn't yield")
    def __exit__(self, type, value, traceback):
        if type is None:
            # No exception in the with-body: resume the generator, which
            # must finish (raise StopIteration) after its single yield.
            try:
                next(self.gen)
            except StopIteration:
                return
            else:
                raise RuntimeError("generator didn't stop")
        else:
            if value is None:
                # Need to force instantiation so we can reliably
                # tell if we get the same exception back
                value = type()
            try:
                self.gen.throw(type, value, traceback)
                raise RuntimeError("generator didn't stop after throw()")
            except StopIteration as exc:
                # Suppress the exception *unless* it's the same exception that
                # was passed to throw(). This prevents a StopIteration
                # raised inside the "with" statement from being suppressed
                return exc is not value
            except:
                # only re-raise if it's *not* the exception that was
                # passed to throw(), because __exit__() must not raise
                # an exception unless __exit__() itself failed. But throw()
                # has to raise the exception to signal propagation, so this
                # fixes the impedance mismatch between the throw() protocol
                # and the __exit__() protocol.
                #
                if sys.exc_info()[1] is not value:
                    raise
def contextmanager(func):
    """Turn a single-yield generator function into a context-manager factory.

    Typical usage:

        @contextmanager
        def some_generator(<arguments>):
            <setup>
            try:
                yield <value>
            finally:
                <cleanup>

    makes ``with some_generator(<arguments>) as <variable>: <body>``
    behave like::

        <setup>
        try:
            <variable> = <value>
            <body>
        finally:
            <cleanup>
    """
    @wraps(func)
    def factory(*args, **kwargs):
        return _GeneratorContextManager(func, *args, **kwargs)
    return factory
class closing(object):
    """Guarantee ``thing.close()`` runs when a with-block ends.

    ``with closing(obj) as f: <block>`` is equivalent to::

        f = obj
        try:
            <block>
        finally:
            obj.close()
    """

    def __init__(self, thing):
        self.thing = thing

    def __enter__(self):
        return self.thing

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.thing.close()
# Inspired by discussions on http://bugs.python.org/issue13585
class ExitStack(object):
    """Context manager for dynamic management of a stack of exit callbacks
    For example:
        with ExitStack() as stack:
            files = [stack.enter_context(open(fname)) for fname in filenames]
            # All opened files will automatically be closed at the end of
            # the with statement, even if attempts to open files later
            # in the list raise an exception
    """
    def __init__(self):
        # LIFO stack of callbacks, each with an __exit__-style signature.
        self._exit_callbacks = deque()
    def pop_all(self):
        """Preserve the context stack by transferring it to a new instance"""
        new_stack = type(self)()
        new_stack._exit_callbacks = self._exit_callbacks
        self._exit_callbacks = deque()
        return new_stack
    def _push_cm_exit(self, cm, cm_exit):
        """Helper to correctly register callbacks to __exit__ methods"""
        def _exit_wrapper(*exc_details):
            return cm_exit(cm, *exc_details)
        # keep a reference to the CM on the wrapper for introspection
        _exit_wrapper.__self__ = cm
        self.push(_exit_wrapper)
    def push(self, exit):
        """Registers a callback with the standard __exit__ method signature
        Can suppress exceptions the same way __exit__ methods can.
        Also accepts any object with an __exit__ method (registering a call
        to the method instead of the object itself)
        """
        # We use an unbound method rather than a bound method to follow
        # the standard lookup behaviour for special methods
        _cb_type = type(exit)
        try:
            exit_method = _cb_type.__exit__
        except AttributeError:
            # Not a context manager, so assume its a callable
            self._exit_callbacks.append(exit)
        else:
            self._push_cm_exit(exit, exit_method)
        return exit # Allow use as a decorator
    def callback(self, callback, *args, **kwds):
        """Registers an arbitrary callback and arguments.
        Cannot suppress exceptions.
        """
        def _exit_wrapper(exc_type, exc, tb):
            callback(*args, **kwds)
        # We changed the signature, so using @wraps is not appropriate, but
        # setting __wrapped__ may still help with introspection
        _exit_wrapper.__wrapped__ = callback
        self.push(_exit_wrapper)
        return callback # Allow use as a decorator
    def enter_context(self, cm):
        """Enters the supplied context manager
        If successful, also pushes its __exit__ method as a callback and
        returns the result of the __enter__ method.
        """
        # We look up the special methods on the type to match the with statement
        _cm_type = type(cm)
        _exit = _cm_type.__exit__
        result = _cm_type.__enter__(cm)
        self._push_cm_exit(cm, _exit)
        return result
    def close(self):
        """Immediately unwind the context stack"""
        self.__exit__(None, None, None)
    def __enter__(self):
        return self
    def __exit__(self, *exc_details):
        received_exc = exc_details[0] is not None
        # We manipulate the exception state so it behaves as though
        # we were actually nesting multiple with statements
        frame_exc = sys.exc_info()[1]
        def _fix_exception_context(new_exc, old_exc):
            # Walk new_exc's __context__ chain and graft old_exc at its end,
            # stopping at the exception already active in this frame.
            while 1:
                exc_context = new_exc.__context__
                if exc_context in (None, frame_exc):
                    break
                new_exc = exc_context
            new_exc.__context__ = old_exc
        # Callbacks are invoked in LIFO order to match the behaviour of
        # nested context managers
        suppressed_exc = False
        pending_raise = False
        while self._exit_callbacks:
            cb = self._exit_callbacks.pop()
            try:
                if cb(*exc_details):
                    # a callback suppressed the currently active exception
                    suppressed_exc = True
                    pending_raise = False
                    exc_details = (None, None, None)
            except:
                new_exc_details = sys.exc_info()
                # simulate the stack of exceptions by setting the context
                _fix_exception_context(new_exc_details[1], exc_details[1])
                pending_raise = True
                exc_details = new_exc_details
        if pending_raise:
            try:
                # bare "raise exc_details[1]" replaces our carefully
                # set-up context
                fixed_ctx = exc_details[1].__context__
                raise exc_details[1]
            except BaseException:
                exc_details[1].__context__ = fixed_ctx
                raise
        return received_exc and suppressed_exc
|
gpl-3.0
|
anuveyatsu/dpr-api
|
tests/logic/test_classmethods.py
|
2
|
7043
|
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import unittest
from app import create_app
from app.database import db
import app.logic as logic
import app.models as models
class PackageClassMethodsTest(unittest.TestCase):
    """Exercise logic.Package class methods against a throwaway database."""
    @classmethod
    def setup_class(self):
        # NOTE(review): pytest-style setup_class; the first parameter is the
        # class object and is conventionally named ``cls``, not ``self``.
        self.publisher_name = 'demo'
        self.package_name = 'demo-package'
        self.app = create_app()
        self.app.app_context().push()
        with self.app.app_context():
            # Rebuild the schema, then seed one owner/publisher/package row.
            db.drop_all()
            db.create_all()
            self.user = models.User()
            self.user.email, self.user.name, self.user.secret = \
                'demot@test.com', self.publisher_name, 'super_secret'
            self.publisher = models.Publisher(name=self.publisher_name)
            self.association = models.PublisherUser(role=models.UserRoleEnum.owner)
            self.package = models.Package(name=self.package_name, descriptor={})
            self.publisher.packages.append(self.package)
            self.association.publisher = self.publisher
            self.user.publishers.append(self.association)
            db.session.add(self.user)
            db.session.commit()
    def tests_package_get_method(self):
        # Happy path: an existing (publisher, package) pair is returned.
        pkg = logic.Package.get(self.publisher_name, self.package_name)
        self.assertEqual(pkg['publisher'], self.publisher_name)
        self.assertEqual(pkg['name'], self.package_name)
    def tests_package_get_returns_none_if_no_package(self):
        pkg = logic.Package.get(self.publisher_name, 'not-a-package')
        self.assertIsNone(pkg)
    def tests_package_get_returns_none_if_no_publisher(self):
        # NOTE(review): second argument is publisher_name, not package_name;
        # the lookup still misses because the publisher doesn't exist, but
        # this was likely meant to pass self.package_name — confirm.
        pkg = logic.Package.get('not-a-publisher', self.publisher_name)
        self.assertIsNone(pkg)
    @classmethod
    def teardown_class(self):
        # Drop the schema so later test classes start clean.
        with self.app.app_context():
            db.session.remove()
            db.drop_all()
class PublisherClassMethodsTest(unittest.TestCase):
    """Exercise logic.Publisher class methods with a fresh DB per test."""
    def setUp(self):
        self.publisher_name = 'demo'
        self.package_name = 'demo-package'
        self.app = create_app()
        self.app.app_context().push()
        with self.app.app_context():
            # Rebuild the schema, then seed one owner/publisher/package row.
            db.drop_all()
            db.create_all()
            self.user = models.User()
            self.user.email, self.user.name, self.user.secret = \
                'demot@test.com', self.publisher_name, 'super_secret'
            self.publisher = models.Publisher(name=self.publisher_name)
            self.association = models.PublisherUser(role=models.UserRoleEnum.owner)
            self.metadata = models.Package(name=self.package_name)
            self.metadata.tags.append(models.PackageTag(descriptor={}))
            self.publisher.packages.append(self.metadata)
            self.association.publisher = self.publisher
            self.user.publishers.append(self.association)
            db.session.add(self.user)
            db.session.commit()
    def tests_publisher_get_method(self):
        pub = logic.Publisher.get(self.publisher_name)
        self.assertEqual(pub['name'], self.publisher_name)
    def tests_publisher_get_returns_none_if_no_publisher(self):
        pub = logic.Publisher.get('not-a-publisher')
        self.assertIsNone(pub)
    def test_publisher_create_method_loads_in_db_new_publisher(self):
        metadata = {
            'name': 'new-publisher'
        }
        pub = logic.Publisher.create(metadata)
        created_pub = models.Publisher.query.get(pub.id)
        self.assertEqual(created_pub.name, 'new-publisher')
    # NOTE(review): "relaitontion" is a typo for "relation" in the name below;
    # kept as-is since renaming changes test discovery/reporting.
    def test_publisher_create_method_creates_user_publisher_relaitontion_if_user_exists(self):
        metadata = {
            'name': 'new-publisher',
            'users': [{'role': 'owner', 'user_id': 1}]
        }
        pub = logic.Publisher.create(metadata)
        pub_usr = models.PublisherUser.query.filter_by(publisher_id=pub.id).first()
        # id 2 because setUp already created publisher id 1
        self.assertEqual(pub_usr.publisher_id, 2)
        self.assertEqual(pub_usr.publisher_id, pub.id)
        self.assertEqual(pub_usr.role, models.UserRoleEnum.owner)
        self.assertEqual(pub_usr.publisher.name, 'new-publisher')
        self.assertEqual(pub_usr.user.name, self.publisher_name)
    def tearDown(self):
        with self.app.app_context():
            db.session.remove()
            db.drop_all()
            db.engine.dispose()
db.engine.dispose()
class UserClassMethodsTest(unittest.TestCase):
    """Exercise logic.User class methods with a fresh DB per test."""
    def setUp(self):
        self.publisher_name = 'demo'
        self.package_name = 'demo-package'
        self.app = create_app()
        self.app.app_context().push()
        with self.app.app_context():
            # Rebuild the schema, then seed one owner/publisher/package row.
            db.drop_all()
            db.create_all()
            self.user = models.User()
            self.user.email, self.user.name, self.user.secret = \
                'demot@test.com', self.publisher_name, 'super_secret'
            self.publisher = models.Publisher(name=self.publisher_name)
            self.association = models.PublisherUser(role=models.UserRoleEnum.owner)
            self.metadata = models.Package(name=self.package_name)
            self.metadata.tags.append(models.PackageTag(descriptor={}))
            self.publisher.packages.append(self.metadata)
            self.association.publisher = self.publisher
            self.user.publishers.append(self.association)
            db.session.add(self.user)
            db.session.commit()
    def tests_user_get_method(self):
        # setUp committed exactly one user, so it has id 1.
        usr = logic.User.get(1)
        self.assertEqual(usr['name'], self.publisher_name)
    def tests_user_get_returns_none_if_no_user(self):
        usr = logic.User.get(2)
        self.assertIsNone(usr)
    def test_user_create_method_loads_in_db_new_user(self):
        metadata = {
            'email': 'new@test.com',
            'name': 'new-user',
            'full_name': 'New User',
        }
        usr = logic.User.create(metadata)
        created_usr = models.User.query.get(usr.id)
        self.assertEqual(created_usr.email, 'new@test.com')
        self.assertEqual(created_usr.name, 'new-user')
        self.assertEqual(created_usr.full_name, 'New User')
    # NOTE(review): "relaitontion" is a typo for "relation" in the name below;
    # kept as-is since renaming changes test discovery/reporting.
    def test_user_create_method_creates_user_publisher_relaitontion_if_publisher_exists(self):
        metadata = {
            'email': 'new@test.com',
            'name': 'new-user',
            'full_name': 'New User',
            'publishers': [{'role': 'owner', 'publisher_id': 1}]
        }
        usr = logic.User.create(metadata)
        pub_usr = models.PublisherUser.query.filter_by(user_id=usr.id).first()
        self.assertEqual(pub_usr.publisher_id, 1)
        self.assertEqual(pub_usr.user_id, usr.id)
        self.assertEqual(pub_usr.role, models.UserRoleEnum.owner)
        self.assertEqual(pub_usr.publisher.name, self.publisher_name)
        self.assertEqual(pub_usr.user.name, 'new-user')
    def tearDown(self):
        with self.app.app_context():
            db.session.remove()
            db.drop_all()
            db.engine.dispose()
|
mit
|
gangadhar-kadam/sms-wnframework
|
webnotes/widgets/form/load.py
|
6
|
3757
|
# Copyright (c) 2012 Web Notes Technologies Pvt Ltd (http://erpnext.com)
#
# MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
from __future__ import unicode_literals
import webnotes, json
import webnotes.model.doc
import webnotes.utils
@webnotes.whitelist()
def getdoc(doctype, name, user=None):
    """
    Loads a doclist for a given document. This method is called directly from the client.
    Requires "doctype", "name" as form variables.
    Will also call the "onload" method on the document.
    """
    # redundant: webnotes is already imported at module level
    import webnotes
    if not (doctype and name):
        raise Exception, 'doctype and name required!'
    doclist = []
    # single
    doclist = load_single_doc(doctype, name, user or webnotes.session.user)
    # response dict is serialized back to the client
    webnotes.response['docs'] = doclist
@webnotes.whitelist()
def getdoctype(doctype, with_parent=False, cached_timestamp=None):
    """load doctype"""
    import webnotes.model.doctype
    import webnotes.model.meta
    doclist = []
    # with parent (called from report builder)
    if with_parent:
        parent_dt = webnotes.model.meta.get_parent_dt(doctype)
        if parent_dt:
            doclist = webnotes.model.doctype.get(parent_dt, processed=True)
            webnotes.response['parent_dt'] = parent_dt
    if not doclist:
        doclist = webnotes.model.doctype.get(doctype, processed=True)
    # client's cached copy is current; tell it to reuse its cache
    if cached_timestamp and doclist[0].modified==cached_timestamp:
        return "use_cache"
    # load search criteria for reports (all)
    doclist +=get_search_criteria(doctype)
    webnotes.response['docs'] = doclist
def load_single_doc(dt, dn, user):
    """load doc and call onload methods"""
    # ----- REPLACE BY webnotes.client.get ------
    # single-value doctypes use the doctype itself as the record name
    if not dn: dn = dt
    if not webnotes.conn.exists(dt, dn):
        return None
    try:
        dl = webnotes.bean(dt, dn).doclist
        # add file list
        add_file_list(dt, dn, dl)
    except Exception, e:
        # surface the traceback to the client, then propagate
        webnotes.errprint(webnotes.utils.getTraceback())
        webnotes.msgprint('Error in script while loading')
        raise e
    # names starting with '_' are internal; keep them out of "recent" list
    if dl and not dn.startswith('_'):
        webnotes.user.update_recent(dt, dn)
    return dl
def add_file_list(dt, dn, dl):
    """Attach a JSON map of {file_url_or_name: File Data name} to the main doc."""
    file_list = {}
    for f in webnotes.conn.sql("""select name, file_name, file_url from
        `tabFile Data` where attached_to_name=%s and attached_to_doctype=%s""",
        (dn, dt), as_dict=True):
        file_list[f.file_url or f.file_name] = f.name
    if file_list:
        # stored on the first (main) doc of the doclist
        dl[0].file_list = json.dumps(file_list)
def get_search_criteria(dt):
    """Bundle 'Search Criteria' doclists for doctype *dt*.

    Returns a list of doclists for every Search Criteria whose doc_type
    (or enabled parent_doc_type) matches *dt*.
    """
    import webnotes.model.doc
    dl = []
    # Use query placeholders instead of %-interpolating ``dt`` into the SQL
    # string: the original was open to SQL injection via the doctype name.
    # webnotes.conn.sql already supports parameter tuples (see add_file_list).
    # NOTE(review): the original OR/AND precedence is preserved exactly,
    # i.e. `doc_type = X OR (parent_doc_type = X AND not disabled)`; it
    # looks like the disabled filter was meant for both branches — confirm
    # before changing.
    sc_list = webnotes.conn.sql(
        """select name from `tabSearch Criteria`
        where doc_type = %s or parent_doc_type = %s
        and (disabled!=1 OR disabled IS NULL)""", (dt, dt))
    for sc in sc_list:
        if sc[0]:
            dl += webnotes.model.doc.get('Search Criteria', sc[0])
    return dl
|
mit
|
11craft/immercv
|
immercv/users/admin.py
|
183
|
1048
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django import forms
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as AuthUserAdmin
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from .models import User
class MyUserChangeForm(UserChangeForm):
    # Rebind the stock change form to the project's custom User model.
    class Meta(UserChangeForm.Meta):
        model = User
class MyUserCreationForm(UserCreationForm):
    """User creation form for the custom User model that rejects
    usernames which are already taken.
    """
    # Build a subclass-local copy of the inherited messages.  The original
    # wrote ``error_message = UserCreationForm.error_messages.update(...)``,
    # which (a) mutates the *base class* dict shared by every
    # UserCreationForm subclass and (b) binds the attribute to None, since
    # dict.update() returns None.
    error_messages = dict(
        UserCreationForm.error_messages,
        duplicate_username='This username has already been taken.',
    )

    class Meta(UserCreationForm.Meta):
        model = User

    def clean_username(self):
        """Return the username if unused, else raise ValidationError."""
        username = self.cleaned_data["username"]
        try:
            User.objects.get(username=username)
        except User.DoesNotExist:
            return username
        raise forms.ValidationError(self.error_messages['duplicate_username'])
@admin.register(User)
class UserAdmin(AuthUserAdmin):
    # Standard auth admin, but with forms bound to the custom User model.
    form = MyUserChangeForm
    add_form = MyUserCreationForm
|
bsd-3-clause
|
safnuk/todone
|
todone/commands/tests/test_setup.py
|
1
|
3764
|
from contextlib import redirect_stdout
import io
from unittest import TestCase
from unittest.mock import patch
from todone.backend import DatabaseError
from todone.commands.setup import setup_db, version
from todone import config, __version__
from todone.parser import exceptions as pe
class TestVersion(TestCase):
    """Tests for the ``version`` command."""
    def test_version_prints_current_version(self):
        # Capture stdout to inspect what version([]) prints.
        f = io.StringIO()
        with redirect_stdout(f):
            version([])
        s = f.getvalue()
        self.assertIn('Todone', s)
        self.assertIn(__version__, s)
    def test_version_with_arguments_raises(self):
        # ``version`` accepts no positional arguments.
        with self.assertRaises(pe.ArgumentError):
            version(['arg'])
@patch('todone.commands.setup.config.save_configuration')
@patch('todone.commands.setup.backend.Database.create')
class TestSetup(TestCase):
    """Tests for ``setup_db`` argument handling and database creation."""
    def test_setup_without_arguments_raises(
        self, mock_create_database, mock_save_configuration
    ):
        with self.assertRaises(pe.ArgumentError):
            setup_db([])
        mock_create_database.assert_not_called()
    def test_setup_with_subcommand_does_not_raise(
        self, mock_create_database, mock_save_configuration
    ):
        with patch.dict(config.settings, {'database': {'name': 'nonempty'}}):
            setup_db(['init'])  # should not raise
    def test_setup_with_extra_args_raises(
        self, mock_create_database, mock_save_configuration
    ):
        with self.assertRaises(pe.ArgumentError):
            setup_db(['init', 'extra'])
        mock_create_database.assert_not_called()
    def test_setup_init_with_valid_config_calls_create_database_once(
        self, mock_create_database, mock_save_configuration
    ):
        with patch.dict(config.settings, {'database': {'name': 'nonempty'}}):
            setup_db(['init'])
        mock_create_database.assert_called_once_with()
    def test_DatabaseError_for_existing_db_prints_db_exists_msg(
        self, mock_create_database, mock_save_configuration
    ):
        # An "already exists" error must be reported as a friendly
        # message rather than re-raised.
        with patch.dict(config.settings, {'database': {'name': 'nonempty'}}):
            mock_create_database.side_effect = DatabaseError(
                "Database already exists")
            f = io.StringIO()
            with redirect_stdout(f):
                setup_db(['init'])
            s = f.getvalue()
            self.assertNotIn('New todone database initialized', s)
            self.assertIn('Database has already been setup - get working!', s)
    def test_database_creation_error_should_raise(
        self, mock_create_database, mock_save_configuration
    ):
        # Any other DatabaseError propagates to the caller.
        with patch.dict(config.settings, {'database': {'name': 'nonempty'}}):
            mock_create_database.side_effect = DatabaseError(
                "Could not create the database")
            with self.assertRaises(DatabaseError):
                setup_db(['init'])
@patch('todone.commands.setup.config.save_configuration')
@patch('todone.commands.setup.backend.Database')
@patch('todone.commands.setup.Setup.get_input', return_value='test file')
class TestInitialize(TestCase):
    """Tests for first-run initialization when no database name is set."""

    def test_blank_db_name_queries_creation_of_config_file(
        self, mock_input, MockDatabase, mock_save_configuration
    ):
        with patch.dict(config.settings, {'database': {'name': ''}}):
            setup_db(['init'])
            # Assertions run inside the patch.dict block because the
            # patched settings are restored on exit.
            # assertEquals is a deprecated alias; use assertEqual.
            self.assertEqual(config.settings['database']['name'], 'test file')
            mock_save_configuration.assert_called_once_with()
            MockDatabase.create.assert_called_once_with()

    def test_blank_db_name_calls_Database_update(
        self, mock_input, MockDatabase, mock_save_configuration
    ):
        with patch.dict(config.settings, {'database': {'name': ''}}):
            setup_db(['init'])
            MockDatabase.update.assert_called_once()
|
apache-2.0
|
jalaziz/django-cms-grappelli-old
|
cms/utils/admin.py
|
2
|
3998
|
# -*- coding: utf-8 -*-
from django.contrib.sites.models import Site
from django.conf import settings
from cms.utils.moderator import page_moderator_state, I_APPROVE
from cms.utils import get_language_from_request
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from cms.utils.permissions import has_page_add_permission, has_generic_permission
from django.http import HttpResponse
from cms.models.permissionmodels import GlobalPagePermission
from cms.models.pagemodel import Page
def get_admin_menu_item_context(request, page, filtered=False):
    """Used for rendering the page tree, inserts into context everything what
    we need for single item
    """
    has_add_page_permission = page.has_add_permission(request)
    has_move_page_permission = page.has_move_page_permission(request)
    site = Site.objects.get_current()
    lang = get_language_from_request(request)
    #slug = page.get_slug(language=lang, fallback=True) # why was this here ??
    metadata = ""
    if settings.CMS_PERMISSION:
        # jstree metadata generator
        md = []
        #if not has_add_page_permission:
        if not has_move_page_permission:
            md.append(('valid_children', False))
            md.append(('draggable', False))
        if md:
            # just turn it into a simple JavaScript object literal
            metadata = "{" + ", ".join(map(lambda e: "%s: %s" %(e[0],
            isinstance(e[1], bool) and str(e[1]) or e[1].lower() ), md)) + "}"
    moderator_state = page_moderator_state(request, page)
    has_add_on_same_level_permission = False
    opts = Page._meta
    # a global add permission covering this page's site allows adding siblings
    if (request.user.has_perm(opts.app_label + '.' + opts.get_add_permission()) and
        GlobalPagePermission.objects.with_user(request.user).filter(can_add=True, sites__in=[page.site_id])):
        has_add_on_same_level_permission = True
    # otherwise fall back to the per-page "add" permission on the parent
    if not has_add_on_same_level_permission and page.parent_id:
        has_add_on_same_level_permission = has_generic_permission(page.parent_id, request.user, "add", page.site)
    #has_add_on_same_level_permission = has_add_page_on_same_level_permission(request, page)
    context = {
        'page': page,
        'site': site,
        'lang': lang,
        'filtered': filtered,
        'metadata': metadata,
        'has_change_permission': page.has_change_permission(request),
        'has_publish_permission': page.has_publish_permission(request),
        'has_delete_permission': page.has_delete_permission(request),
        'has_move_page_permission': has_move_page_permission,
        'has_add_page_permission': has_add_page_permission,
        'has_moderate_permission': page.has_moderate_permission(request),
        'page_moderator_state': moderator_state,
        'moderator_should_approve': moderator_state['state'] >= I_APPROVE,
        'has_add_on_same_level_permission': has_add_on_same_level_permission,
        'CMS_PERMISSION': settings.CMS_PERMISSION,
        'CMS_MODERATOR': settings.CMS_MODERATOR,
    }
    return context
# Sentinel body returned when the requested page no longer exists; the
# client-side tree removes the node when it sees this response.
NOT_FOUND_RESPONSE = "NotFound"
def render_admin_menu_item(request, page):
    """Renders requested page item for the tree. This is used in case when item
    must be reloaded over ajax.
    """
    if not page.pk:
        return HttpResponse(NOT_FOUND_RESPONSE) # Not found - tree will remove item
    # languages
    languages = []
    if page.site_id in settings.CMS_SITE_LANGUAGES:
        languages = settings.CMS_SITE_LANGUAGES[page.site_id]
    else:
        languages = [x[0] for x in settings.CMS_LANGUAGES]
    context = RequestContext(request, {
        'has_add_permission': has_page_add_permission(request),
        'site_languages': languages,
    })
    filtered = 'filtered' in request.REQUEST
    context.update(get_admin_menu_item_context(request, page, filtered))
    return render_to_response('admin/cms/page/menu_item.html', context)
|
bsd-3-clause
|
RockySteveJobs/python-for-android
|
python3-alpha/python3-src/Lib/optparse.py
|
45
|
60573
|
"""A powerful, extensible, and easy-to-use option parser.
By Greg Ward <gward@python.net>
Originally distributed as Optik.
For support, use the optik-users@lists.sourceforge.net mailing list
(http://lists.sourceforge.net/lists/listinfo/optik-users).
Simple usage example:
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--file", dest="filename",
help="write report to FILE", metavar="FILE")
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose", default=True,
help="don't print status messages to stdout")
(options, args) = parser.parse_args()
"""
__version__ = "1.5.3"
__all__ = ['Option',
'make_option',
'SUPPRESS_HELP',
'SUPPRESS_USAGE',
'Values',
'OptionContainer',
'OptionGroup',
'OptionParser',
'HelpFormatter',
'IndentedHelpFormatter',
'TitledHelpFormatter',
'OptParseError',
'OptionError',
'OptionConflictError',
'OptionValueError',
'BadOptionError']
__copyright__ = """
Copyright (c) 2001-2006 Gregory P. Ward. All rights reserved.
Copyright (c) 2002-2006 Python Software Foundation. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import sys, os
import textwrap
def _repr(self):
return "<%s at 0x%x: %s>" % (self.__class__.__name__, id(self), self)
# This file was generated from:
# Id: option_parser.py 527 2006-07-23 15:21:30Z greg
# Id: option.py 522 2006-06-11 16:22:03Z gward
# Id: help.py 527 2006-07-23 15:21:30Z greg
# Id: errors.py 509 2006-04-20 00:58:24Z gward
try:
    from gettext import gettext
except ImportError:
    # No gettext support on this build: fall back to an identity function
    # so help/error strings are returned untranslated.
    def gettext(message):
        return message
# conventional short alias used throughout this module
_ = gettext
class OptParseError (Exception):
    """Root of the optparse exception hierarchy.

    Stores a human-readable message in ``msg``; str() returns it.
    """

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return self.msg
class OptionError (OptParseError):
    """Raised when an Option instance is created with invalid or
    inconsistent arguments.
    """

    def __init__(self, msg, option):
        self.msg = msg
        # remember which option was at fault, as a string
        self.option_id = str(option)

    def __str__(self):
        if not self.option_id:
            return self.msg
        return "option %s: %s" % (self.option_id, self.msg)
class OptionConflictError (OptionError):
    """
    Raised if conflicting options are added to an OptionParser.
    """
class OptionValueError (OptParseError):
    """
    Raised if an invalid option value is encountered on the command
    line.
    """
class BadOptionError (OptParseError):
    """Raised when an unknown option shows up on the command line."""

    def __init__(self, opt_str):
        self.opt_str = opt_str

    def __str__(self):
        return _("no such option: %s") % self.opt_str
class AmbiguousOptionError (BadOptionError):
    """Raised when an abbreviated long option matches several options."""

    def __init__(self, opt_str, possibilities):
        BadOptionError.__init__(self, opt_str)
        self.possibilities = possibilities

    def __str__(self):
        choices = ", ".join(self.possibilities)
        return (_("ambiguous option: %s (%s?)")
                % (self.opt_str, choices))
class HelpFormatter:
"""
Abstract base class for formatting option help. OptionParser
instances should use one of the HelpFormatter subclasses for
formatting help; by default IndentedHelpFormatter is used.
Instance attributes:
parser : OptionParser
the controlling OptionParser instance
indent_increment : int
the number of columns to indent per nesting level
max_help_position : int
the maximum starting column for option help text
help_position : int
the calculated starting column for option help text;
initially the same as the maximum
width : int
total number of columns for output (pass None to constructor for
this value to be taken from the $COLUMNS environment variable)
level : int
current indentation level
current_indent : int
current indentation level (in columns)
help_width : int
number of columns available for option help text (calculated)
default_tag : str
text to replace with each option's default value, "%default"
by default. Set to false value to disable default value expansion.
option_strings : { Option : str }
maps Option instances to the snippet of help text explaining
the syntax of that option, e.g. "-h, --help" or
"-fFILE, --file=FILE"
_short_opt_fmt : str
format string controlling how short options with values are
printed in help text. Must be either "%s%s" ("-fFILE") or
"%s %s" ("-f FILE"), because those are the two syntaxes that
Optik supports.
_long_opt_fmt : str
similar but for long options; must be either "%s %s" ("--file FILE")
or "%s=%s" ("--file=FILE").
"""
NO_DEFAULT_VALUE = "none"
def __init__(self,
indent_increment,
max_help_position,
width,
short_first):
self.parser = None
self.indent_increment = indent_increment
self.help_position = self.max_help_position = max_help_position
if width is None:
try:
width = int(os.environ['COLUMNS'])
except (KeyError, ValueError):
width = 80
width -= 2
self.width = width
self.current_indent = 0
self.level = 0
self.help_width = None # computed later
self.short_first = short_first
self.default_tag = "%default"
self.option_strings = {}
self._short_opt_fmt = "%s %s"
self._long_opt_fmt = "%s=%s"
    def set_parser(self, parser):
        # Link back to the controlling OptionParser (used by expand_default).
        self.parser = parser
def set_short_opt_delimiter(self, delim):
if delim not in ("", " "):
raise ValueError(
"invalid metavar delimiter for short options: %r" % delim)
self._short_opt_fmt = "%s" + delim + "%s"
def set_long_opt_delimiter(self, delim):
if delim not in ("=", " "):
raise ValueError(
"invalid metavar delimiter for long options: %r" % delim)
self._long_opt_fmt = "%s" + delim + "%s"
    def indent(self):
        # Push one nesting level: widen the left margin by indent_increment.
        self.current_indent += self.indent_increment
        self.level += 1
    def dedent(self):
        # Pop one nesting level; unbalanced dedent() calls are a bug.
        self.current_indent -= self.indent_increment
        assert self.current_indent >= 0, "Indent decreased below 0."
        self.level -= 1
    def format_usage(self, usage):
        # Subclass responsibility: render the "Usage: ..." line.
        raise NotImplementedError("subclasses must implement")
    def format_heading(self, heading):
        # Subclass responsibility: render a section heading (e.g. "Options:").
        raise NotImplementedError("subclasses must implement")
def _format_text(self, text):
"""
Format a paragraph of free-form text for inclusion in the
help output at the current indentation level.
"""
text_width = self.width - self.current_indent
indent = " "*self.current_indent
return textwrap.fill(text,
text_width,
initial_indent=indent,
subsequent_indent=indent)
def format_description(self, description):
if description:
return self._format_text(description) + "\n"
else:
return ""
def format_epilog(self, epilog):
if epilog:
return "\n" + self._format_text(epilog) + "\n"
else:
return ""
    def expand_default(self, option):
        """Replace the "%default" tag in an option's help with its default."""
        # No parser attached (or expansion disabled via default_tag=""):
        # nothing to substitute.
        if self.parser is None or not self.default_tag:
            return option.help
        default_value = self.parser.defaults.get(option.dest)
        # NO_DEFAULT is the module-level marker for "no default configured".
        if default_value is NO_DEFAULT or default_value is None:
            default_value = self.NO_DEFAULT_VALUE
        return option.help.replace(self.default_tag, str(default_value))
def format_option(self, option):
# The help for each option consists of two parts:
# * the opt strings and metavars
# eg. ("-x", or "-fFILENAME, --file=FILENAME")
# * the user-supplied help string
# eg. ("turn on expert mode", "read data from FILENAME")
#
# If possible, we write both of these on the same line:
# -x turn on expert mode
#
# But if the opt string list is too long, we put the help
# string on a second line, indented to the same column it would
# start in if it fit on the first line.
# -fFILENAME, --file=FILENAME
# read data from FILENAME
result = []
opts = self.option_strings[option]
opt_width = self.help_position - self.current_indent - 2
if len(opts) > opt_width:
opts = "%*s%s\n" % (self.current_indent, "", opts)
indent_first = self.help_position
else: # start help on same line as opts
opts = "%*s%-*s " % (self.current_indent, "", opt_width, opts)
indent_first = 0
result.append(opts)
if option.help:
help_text = self.expand_default(option)
help_lines = textwrap.wrap(help_text, self.help_width)
result.append("%*s%s\n" % (indent_first, "", help_lines[0]))
result.extend(["%*s%s\n" % (self.help_position, "", line)
for line in help_lines[1:]])
elif opts[-1] != "\n":
result.append("\n")
return "".join(result)
def store_option_strings(self, parser):
self.indent()
max_len = 0
for opt in parser.option_list:
strings = self.format_option_strings(opt)
self.option_strings[opt] = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.indent()
for group in parser.option_groups:
for opt in group.option_list:
strings = self.format_option_strings(opt)
self.option_strings[opt] = strings
max_len = max(max_len, len(strings) + self.current_indent)
self.dedent()
self.dedent()
self.help_position = min(max_len + 2, self.max_help_position)
self.help_width = self.width - self.help_position
def format_option_strings(self, option):
"""Return a comma-separated list of option strings & metavariables."""
if option.takes_value():
metavar = option.metavar or option.dest.upper()
short_opts = [self._short_opt_fmt % (sopt, metavar)
for sopt in option._short_opts]
long_opts = [self._long_opt_fmt % (lopt, metavar)
for lopt in option._long_opts]
else:
short_opts = option._short_opts
long_opts = option._long_opts
if self.short_first:
opts = short_opts + long_opts
else:
opts = long_opts + short_opts
return ", ".join(opts)
class IndentedHelpFormatter (HelpFormatter):
    """Format help with indented section bodies.

    This is the default formatter used by OptionParser.
    """
    def __init__(self,
                 indent_increment=2,
                 max_help_position=24,
                 width=None,
                 short_first=1):
        HelpFormatter.__init__(
            self, indent_increment, max_help_position, width, short_first)
    def format_usage(self, usage):
        """Return a one-line "Usage: ..." string."""
        return _("Usage: %s\n") % usage
    def format_heading(self, heading):
        """Return the heading indented to the current level, ending in ':'."""
        return "%*s%s:\n" % (self.current_indent, "", heading)
class TitledHelpFormatter (HelpFormatter):
    """Format help with underlined section headers.
    """
    def __init__(self,
                 indent_increment=0,
                 max_help_position=24,
                 width=None,
                 short_first=0):
        HelpFormatter.__init__ (
            self, indent_increment, max_help_position, width, short_first)
    def format_usage(self, usage):
        """Return an underlined "Usage" heading followed by the usage text."""
        return "%s %s\n" % (self.format_heading(_("Usage")), usage)
    def format_heading(self, heading):
        # Underline with "=" at level 0 and "-" at level 1.
        return "%s\n%s\n" % (heading, "=-"[self.level] * len(heading))
def _parse_num(val, type):
if val[:2].lower() == "0x": # hexadecimal
radix = 16
elif val[:2].lower() == "0b": # binary
radix = 2
val = val[2:] or "0" # have to remove "0b" prefix
elif val[:1] == "0": # octal
radix = 8
else: # decimal
radix = 10
return type(val, radix)
def _parse_int(val):
    # Radix-aware int() conversion; see _parse_num for prefix handling.
    return _parse_num(val, int)
def _parse_long(val):
    # Identical to _parse_int; kept so the "long" option type still works
    # (Python 3 unified int and long).
    return _parse_num(val, int)
# Maps option-type name -> (converter function, human-readable type name
# used in error messages).  "long" is an alias for int on Python 3.
_builtin_cvt = { "int" : (_parse_int, _("integer")),
                 "long" : (_parse_long, _("long integer")),
                 "float" : (float, _("floating-point")),
                 "complex" : (complex, _("complex")) }
def check_builtin(option, opt, value):
    """Convert command-line text 'value' according to option.type.

    Raises OptionValueError when the text cannot be converted.
    """
    converter, type_name = _builtin_cvt[option.type]
    try:
        return converter(value)
    except ValueError:
        raise OptionValueError(
            _("option %s: invalid %s value: %r") % (opt, type_name, value))
def check_choice(option, opt, value):
    """Validate that 'value' is one of option.choices; return it unchanged."""
    if value not in option.choices:
        choices = ", ".join(map(repr, option.choices))
        raise OptionValueError(
            _("option %s: invalid choice: %r (choose from %s)")
            % (opt, value, choices))
    return value
# Not supplying a default is different from a default of None,
# so we need an explicit "not supplied" sentinel value.  It is compared
# with "is" (identity), so it can never collide with a real user default.
NO_DEFAULT = ("NO", "DEFAULT")
class Option:
    """
    A single command-line option: its option strings, how it is parsed,
    and what happens when it is seen on the command line.

    Instance attributes:
      _short_opts : [string]
      _long_opts : [string]
      action : string
      type : string
      dest : string
      default : any
      nargs : int
      const : any
      choices : [string]
      callback : function
      callback_args : (any*)
      callback_kwargs : { string : any }
      help : string
      metavar : string
    """
    # The list of instance attributes that may be set through
    # keyword args to the constructor.
    ATTRS = ['action',
             'type',
             'dest',
             'default',
             'nargs',
             'const',
             'choices',
             'callback',
             'callback_args',
             'callback_kwargs',
             'help',
             'metavar']
    # The set of actions allowed by option parsers.  Explicitly listed
    # here so the constructor can validate its arguments.
    ACTIONS = ("store",
               "store_const",
               "store_true",
               "store_false",
               "append",
               "append_const",
               "count",
               "callback",
               "help",
               "version")
    # The set of actions that involve storing a value somewhere;
    # also listed just for constructor argument validation.  (If
    # the action is one of these, there must be a destination.)
    STORE_ACTIONS = ("store",
                     "store_const",
                     "store_true",
                     "store_false",
                     "append",
                     "append_const",
                     "count")
    # The set of actions for which it makes sense to supply a value
    # type, ie. which may consume an argument from the command line.
    TYPED_ACTIONS = ("store",
                     "append",
                     "callback")
    # The set of actions which *require* a value type, ie. that
    # always consume an argument from the command line.
    ALWAYS_TYPED_ACTIONS = ("store",
                            "append")
    # The set of actions which take a 'const' attribute.
    CONST_ACTIONS = ("store_const",
                     "append_const")
    # The set of known types for option parsers.  Again, listed here for
    # constructor argument validation.
    TYPES = ("string", "int", "long", "float", "complex", "choice")
    # Dictionary of argument checking functions, which convert and
    # validate option arguments according to the option type.
    #
    # Signature of checking functions is:
    #   check(option : Option, opt : string, value : string) -> any
    # where
    #   option is the Option instance calling the checker
    #   opt is the actual option seen on the command-line
    #     (eg. "-a", "--file")
    #   value is the option argument seen on the command-line
    #
    # The return value should be in the appropriate Python type
    # for option.type -- eg. an integer if option.type == "int".
    #
    # If no checker is defined for a type, arguments will be
    # unchecked and remain strings.
    TYPE_CHECKER = { "int"    : check_builtin,
                     "long"   : check_builtin,
                     "float"  : check_builtin,
                     "complex": check_builtin,
                     "choice" : check_choice,
                   }
    # CHECK_METHODS is a list of unbound method objects; they are called
    # by the constructor, in order, after all attributes are
    # initialized.  The list is created and filled in later, after all
    # the methods are actually defined.  (I just put it here because I
    # like to define and document all class attributes in the same
    # place.)  Subclasses that add another _check_*() method should
    # define their own CHECK_METHODS list that adds their check method
    # to those from this class.
    CHECK_METHODS = None
    # -- Constructor/initialization methods ----------------------------
    def __init__(self, *opts, **attrs):
        """Create an Option from option strings plus keyword attributes.

        Raises TypeError when no option string is supplied and
        OptionError when an attribute combination is invalid.
        """
        # Set _short_opts, _long_opts attrs from 'opts' tuple.
        # Have to be set now, in case no option strings are supplied.
        self._short_opts = []
        self._long_opts = []
        opts = self._check_opt_strings(opts)
        self._set_opt_strings(opts)
        # Set all other attrs (action, type, etc.) from 'attrs' dict
        self._set_attrs(attrs)
        # Check all the attributes we just set.  There are lots of
        # complicated interdependencies, but luckily they can be farmed
        # out to the _check_*() methods listed in CHECK_METHODS -- which
        # could be handy for subclasses!  The one thing these all share
        # is that they raise OptionError if they discover a problem.
        for checker in self.CHECK_METHODS:
            checker(self)
    def _check_opt_strings(self, opts):
        """Drop None entries and require at least one option string."""
        # Filter out None because early versions of Optik had exactly
        # one short option and one long option, either of which
        # could be None.
        opts = [opt for opt in opts if opt]
        if not opts:
            raise TypeError("at least one option string must be supplied")
        return opts
    def _set_opt_strings(self, opts):
        """Sort the option strings into _short_opts and _long_opts,
        validating their syntax ("-x" vs "--long")."""
        for opt in opts:
            if len(opt) < 2:
                raise OptionError(
                    "invalid option string %r: "
                    "must be at least two characters long" % opt, self)
            elif len(opt) == 2:
                if not (opt[0] == "-" and opt[1] != "-"):
                    raise OptionError(
                        "invalid short option string %r: "
                        "must be of the form -x, (x any non-dash char)" % opt,
                        self)
                self._short_opts.append(opt)
            else:
                if not (opt[0:2] == "--" and opt[2] != "-"):
                    raise OptionError(
                        "invalid long option string %r: "
                        "must start with --, followed by non-dash" % opt,
                        self)
                self._long_opts.append(opt)
    def _set_attrs(self, attrs):
        """Copy recognized keyword attributes (ATTRS) onto self, default
        the rest, and reject unknown keywords with OptionError."""
        for attr in self.ATTRS:
            if attr in attrs:
                setattr(self, attr, attrs[attr])
                del attrs[attr]
            else:
                if attr == 'default':
                    setattr(self, attr, NO_DEFAULT)
                else:
                    setattr(self, attr, None)
        if attrs:
            attrs = sorted(attrs.keys())
            raise OptionError(
                "invalid keyword arguments: %s" % ", ".join(attrs),
                self)
    # -- Constructor validation methods --------------------------------
    def _check_action(self):
        """Default the action to "store"; reject unknown actions."""
        if self.action is None:
            self.action = "store"
        elif self.action not in self.ACTIONS:
            raise OptionError("invalid action: %r" % self.action, self)
    def _check_type(self):
        """Infer or normalize self.type and validate it against the action."""
        if self.type is None:
            if self.action in self.ALWAYS_TYPED_ACTIONS:
                if self.choices is not None:
                    # The "choices" attribute implies "choice" type.
                    self.type = "choice"
                else:
                    # No type given?  "string" is the most sensible default.
                    self.type = "string"
        else:
            # Allow type objects or builtin type conversion functions
            # (int, str, etc.) as an alternative to their names.  (The
            # complicated check of builtins is only necessary for
            # Python 2.1 and earlier, and is short-circuited by the
            # first check on modern Pythons.)
            import builtins
            if ( isinstance(self.type, type) or
                 (hasattr(self.type, "__name__") and
                  getattr(builtins, self.type.__name__, None) is self.type) ):
                self.type = self.type.__name__
            if self.type == "str":
                self.type = "string"
            if self.type not in self.TYPES:
                raise OptionError("invalid option type: %r" % self.type, self)
            if self.action not in self.TYPED_ACTIONS:
                raise OptionError(
                    "must not supply a type for action %r" % self.action, self)
    def _check_choice(self):
        """Require a list/tuple of choices exactly when type is "choice"."""
        if self.type == "choice":
            if self.choices is None:
                raise OptionError(
                    "must supply a list of choices for type 'choice'", self)
            elif not isinstance(self.choices, (tuple, list)):
                raise OptionError(
                    "choices must be a list of strings ('%s' supplied)"
                    % str(type(self.choices)).split("'")[1], self)
        elif self.choices is not None:
            raise OptionError(
                "must not supply choices for type %r" % self.type, self)
    def _check_dest(self):
        """Derive self.dest from the option strings when one is needed."""
        # No destination given, and we need one for this action.  The
        # self.type check is for callbacks that take a value.
        takes_value = (self.action in self.STORE_ACTIONS or
                       self.type is not None)
        if self.dest is None and takes_value:
            # Glean a destination from the first long option string,
            # or from the first short option string if no long options.
            if self._long_opts:
                # eg. "--foo-bar" -> "foo_bar"
                self.dest = self._long_opts[0][2:].replace('-', '_')
            else:
                self.dest = self._short_opts[0][1]
    def _check_const(self):
        """Reject a 'const' attribute on actions that do not take one."""
        if self.action not in self.CONST_ACTIONS and self.const is not None:
            raise OptionError(
                "'const' must not be supplied for action %r" % self.action,
                self)
    def _check_nargs(self):
        """Default nargs to 1 for typed actions; forbid it elsewhere."""
        if self.action in self.TYPED_ACTIONS:
            if self.nargs is None:
                self.nargs = 1
        elif self.nargs is not None:
            raise OptionError(
                "'nargs' must not be supplied for action %r" % self.action,
                self)
    def _check_callback(self):
        """Validate callback-related attributes against the action."""
        if self.action == "callback":
            if not hasattr(self.callback, '__call__'):
                raise OptionError(
                    "callback not callable: %r" % self.callback, self)
            if (self.callback_args is not None and
                not isinstance(self.callback_args, tuple)):
                raise OptionError(
                    "callback_args, if supplied, must be a tuple: not %r"
                    % self.callback_args, self)
            if (self.callback_kwargs is not None and
                not isinstance(self.callback_kwargs, dict)):
                raise OptionError(
                    "callback_kwargs, if supplied, must be a dict: not %r"
                    % self.callback_kwargs, self)
        else:
            if self.callback is not None:
                raise OptionError(
                    "callback supplied (%r) for non-callback option"
                    % self.callback, self)
            if self.callback_args is not None:
                raise OptionError(
                    "callback_args supplied for non-callback option", self)
            if self.callback_kwargs is not None:
                raise OptionError(
                    "callback_kwargs supplied for non-callback option", self)
    CHECK_METHODS = [_check_action,
                     _check_type,
                     _check_choice,
                     _check_dest,
                     _check_const,
                     _check_nargs,
                     _check_callback]
    # -- Miscellaneous methods -----------------------------------------
    def __str__(self):
        # eg. "-f/--file" for an option with both spellings.
        return "/".join(self._short_opts + self._long_opts)
    __repr__ = _repr
    def takes_value(self):
        """True if this option consumes an argument from the command line."""
        return self.type is not None
    def get_opt_string(self):
        """Return the canonical option string: first long, else first short."""
        if self._long_opts:
            return self._long_opts[0]
        else:
            return self._short_opts[0]
    # -- Processing methods --------------------------------------------
    def check_value(self, opt, value):
        """Run 'value' through the TYPE_CHECKER for this option's type;
        untyped values pass through unchanged."""
        checker = self.TYPE_CHECKER.get(self.type)
        if checker is None:
            return value
        else:
            return checker(self, opt, value)
    def convert_value(self, opt, value):
        """Convert a single value, or a tuple of values when nargs > 1.
        Returns None (implicitly) when 'value' is None."""
        if value is not None:
            if self.nargs == 1:
                return self.check_value(opt, value)
            else:
                return tuple([self.check_value(opt, v) for v in value])
    def process(self, opt, value, values, parser):
        # First, convert the value(s) to the right type.  Howl if any
        # value(s) are bogus.
        value = self.convert_value(opt, value)
        # And then take whatever action is expected of us.
        # This is a separate method to make life easier for
        # subclasses to add new actions.
        return self.take_action(
            self.action, self.dest, opt, value, values, parser)
    def take_action(self, action, dest, opt, value, values, parser):
        """Dispatch on 'action', mutating 'values' (or the parser).
        Returns 1 when an action was taken."""
        if action == "store":
            setattr(values, dest, value)
        elif action == "store_const":
            setattr(values, dest, self.const)
        elif action == "store_true":
            setattr(values, dest, True)
        elif action == "store_false":
            setattr(values, dest, False)
        elif action == "append":
            values.ensure_value(dest, []).append(value)
        elif action == "append_const":
            values.ensure_value(dest, []).append(self.const)
        elif action == "count":
            setattr(values, dest, values.ensure_value(dest, 0) + 1)
        elif action == "callback":
            args = self.callback_args or ()
            kwargs = self.callback_kwargs or {}
            self.callback(self, opt, value, parser, *args, **kwargs)
        elif action == "help":
            parser.print_help()
            parser.exit()
        elif action == "version":
            parser.print_version()
            parser.exit()
        else:
            raise ValueError("unknown action %r" % self.action)
        return 1
# class Option
# Sentinel values telling the help machinery to omit an option's help
# string or the parser's usage line.  Built by concatenation so the
# sentinel text never appears verbatim when searching this source.
SUPPRESS_HELP = "SUPPRESS"+"HELP"
SUPPRESS_USAGE = "SUPPRESS"+"USAGE"
class Values:
    """Simple attribute container for parsed option values.

    Attributes are created dynamically: one per option destination,
    seeded from 'defaults' and overwritten as options are processed.
    """
    def __init__(self, defaults=None):
        if defaults:
            for (attr, val) in defaults.items():
                setattr(self, attr, val)
    def __str__(self):
        return str(self.__dict__)
    __repr__ = _repr
    def __eq__(self, other):
        """Compare equal to another Values or to a plain dict with the
        same attribute mapping."""
        if isinstance(other, Values):
            return self.__dict__ == other.__dict__
        elif isinstance(other, dict):
            return self.__dict__ == other
        else:
            return NotImplemented
    def _update_careful(self, dict):
        """
        Update the option values from an arbitrary dictionary, but only
        use keys from dict that already have a corresponding attribute
        in self.  Any keys in dict without a corresponding attribute
        are silently ignored.
        """
        for attr in dir(self):
            if attr in dict:
                dval = dict[attr]
                if dval is not None:
                    setattr(self, attr, dval)
    def _update_loose(self, dict):
        """
        Update the option values from an arbitrary dictionary,
        using all keys from the dictionary regardless of whether
        they have a corresponding attribute in self or not.
        """
        self.__dict__.update(dict)
    def _update(self, dict, mode):
        """Dispatch to _update_careful or _update_loose by 'mode'."""
        if mode == "careful":
            self._update_careful(dict)
        elif mode == "loose":
            self._update_loose(dict)
        else:
            raise ValueError("invalid update mode: %r" % mode)
    def read_module(self, modname, mode="careful"):
        """Import module 'modname' and update values from its globals."""
        __import__(modname)
        mod = sys.modules[modname]
        self._update(vars(mod), mode)
    def read_file(self, filename, mode="careful"):
        """Execute 'filename' as Python and update values from the
        resulting namespace.

        NOTE(review): this runs arbitrary code -- only use on trusted
        files.  (Fixed: the file handle is now closed deterministically
        instead of being leaked.)
        """
        namespace = {}
        with open(filename) as f:
            exec(f.read(), namespace)
        self._update(namespace, mode)
    def ensure_value(self, attr, value):
        """Set 'attr' to 'value' only if it is missing or None; return
        the (possibly pre-existing) attribute value."""
        if not hasattr(self, attr) or getattr(self, attr) is None:
            setattr(self, attr, value)
        return getattr(self, attr)
class OptionContainer:
    """
    Abstract base class.
    Class attributes:
      standard_option_list : [Option]
        list of standard options that will be accepted by all instances
        of this parser class (intended to be overridden by subclasses).
    Instance attributes:
      option_list : [Option]
        the list of Option objects contained by this OptionContainer
      _short_opt : { string : Option }
        dictionary mapping short option strings, eg. "-f" or "-X",
        to the Option instances that implement them.  If an Option
        has multiple short option strings, it will appears in this
        dictionary multiple times. [1]
      _long_opt : { string : Option }
        dictionary mapping long option strings, eg. "--file" or
        "--exclude", to the Option instances that implement them.
        Again, a given Option can occur multiple times in this
        dictionary. [1]
      defaults : { string : any }
        dictionary mapping option destination names to default
        values for each destination [1]
    [1] These mappings are common to (shared by) all components of the
        controlling OptionParser, where they are initially created.
    """
    def __init__(self, option_class, conflict_handler, description):
        # Initialize the option list and related data structures.
        # This method must be provided by subclasses, and it must
        # initialize at least the following instance attributes:
        # option_list, _short_opt, _long_opt, defaults.
        self._create_option_list()
        self.option_class = option_class
        self.set_conflict_handler(conflict_handler)
        self.set_description(description)
    def _create_option_mappings(self):
        # For use by OptionParser constructor -- create the master
        # option mappings used by this OptionParser and all
        # OptionGroups that it owns.
        self._short_opt = {}            # single letter -> Option instance
        self._long_opt = {}             # long option -> Option instance
        self.defaults = {}              # maps option dest -> default value
    def _share_option_mappings(self, parser):
        # For use by OptionGroup constructor -- use shared option
        # mappings from the OptionParser that owns this OptionGroup.
        self._short_opt = parser._short_opt
        self._long_opt = parser._long_opt
        self.defaults = parser.defaults
    def set_conflict_handler(self, handler):
        """Set how duplicate option strings are handled: "error" raises,
        "resolve" removes the older spelling."""
        if handler not in ("error", "resolve"):
            raise ValueError("invalid conflict_resolution value %r" % handler)
        self.conflict_handler = handler
    def set_description(self, description):
        self.description = description
    def get_description(self):
        return self.description
    def destroy(self):
        """see OptionParser.destroy()."""
        del self._short_opt
        del self._long_opt
        del self.defaults
    # -- Option-adding methods -----------------------------------------
    def _check_conflict(self, option):
        """Apply the conflict handler to any option strings of 'option'
        that are already registered in the shared mappings."""
        conflict_opts = []
        for opt in option._short_opts:
            if opt in self._short_opt:
                conflict_opts.append((opt, self._short_opt[opt]))
        for opt in option._long_opts:
            if opt in self._long_opt:
                conflict_opts.append((opt, self._long_opt[opt]))
        if conflict_opts:
            handler = self.conflict_handler
            if handler == "error":
                raise OptionConflictError(
                    "conflicting option string(s): %s"
                    % ", ".join([co[0] for co in conflict_opts]),
                    option)
            elif handler == "resolve":
                # Strip the conflicting spellings from the older option;
                # drop it entirely once it has no spellings left.
                for (opt, c_option) in conflict_opts:
                    if opt.startswith("--"):
                        c_option._long_opts.remove(opt)
                        del self._long_opt[opt]
                    else:
                        c_option._short_opts.remove(opt)
                        del self._short_opt[opt]
                    if not (c_option._short_opts or c_option._long_opts):
                        c_option.container.option_list.remove(c_option)
    def add_option(self, *args, **kwargs):
        """add_option(Option)
        add_option(opt_str, ..., kwarg=val, ...)

        Register an option, wiring its strings into the shared mappings
        and recording its default.  Returns the Option instance.
        """
        if isinstance(args[0], str):
            option = self.option_class(*args, **kwargs)
        elif len(args) == 1 and not kwargs:
            option = args[0]
            if not isinstance(option, Option):
                raise TypeError("not an Option instance: %r" % option)
        else:
            raise TypeError("invalid arguments")
        self._check_conflict(option)
        self.option_list.append(option)
        option.container = self
        for opt in option._short_opts:
            self._short_opt[opt] = option
        for opt in option._long_opts:
            self._long_opt[opt] = option
        if option.dest is not None:     # option has a dest, we need a default
            if option.default is not NO_DEFAULT:
                self.defaults[option.dest] = option.default
            elif option.dest not in self.defaults:
                self.defaults[option.dest] = None
        return option
    def add_options(self, option_list):
        """Register every option in 'option_list' via add_option()."""
        for option in option_list:
            self.add_option(option)
    # -- Option query/removal methods ----------------------------------
    def get_option(self, opt_str):
        """Return the Option for 'opt_str' (short or long), or None."""
        return (self._short_opt.get(opt_str) or
                self._long_opt.get(opt_str))
    def has_option(self, opt_str):
        """True if 'opt_str' is a registered short or long option string."""
        return (opt_str in self._short_opt or
                opt_str in self._long_opt)
    def remove_option(self, opt_str):
        """Remove the option known by 'opt_str' along with every one of
        its spellings; raise ValueError for an unknown string."""
        option = self._short_opt.get(opt_str)
        if option is None:
            option = self._long_opt.get(opt_str)
        if option is None:
            raise ValueError("no such option %r" % opt_str)
        for opt in option._short_opts:
            del self._short_opt[opt]
        for opt in option._long_opts:
            del self._long_opt[opt]
        option.container.option_list.remove(option)
    # -- Help-formatting methods ---------------------------------------
    def format_option_help(self, formatter):
        """Return the concatenated help for all non-suppressed options."""
        if not self.option_list:
            return ""
        result = []
        for option in self.option_list:
            # (was "not option.help is SUPPRESS_HELP" -- same behavior,
            # idiomatic spelling)
            if option.help is not SUPPRESS_HELP:
                result.append(formatter.format_option(option))
        return "".join(result)
    def format_description(self, formatter):
        return formatter.format_description(self.get_description())
    def format_help(self, formatter):
        """Return description plus option help, joined by a blank line."""
        result = []
        if self.description:
            result.append(self.format_description(formatter))
        if self.option_list:
            result.append(self.format_option_help(formatter))
        return "\n".join(result)
class OptionGroup (OptionContainer):
    """A titled group of options that shares its parser's option
    mappings; groups affect help output only, not parsing."""
    def __init__(self, parser, title, description=None):
        self.parser = parser
        OptionContainer.__init__(
            self, parser.option_class, parser.conflict_handler, description)
        self.title = title
    def _create_option_list(self):
        # Own option_list, but short/long/defaults maps are the parser's.
        self.option_list = []
        self._share_option_mappings(self.parser)
    def set_title(self, title):
        self.title = title
    def destroy(self):
        """see OptionParser.destroy()."""
        OptionContainer.destroy(self)
        del self.option_list
    # -- Help-formatting methods ---------------------------------------
    def format_help(self, formatter):
        """Return the group heading plus the indented option help."""
        result = formatter.format_heading(self.title)
        formatter.indent()
        result += OptionContainer.format_help(self, formatter)
        formatter.dedent()
        return result
class OptionParser (OptionContainer):
"""
Class attributes:
standard_option_list : [Option]
list of standard options that will be accepted by all instances
of this parser class (intended to be overridden by subclasses).
Instance attributes:
usage : string
a usage string for your program. Before it is displayed
to the user, "%prog" will be expanded to the name of
your program (self.prog or os.path.basename(sys.argv[0])).
prog : string
the name of the current program (to override
os.path.basename(sys.argv[0])).
description : string
A paragraph of text giving a brief overview of your program.
optparse reformats this paragraph to fit the current terminal
width and prints it when the user requests help (after usage,
but before the list of options).
epilog : string
paragraph of help text to print after option help
option_groups : [OptionGroup]
list of option groups in this parser (option groups are
irrelevant for parsing the command-line, but very useful
for generating help)
allow_interspersed_args : bool = true
if true, positional arguments may be interspersed with options.
Assuming -a and -b each take a single argument, the command-line
-ablah foo bar -bboo baz
will be interpreted the same as
-ablah -bboo -- foo bar baz
If this flag were false, that command line would be interpreted as
-ablah -- foo bar -bboo baz
-- ie. we stop processing options as soon as we see the first
non-option argument. (This is the tradition followed by
Python's getopt module, Perl's Getopt::Std, and other argument-
parsing libraries, but it is generally annoying to users.)
process_default_values : bool = true
if true, option default values are processed similarly to option
values from the command line: that is, they are passed to the
type-checking function for the option's type (as long as the
default value is a string). (This really only matters if you
have defined custom types; see SF bug #955889.) Set it to false
to restore the behaviour of Optik 1.4.1 and earlier.
rargs : [string]
the argument list currently being parsed. Only set when
parse_args() is active, and continually trimmed down as
we consume arguments. Mainly there for the benefit of
callback options.
largs : [string]
the list of leftover arguments that we have skipped while
parsing options. If allow_interspersed_args is false, this
list is always empty.
values : Values
the set of option values currently being accumulated. Only
set when parse_args() is active. Also mainly for callbacks.
Because of the 'rargs', 'largs', and 'values' attributes,
OptionParser is not thread-safe. If, for some perverse reason, you
need to parse command-line arguments simultaneously in different
threads, use different OptionParser instances.
"""
standard_option_list = []
    def __init__(self,
                 usage=None,
                 option_list=None,
                 option_class=Option,
                 version=None,
                 conflict_handler="error",
                 description=None,
                 formatter=None,
                 add_help_option=True,
                 prog=None,
                 epilog=None):
        """Create a parser; see the class docstring for attribute
        meanings.  Supplying 'version' adds --version; 'add_help_option'
        controls -h/--help."""
        OptionContainer.__init__(
            self, option_class, conflict_handler, description)
        self.set_usage(usage)
        self.prog = prog
        self.version = version
        self.allow_interspersed_args = True
        self.process_default_values = True
        if formatter is None:
            formatter = IndentedHelpFormatter()
        self.formatter = formatter
        self.formatter.set_parser(self)
        self.epilog = epilog
        # Populate the option list; initial sources are the
        # standard_option_list class attribute, the 'option_list'
        # argument, and (if applicable) the _add_version_option() and
        # _add_help_option() methods.
        self._populate_option_list(option_list,
                                   add_help=add_help_option)
        self._init_parsing_state()
    def destroy(self):
        """
        Declare that you are done with this OptionParser.  This cleans up
        reference cycles so the OptionParser (and all objects referenced by
        it) can be garbage-collected promptly.  After calling destroy(), the
        OptionParser is unusable.
        """
        OptionContainer.destroy(self)
        # Groups share this parser's mappings, so tear them down too.
        for group in self.option_groups:
            group.destroy()
        del self.option_list
        del self.option_groups
        del self.formatter
# -- Private methods -----------------------------------------------
# (used by our or OptionContainer's constructor)
    def _create_option_list(self):
        # The parser owns the master short/long/defaults mappings that
        # its OptionGroups will share.
        self.option_list = []
        self.option_groups = []
        self._create_option_mappings()
    def _add_help_option(self):
        # Standard -h/--help option (action "help" prints help and exits).
        self.add_option("-h", "--help",
                        action="help",
                        help=_("show this help message and exit"))
    def _add_version_option(self):
        # Standard --version option (action "version" prints and exits).
        self.add_option("--version",
                        action="version",
                        help=_("show program's version number and exit"))
def _populate_option_list(self, option_list, add_help=True):
if self.standard_option_list:
self.add_options(self.standard_option_list)
if option_list:
self.add_options(option_list)
if self.version:
self._add_version_option()
if add_help:
self._add_help_option()
def _init_parsing_state(self):
# These are set in parse_args() for the convenience of callbacks.
self.rargs = None
self.largs = None
self.values = None
# -- Simple modifier methods ---------------------------------------
def set_usage(self, usage):
if usage is None:
self.usage = _("%prog [options]")
elif usage is SUPPRESS_USAGE:
self.usage = None
# For backwards compatibility with Optik 1.3 and earlier.
elif usage.lower().startswith("usage: "):
self.usage = usage[7:]
else:
self.usage = usage
    def enable_interspersed_args(self):
        """Set parsing to not stop on the first non-option, allowing
        interspersing switches with command arguments.  This is the
        default behavior.  See also disable_interspersed_args() and the
        class documentation description of the attribute
        allow_interspersed_args."""
        self.allow_interspersed_args = True
    def disable_interspersed_args(self):
        """Set parsing to stop on the first non-option.  Use this if
        you have a command processor which runs another command that
        has options of its own and you want to make sure these options
        don't get confused.
        """
        self.allow_interspersed_args = False
    def set_process_default_values(self, process):
        # When true, string defaults are run through the option's type
        # checker (see get_default_values()).
        self.process_default_values = process
    def set_default(self, dest, value):
        # Set the default for one option destination.
        self.defaults[dest] = value
    def set_defaults(self, **kwargs):
        # Set defaults for several destinations at once (dest=value, ...).
        self.defaults.update(kwargs)
def _get_all_options(self):
options = self.option_list[:]
for group in self.option_groups:
options.extend(group.option_list)
return options
    def get_default_values(self):
        """Return a Values populated from self.defaults; string defaults
        are converted through each option's type checker when
        process_default_values is true."""
        if not self.process_default_values:
            # Old, pre-Optik 1.5 behaviour.
            return Values(self.defaults)
        defaults = self.defaults.copy()
        for option in self._get_all_options():
            default = defaults.get(option.dest)
            if isinstance(default, str):
                opt_str = option.get_opt_string()
                defaults[option.dest] = option.check_value(opt_str, default)
        return Values(defaults)
# -- OptionGroup methods -------------------------------------------
    def add_option_group(self, *args, **kwargs):
        """add_option_group(OptionGroup) or
        add_option_group(title, ...) -> OptionGroup

        Register (or create and register) an option group owned by this
        parser and return it."""
        # XXX lots of overlap with OptionContainer.add_option()
        if isinstance(args[0], str):
            group = OptionGroup(self, *args, **kwargs)
        elif len(args) == 1 and not kwargs:
            group = args[0]
            if not isinstance(group, OptionGroup):
                raise TypeError("not an OptionGroup instance: %r" % group)
            if group.parser is not self:
                raise ValueError("invalid OptionGroup (wrong parser)")
        else:
            raise TypeError("invalid arguments")
        self.option_groups.append(group)
        return group
def get_option_group(self, opt_str):
option = (self._short_opt.get(opt_str) or
self._long_opt.get(opt_str))
if option and option.container is not self:
return option.container
return None
# -- Option-parsing methods ----------------------------------------
def _get_args(self, args):
if args is None:
return sys.argv[1:]
else:
return args[:] # don't modify caller's list
def parse_args(self, args=None, values=None):
"""
parse_args(args : [string] = sys.argv[1:],
values : Values = None)
-> (values : Values, args : [string])
Parse the command-line options found in 'args' (default:
sys.argv[1:]). Any errors result in a call to 'error()', which
by default prints the usage message to stderr and calls
sys.exit() with an error message. On success returns a pair
(values, args) where 'values' is an Values instance (with all
your option values) and 'args' is the list of arguments left
over after parsing options.
"""
rargs = self._get_args(args)
if values is None:
values = self.get_default_values()
# Store the halves of the argument list as attributes for the
# convenience of callbacks:
# rargs
# the rest of the command-line (the "r" stands for
# "remaining" or "right-hand")
# largs
# the leftover arguments -- ie. what's left after removing
# options and their arguments (the "l" stands for "leftover"
# or "left-hand")
self.rargs = rargs
self.largs = largs = []
self.values = values
try:
stop = self._process_args(largs, rargs, values)
except (BadOptionError, OptionValueError) as err:
self.error(str(err))
args = largs + rargs
return self.check_values(values, args)
    def check_values(self, values, args):
        """
        check_values(values : Values, args : [string])
        -> (values : Values, args : [string])
        Check that the supplied option values and leftover arguments are
        valid.  Returns the option values and leftover arguments
        (possibly adjusted, possibly completely new -- whatever you
        like).  Default implementation just returns the passed-in
        values; subclasses may override as desired.
        """
        return (values, args)
    def _process_args(self, largs, rargs, values):
        """_process_args(largs : [string],
                         rargs : [string],
                         values : Values)

        Process command-line arguments and populate 'values', consuming
        options and arguments from 'rargs'.  If 'allow_interspersed_args' is
        false, stop at the first non-option argument.  If true, accumulate any
        interspersed non-option arguments in 'largs'.  Both lists are
        mutated in place.
        """
        while rargs:
            arg = rargs[0]
            # We handle bare "--" explicitly, and bare "-" is handled by the
            # standard arg handler since the short arg case ensures that the
            # len of the opt string is greater than 1.
            if arg == "--":
                del rargs[0]
                return
            elif arg[0:2] == "--":
                # process a single long option (possibly with value(s))
                self._process_long_opt(rargs, values)
            elif arg[:1] == "-" and len(arg) > 1:
                # process a cluster of short options (possibly with
                # value(s) for the last one only)
                self._process_short_opts(rargs, values)
            elif self.allow_interspersed_args:
                # Positional argument: stash it and keep scanning.
                largs.append(arg)
                del rargs[0]
            else:
                return  # stop now, leave this arg in rargs
        # Say this is the original argument list:
        # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
        #                             ^
        # (we are about to process arg(i)).
        #
        # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
        # [arg0, ..., arg(i-1)] (any options and their arguments will have
        # been removed from largs).
        #
        # The while loop will usually consume 1 or more arguments per pass.
        # If it consumes 1 (eg. arg is an option that takes no arguments),
        # then after _process_arg() is done the situation is:
        #
        #   largs = subset of [arg0, ..., arg(i)]
        #   rargs = [arg(i+1), ..., arg(N-1)]
        #
        # If allow_interspersed_args is false, largs will always be
        # *empty* -- still a subset of [arg0, ..., arg(i-1)], but
        # not a very interesting subset!
    def _match_long_opt(self, opt):
        """_match_long_opt(opt : string) -> string

        Determine which long option string 'opt' matches, ie. which one
        it is an unambiguous abbreviation for.  Raises BadOptionError if
        'opt' doesn't unambiguously match any long option string.
        """
        # Delegates to the module-level prefix matcher over the table of
        # registered long options.
        return _match_abbrev(opt, self._long_opt)
    def _process_long_opt(self, rargs, values):
        """Consume one long option (and any values it takes) from rargs.

        Handles "--opt=value" by splitting off the value, resolves
        unambiguous abbreviations via _match_long_opt(), and delegates
        the final handling to Option.process().
        """
        arg = rargs.pop(0)
        # Value explicitly attached to arg?  Pretend it's the next
        # argument.
        if "=" in arg:
            (opt, next_arg) = arg.split("=", 1)
            rargs.insert(0, next_arg)
            had_explicit_value = True
        else:
            opt = arg
            had_explicit_value = False
        # May expand an abbreviation, or raise BadOptionError.
        opt = self._match_long_opt(opt)
        option = self._long_opt[opt]
        if option.takes_value():
            nargs = option.nargs
            if len(rargs) < nargs:
                # Not enough arguments left to satisfy this option.
                if nargs == 1:
                    self.error(_("%s option requires an argument") % opt)
                else:
                    self.error(_("%s option requires %d arguments")
                               % (opt, nargs))
            elif nargs == 1:
                value = rargs.pop(0)
            else:
                value = tuple(rargs[0:nargs])
                del rargs[0:nargs]
        elif had_explicit_value:
            # "--flag=value" for an option that takes no value.
            self.error(_("%s option does not take a value") % opt)
        else:
            value = None
        option.process(opt, value, values, self)
    def _process_short_opts(self, rargs, values):
        """Consume one cluster of short options (e.g. "-abc") from rargs.

        Only the last option in a cluster may take a value; any trailing
        characters after a value-taking option are treated as its value.
        """
        arg = rargs.pop(0)
        stop = False
        i = 1
        for ch in arg[1:]:
            opt = "-" + ch
            option = self._short_opt.get(opt)
            i += 1  # we have consumed a character
            if not option:
                raise BadOptionError(opt)
            if option.takes_value():
                # Any characters left in arg?  Pretend they're the
                # next arg, and stop consuming characters of arg.
                if i < len(arg):
                    rargs.insert(0, arg[i:])
                    stop = True
                nargs = option.nargs
                if len(rargs) < nargs:
                    if nargs == 1:
                        self.error(_("%s option requires an argument") % opt)
                    else:
                        self.error(_("%s option requires %d arguments")
                                   % (opt, nargs))
                elif nargs == 1:
                    value = rargs.pop(0)
                else:
                    value = tuple(rargs[0:nargs])
                    del rargs[0:nargs]
            else:  # option doesn't take a value
                value = None
            option.process(opt, value, values, self)
            if stop:
                break
# -- Feedback methods ----------------------------------------------
def get_prog_name(self):
if self.prog is None:
return os.path.basename(sys.argv[0])
else:
return self.prog
def expand_prog_name(self, s):
return s.replace("%prog", self.get_prog_name())
def get_description(self):
return self.expand_prog_name(self.description)
    def exit(self, status=0, msg=None):
        """Terminate the process with exit code 'status'.

        When 'msg' is given (and non-empty) it is written to stderr
        first.  Raises SystemExit via sys.exit().
        """
        if msg:
            sys.stderr.write(msg)
        sys.exit(status)
    def error(self, msg):
        """error(msg : string)

        Print a usage message incorporating 'msg' to stderr and exit.
        If you override this in a subclass, it should not return -- it
        should either exit or raise an exception.
        """
        self.print_usage(sys.stderr)
        # Exit status 2 is the conventional code for command-line errors.
        self.exit(2, "%s: error: %s\n" % (self.get_prog_name(), msg))
def get_usage(self):
if self.usage:
return self.formatter.format_usage(
self.expand_prog_name(self.usage))
else:
return ""
    def print_usage(self, file=None):
        """print_usage(file : file = stdout)

        Print the usage message for the current program (self.usage) to
        'file' (default stdout).  Any occurrence of the string "%prog" in
        self.usage is replaced with the name of the current program
        (basename of sys.argv[0]).  Does nothing if self.usage is empty
        or not defined.
        """
        if self.usage:
            # file=None makes print() fall back to sys.stdout.
            print(self.get_usage(), file=file)
def get_version(self):
if self.version:
return self.expand_prog_name(self.version)
else:
return ""
    def print_version(self, file=None):
        """print_version(file : file = stdout)

        Print the version message for this program (self.version) to
        'file' (default stdout).  As with print_usage(), any occurrence
        of "%prog" in self.version is replaced by the current program's
        name.  Does nothing if self.version is empty or undefined.
        """
        if self.version:
            # file=None makes print() fall back to sys.stdout.
            print(self.get_version(), file=file)
    def format_option_help(self, formatter=None):
        """Return the formatted "Options" section, including all option
        groups, using 'formatter' (default: self.formatter)."""
        if formatter is None:
            formatter = self.formatter
        # Let the formatter pre-compute alignment for every option string.
        formatter.store_option_strings(self)
        result = []
        result.append(formatter.format_heading(_("Options")))
        formatter.indent()
        if self.option_list:
            # Ungrouped options come straight from the base container.
            result.append(OptionContainer.format_option_help(self, formatter))
            result.append("\n")
        for group in self.option_groups:
            result.append(group.format_help(formatter))
            result.append("\n")
        formatter.dedent()
        # Drop the last "\n", or the header if no options or option groups:
        return "".join(result[:-1])
def format_epilog(self, formatter):
return formatter.format_epilog(self.epilog)
    def format_help(self, formatter=None):
        """Assemble the full help text: usage line, description, the
        options section, and the epilog, in that order."""
        if formatter is None:
            formatter = self.formatter
        result = []
        if self.usage:
            result.append(self.get_usage() + "\n")
        if self.description:
            result.append(self.format_description(formatter) + "\n")
        result.append(self.format_option_help(formatter))
        result.append(self.format_epilog(formatter))
        return "".join(result)
def print_help(self, file=None):
"""print_help(file : file = stdout)
Print an extended help message, listing all options and any
help text provided with them, to 'file' (default stdout).
"""
if file is None:
file = sys.stdout
file.write(self.format_help())
# class OptionParser
def _match_abbrev(s, wordmap):
"""_match_abbrev(s : string, wordmap : {string : Option}) -> string
Return the string key in 'wordmap' for which 's' is an unambiguous
abbreviation. If 's' is found to be ambiguous or doesn't match any of
'words', raise BadOptionError.
"""
# Is there an exact match?
if s in wordmap:
return s
else:
# Isolate all words with s as a prefix.
possibilities = [word for word in wordmap.keys()
if word.startswith(s)]
# No exact match, so there had better be just one possibility.
if len(possibilities) == 1:
return possibilities[0]
elif not possibilities:
raise BadOptionError(s)
else:
# More than one possible completion: ambiguous prefix.
possibilities.sort()
raise AmbiguousOptionError(s, possibilities)
# Some day, there might be many Option classes.  As of Optik 1.3, the
# preferred way to instantiate Options is indirectly, via make_option(),
# which will become a factory function when there are many Option
# classes.
# NOTE: today this is a plain alias, so make_option(...) constructs an
# Option directly.
make_option = Option
|
apache-2.0
|
filodej/django-filer
|
filer/tests/tools.py
|
12
|
2995
|
#-*- coding: utf-8 -*-
from django.core.files import File as DjangoFile
from django.test.testcases import TestCase
from filer.models import tools
from filer.models.clipboardmodels import Clipboard
from filer.models.foldermodels import Folder
from filer.models.imagemodels import Image
from filer.tests.helpers import create_superuser, create_image
import os
class ToolsTestCase(TestCase):
    """Tests for the filer.models.tools clipboard helpers.

    Fixtures are created lazily via create_fixtures() so each test
    controls its own setup; tearDown() removes the image written to
    disk and deletes every filer Image created during the test.
    """

    def tearDown(self):
        self.client.logout()
        os.remove(self.filename)
        for img in Image.objects.all():
            img.delete()

    def create_fixtures(self):
        """Create a superuser, an on-disk image, a clipboard and a folder."""
        self.superuser = create_superuser()
        self.client.login(username='admin', password='secret')
        self.img = create_image()
        self.image_name = 'test_file.jpg'
        self.filename = os.path.join(os.path.dirname(__file__),
                                     self.image_name)
        self.img.save(self.filename, 'JPEG')
        # Open in binary mode: the file is a JPEG, and the default text
        # mode would fail to decode / corrupt the bytes on Python 3.
        self.file = DjangoFile(open(self.filename, 'rb'),
                               name=self.image_name)
        # This is actually a "file" for filer considerations
        self.image = Image.objects.create(owner=self.superuser,
                                          original_filename=self.image_name,
                                          file=self.file)
        self.clipboard = Clipboard.objects.create(user=self.superuser)
        self.clipboard.append_file(self.image)
        self.folder = Folder.objects.create(name='test_folder')

    def test_clear_clipboard_works(self):
        self.create_fixtures()
        self.assertEqual(len(self.clipboard.files.all()), 1)
        tools.discard_clipboard(self.clipboard)
        self.assertEqual(len(self.clipboard.files.all()), 0)

    def test_move_to_clipboard_works(self):
        self.create_fixtures()
        self.assertEqual(len(self.clipboard.files.all()), 1)
        file2 = Image.objects.create(owner=self.superuser,
                                     original_filename='file2',
                                     file=self.file)
        file3 = Image.objects.create(owner=self.superuser,
                                     original_filename='file3',
                                     file=self.file)
        files = [file2, file3]
        tools.move_file_to_clipboard(files, self.clipboard)
        self.assertEqual(len(self.clipboard.files.all()), 3)

    def test_move_from_clipboard_to_folder_works(self):
        self.create_fixtures()
        self.assertEqual(len(self.clipboard.files.all()), 1)
        tools.move_files_from_clipboard_to_folder(self.clipboard, self.folder)
        for file in self.clipboard.files.all():
            self.assertEqual(file.folder, self.folder)

    def test_delete_clipboard_works(self):
        self.create_fixtures()
        self.assertEqual(len(self.clipboard.files.all()), 1)
        tools.delete_clipboard(self.clipboard)
        # Assert there is no file with self.image_name = 'test_file.jpg'
        result = Image.objects.filter(file=self.file)
        self.assertEqual(len(result), 0)
|
bsd-3-clause
|
titansgroup/python-phonenumbers
|
python/phonenumbers/data/region_NZ.py
|
10
|
2889
|
"""Auto-generated file, do not edit by hand. NZ metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_NZ = PhoneMetadata(id='NZ', country_code=64, international_prefix='0(?:0|161)',
general_desc=PhoneNumberDesc(national_number_pattern='6[235-9]\\d{6}|[2-57-9]\\d{7,10}', possible_number_pattern='\\d{7,11}'),
fixed_line=PhoneNumberDesc(national_number_pattern='(?:3[2-79]|[49][2-9]|6[235-9]|7[2-57-9])\\d{6}|24099\\d{3}', possible_number_pattern='\\d{7,8}', example_number='32345678'),
mobile=PhoneNumberDesc(national_number_pattern='2(?:[028]\\d{7,8}|1(?:[03]\\d{5,7}|[12457]\\d{5,6}|[689]\\d{5})|[79]\\d{7})', possible_number_pattern='\\d{8,10}', example_number='211234567'),
toll_free=PhoneNumberDesc(national_number_pattern='508\\d{6,7}|80\\d{6,8}', possible_number_pattern='\\d{8,10}', example_number='800123456'),
premium_rate=PhoneNumberDesc(national_number_pattern='90\\d{7,9}', possible_number_pattern='\\d{9,11}', example_number='900123456'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='70\\d{7}', possible_number_pattern='\\d{9}', example_number='701234567'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='[28]6\\d{6,7}', possible_number_pattern='\\d{8,9}', example_number='26123456'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
preferred_international_prefix='00',
national_prefix='0',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='([34679])(\\d{3})(\\d{4})', format='\\1-\\2 \\3', leading_digits_pattern=['[346]|7[2-57-9]|9[1-9]'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(24099)(\\d{3})', format='\\1 \\2', leading_digits_pattern=['240', '2409', '24099'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['21'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{3,5})', format='\\1 \\2 \\3', leading_digits_pattern=['2(?:1[1-9]|[69]|7[0-35-9])|70|86'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(2\\d)(\\d{3,4})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['2[028]'], national_prefix_formatting_rule='0\\1'),
NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{3,4})', format='\\1 \\2 \\3', leading_digits_pattern=['2(?:10|74)|5|[89]0'], national_prefix_formatting_rule='0\\1')],
mobile_number_portable_region=True)
|
apache-2.0
|
GhostThrone/django
|
tests/forms_tests/widget_tests/test_nullbooleanselect.py
|
179
|
2142
|
from django.forms import NullBooleanSelect
from django.test import override_settings
from django.utils import translation
from .base import WidgetTest
class NullBooleanSelectTest(WidgetTest):
    """Rendering tests for the three-state NullBooleanSelect widget.

    The expected HTML maps option value "1" to Unknown (None),
    "2" to Yes (True) and "3" to No (False).
    """
    widget = NullBooleanSelect()

    def test_render_true(self):
        # True selects the "Yes" option (value "2").
        self.check_html(self.widget, 'is_cool', True, html=(
            """<select name="is_cool">
            <option value="1">Unknown</option>
            <option value="2" selected="selected">Yes</option>
            <option value="3">No</option>
            </select>"""
        ))

    def test_render_false(self):
        # False selects the "No" option (value "3").
        self.check_html(self.widget, 'is_cool', False, html=(
            """<select name="is_cool">
            <option value="1">Unknown</option>
            <option value="2">Yes</option>
            <option value="3" selected="selected">No</option>
            </select>"""
        ))

    def test_render_none(self):
        # None selects the "Unknown" option (value "1").
        self.check_html(self.widget, 'is_cool', None, html=(
            """<select name="is_cool">
            <option value="1" selected="selected">Unknown</option>
            <option value="2">Yes</option>
            <option value="3">No</option>
            </select>"""
        ))

    def test_render_value(self):
        # A raw option value ('2') is honoured as-is.
        self.check_html(self.widget, 'is_cool', '2', html=(
            """<select name="is_cool">
            <option value="1">Unknown</option>
            <option value="2" selected="selected">Yes</option>
            <option value="3">No</option>
            </select>"""
        ))

    @override_settings(USE_L10N=True)
    def test_l10n(self):
        """
        Ensure that the NullBooleanSelect widget's options are lazily
        localized (#17190).
        """
        widget = NullBooleanSelect()
        with translation.override('de-at'):
            self.check_html(widget, 'id_bool', True, html=(
                """
                <select name="id_bool">
                <option value="1">Unbekannt</option>
                <option value="2" selected="selected">Ja</option>
                <option value="3">Nein</option>
                </select>
                """
            ))
|
bsd-3-clause
|
535521469/crawler_sth
|
scrapy/tests/test_utils_spider.py
|
44
|
1083
|
import unittest
from scrapy.http import Request
from scrapy.item import BaseItem
from scrapy.utils.spider import iterate_spider_output, iter_spider_classes
from scrapy.contrib.spiders import CrawlSpider
class MyBaseSpider(CrawlSpider):
    # Abstract base fixture: deliberately defines no 'name', and the
    # tests below expect iter_spider_classes NOT to yield it.
    pass  # abstract spider
class MySpider1(MyBaseSpider):
    # Concrete spider fixture used by test_iter_spider_classes.
    name = 'myspider1'
class MySpider2(MyBaseSpider):
    # Second concrete spider fixture used by test_iter_spider_classes.
    name = 'myspider2'
class UtilsSpidersTestCase(unittest.TestCase):
    """Unit tests for the scrapy.utils.spider helpers."""

    def test_iterate_spider_output(self):
        # Scalars (item, request, arbitrary object) come back wrapped in
        # a one-element iterable; an actual iterable passes through.
        i = BaseItem()
        r = Request('http://scrapytest.org')
        o = object()
        self.assertEqual(list(iterate_spider_output(i)), [i])
        self.assertEqual(list(iterate_spider_output(r)), [r])
        self.assertEqual(list(iterate_spider_output(o)), [o])
        self.assertEqual(list(iterate_spider_output([r, i, o])), [r, i, o])

    def test_iter_spider_classes(self):
        # Only the concrete spiders defined in this module are expected;
        # MyBaseSpider (which has no 'name') must not appear.
        import scrapy.tests.test_utils_spider
        it = iter_spider_classes(scrapy.tests.test_utils_spider)
        self.assertEqual(set(it), set([MySpider1, MySpider2]))
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
|
bsd-3-clause
|
Dhivyap/ansible
|
lib/ansible/modules/cloud/google/gcp_compute_target_http_proxy.py
|
3
|
13840
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################

# Module status declaration consumed by Ansible's doc tooling: this
# module is community-supported and still in "preview" status.
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_target_http_proxy
description:
- Represents a TargetHttpProxy resource, which is used by one or more global forwarding
rule to route incoming HTTP requests to a URL map.
short_description: Creates a GCP TargetHttpProxy
version_added: '2.6'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
description:
description:
- An optional description of this resource.
required: false
type: str
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
required: true
type: str
url_map:
description:
- A reference to the UrlMap resource that defines the mapping from URL to the
BackendService.
- 'This field represents a link to a UrlMap resource in GCP. It can be specified
in two ways. First, you can place a dictionary with key ''selfLink'' and value
of your resource''s selfLink Alternatively, you can add `register: name-of-resource`
to a gcp_compute_url_map task and then set this url_map field to "{{ name-of-resource
}}"'
required: true
type: dict
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- 'API Reference: U(https://cloud.google.com/compute/docs/reference/v1/targetHttpProxies)'
- 'Official Documentation: U(https://cloud.google.com/compute/docs/load-balancing/http/target-proxies)'
- for authentication, you can set service_account_file using the c(gcp_service_account_file)
env variable.
- for authentication, you can set service_account_contents using the c(GCP_SERVICE_ACCOUNT_CONTENTS)
env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: create a instance group
gcp_compute_instance_group:
name: instancegroup-targethttpproxy
zone: us-central1-a
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: instancegroup
- name: create a HTTP health check
gcp_compute_http_health_check:
name: httphealthcheck-targethttpproxy
healthy_threshold: 10
port: 8080
timeout_sec: 2
unhealthy_threshold: 5
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: healthcheck
- name: create a backend service
gcp_compute_backend_service:
name: backendservice-targethttpproxy
backends:
- group: "{{ instancegroup.selfLink }}"
health_checks:
- "{{ healthcheck.selfLink }}"
enable_cdn: 'true'
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: backendservice
- name: create a URL map
gcp_compute_url_map:
name: urlmap-targethttpproxy
default_service: "{{ backendservice }}"
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: urlmap
- name: create a target HTTP proxy
gcp_compute_target_http_proxy:
name: test_object
url_map: "{{ urlmap }}"
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
description:
description:
- An optional description of this resource.
returned: success
type: str
id:
description:
- The unique identifier for the resource.
returned: success
type: int
name:
description:
- Name of the resource. Provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
urlMap:
description:
- A reference to the UrlMap resource that defines the mapping from URL to the BackendService.
returned: success
type: dict
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, replace_resource_dict
import json
import time
################################################################################
# Main
################################################################################
def main():
    """Reconcile the GCP TargetHttpProxy with the declared state.

    Creates, updates or deletes the resource as needed, then exits the
    module with the fetched resource plus a 'changed' flag.
    """
    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            description=dict(type='str'),
            name=dict(required=True, type='str'),
            url_map=dict(required=True, type='dict'),
        )
    )

    # Default to the compute scope when the playbook supplies none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']

    state = module.params['state']
    kind = 'compute#targetHttpProxy'
    fetch = fetch_resource(module, self_link(module), kind)
    changed = False

    # Compare what exists in GCP against the desired state and act.
    if fetch:
        if state == 'present':
            if is_different(module, fetch):
                update(module, self_link(module), kind, fetch)
                fetch = fetch_resource(module, self_link(module), kind)
                changed = True
        else:
            delete(module, self_link(module), kind)
            fetch = {}
            changed = True
    else:
        if state == 'present':
            fetch = create(module, collection(module), kind)
            changed = True
        else:
            fetch = {}

    fetch.update({'changed': changed})

    module.exit_json(**fetch)
def create(module, link, kind):
    """POST the desired resource to 'link' and wait for the operation."""
    session = GcpSession(module, 'compute')
    response = session.post(link, resource_to_request(module))
    return wait_for_operation(module, response)
def update(module, link, kind, fetch):
    """Apply per-field updates, then return the resource's fresh state."""
    desired = resource_to_request(module)
    update_fields(module, desired, response_to_hash(module, fetch))
    return fetch_resource(module, self_link(module), kind)
def update_fields(module, request, response):
    """Dispatch field-level update calls for fields that changed.

    Only urlMap is mutable here, via the dedicated setUrlMap action.
    """
    if request.get('urlMap') != response.get('urlMap'):
        url_map_update(module, request, response)
def url_map_update(module, request, response):
    """POST the setUrlMap action to repoint the proxy at the configured UrlMap."""
    auth = GcpSession(module, 'compute')
    auth.post(
        ''.join(["https://www.googleapis.com/compute/v1/", "projects/{project}/targetHttpProxies/{name}/setUrlMap"]).format(**module.params),
        {u'urlMap': replace_resource_dict(module.params.get(u'url_map', {}), 'selfLink')},
    )
def delete(module, link, kind):
    """DELETE the resource at 'link' and block until the operation completes."""
    session = GcpSession(module, 'compute')
    return wait_for_operation(module, session.delete(link))
def resource_to_request(module):
    """Build the API request payload from the module parameters.

    Falsy placeholder values (None, '', ...) are stripped from the
    payload; False is kept deliberately, as it is a real setting.
    """
    request = {
        u'kind': 'compute#targetHttpProxy',
        u'description': module.params.get('description'),
        u'name': module.params.get('name'),
        u'urlMap': replace_resource_dict(module.params.get(u'url_map', {}), 'selfLink'),
    }
    return {k: v for k, v in request.items() if v or v is False}
def fetch_resource(module, link, kind, allow_not_found=True):
    """GET 'link' and return the decoded resource (None when absent)."""
    session = GcpSession(module, 'compute')
    response = session.get(link)
    return return_if_object(module, response, kind, allow_not_found)
def self_link(module):
    """Return the canonical URL of this target HTTP proxy."""
    template = "https://www.googleapis.com/compute/v1/projects/{project}/global/targetHttpProxies/{name}"
    return template.format(**module.params)
def collection(module):
    """Return the URL of the project-wide targetHttpProxies collection."""
    template = "https://www.googleapis.com/compute/v1/projects/{project}/global/targetHttpProxies"
    return template.format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
    """Decode 'response' as JSON, failing the module on API errors.

    Returns None for an allowed 404 or an empty 204 body; otherwise the
    parsed resource dict.
    """
    # If not found, return nothing.
    if allow_not_found and response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        # Python 2 has no JSONDecodeError; fall back to ValueError there.
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    # A 2xx body can still carry an embedded error block.
    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result
def is_different(module, response):
    """Return True when the live resource differs from the desired one.

    Comparison is restricted to keys present on both sides so that
    output-only response fields never trigger a spurious update.
    """
    request = resource_to_request(module)
    response = response_to_hash(module, response)

    response_vals = {k: v for k, v in response.items() if k in request}
    request_vals = {k: v for k, v in request.items() if k in response}
    return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
    """Project the API response onto the fields this module manages."""
    keys = (u'creationTimestamp', u'description', u'id', u'name', u'urlMap')
    return {key: response.get(key) for key in keys}
def async_op_url(module, extra_data=None):
    """Build the URL of a global compute operation.

    'extra_data' supplies values (e.g. op_id) missing from module.params;
    module.params wins on conflicts.
    """
    merged = dict(extra_data or {})
    merged.update(module.params)
    return ("https://www.googleapis.com/compute/v1/"
            "projects/{project}/global/operations/{op_id}").format(**merged)
def wait_for_operation(module, response):
    """Block until the compute operation in 'response' finishes.

    Returns the acted-upon resource (via its targetLink), or {} when the
    operation payload is empty.
    """
    op_result = return_if_object(module, response, 'compute#operation')
    if op_result is None:
        return {}
    status = navigate_hash(op_result, ['status'])
    wait_done = wait_for_completion(status, op_result, module)
    # 'targetLink' points at the resource the operation acted upon.
    return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#targetHttpProxy')
def wait_for_completion(status, op_result, module):
    """Poll the operation once per second until its status reaches DONE."""
    op_id = navigate_hash(op_result, ['name'])
    op_uri = async_op_url(module, {'op_id': op_id})
    while status != 'DONE':
        # Surface operation errors as soon as they appear.
        raise_if_errors(op_result, ['error', 'errors'], module)
        time.sleep(1.0)
        op_result = fetch_resource(module, op_uri, 'compute#operation', False)
        status = navigate_hash(op_result, ['status'])
    return op_result
def raise_if_errors(response, err_path, module):
    """Fail the module when the payload carries an error block at err_path."""
    found = navigate_hash(response, err_path)
    if found is not None:
        module.fail_json(msg=found)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
gpl-3.0
|
keedio/hue
|
desktop/core/ext-py/Django-1.6.10/django/contrib/gis/geos/mutable_list.py
|
217
|
10972
|
# Copyright (c) 2008-2009 Aryeh Leib Taurog, all rights reserved.
# Released under the New BSD license.
"""
This module contains a base type which provides list-style mutations
without specific data storage methods.
See also http://www.aryehleib.com/MutableLists.html
Author: Aryeh Leib Taurog.
"""
from django.utils.functional import total_ordering
from django.utils import six
from django.utils.six.moves import xrange
@total_ordering
class ListMixin(object):
    """
    A base class which provides complete list interface.
    Derived classes must call ListMixin's __init__() function
    and implement the following:

    function _get_single_external(self, i):
        Return single item with index i for general use.
        The index i will always satisfy 0 <= i < len(self).

    function _get_single_internal(self, i):
        Same as above, but for use within the class [Optional]
        Note that if _get_single_external and _get_single_internal return
        different types of objects, _set_list must distinguish
        between the two and handle each appropriately.

    function _set_list(self, length, items):
        Recreate the entire object.

        NOTE: items may be a generator which calls _get_single_internal.
        Therefore, it is necessary to cache the values in a temporary:
            temp = list(items)
        before clobbering the original storage.

    function _set_single(self, i, value):
        Set the single item at index i to value [Optional]
        If left undefined, all mutations will result in rebuilding
        the object using _set_list.

    function __len__(self):
        Return the length

    int _minlength:
        The minimum legal length [Optional]

    int _maxlength:
        The maximum legal length [Optional]

    type or tuple _allowed:
        A type or tuple of allowed item types [Optional]

    class _IndexError:
        The type of exception to be raised on invalid index [Optional]
    """

    _minlength = 0            # smallest length a rebuild may produce
    _maxlength = None         # None means no upper bound
    _IndexError = IndexError  # exception class raised for bad indexes

    ### Python initialization and special list interface methods ###
    def __init__(self, *args, **kwargs):
        # Fall back to the external getter when the subclass supplies no
        # specialised internal one.
        if not hasattr(self, '_get_single_internal'):
            self._get_single_internal = self._get_single_external

        # Without an efficient single-item setter, every mutation must
        # rebuild the whole object (see the class docstring).
        if not hasattr(self, '_set_single'):
            self._set_single = self._set_single_rebuild
            self._assign_extended_slice = self._assign_extended_slice_rebuild

        super(ListMixin, self).__init__(*args, **kwargs)
    def __getitem__(self, index):
        "Get the item(s) at the specified index/slice."
        if isinstance(index, slice):
            # Slices never raise: indices() clamps to the valid range.
            return [self._get_single_external(i) for i in xrange(*index.indices(len(self)))]
        else:
            index = self._checkindex(index)
            return self._get_single_external(index)
    def __delitem__(self, index):
        "Delete the item(s) at the specified index/slice."
        if not isinstance(index, six.integer_types + (slice,)):
            raise TypeError("%s is not a legal index" % index)

        # calculate new length and dimensions
        origLen = len(self)
        if isinstance(index, six.integer_types):
            index = self._checkindex(index)
            indexRange = [index]
        else:
            indexRange = range(*index.indices(origLen))

        newLen = origLen - len(indexRange)
        # Lazy generator of the surviving items; _set_list is required to
        # cache these before clobbering the original storage.
        newItems = ( self._get_single_internal(i)
                     for i in xrange(origLen)
                     if i not in indexRange )

        self._rebuild(newLen, newItems)
    def __setitem__(self, index, val):
        "Set the item(s) at the specified index/slice."
        if isinstance(index, slice):
            self._set_slice(index, val)
        else:
            index = self._checkindex(index)
            # Validate the value's type before mutating anything.
            self._check_allowed((val,))
            self._set_single(index, val)
    def __iter__(self):
        "Iterate over the items in the list"
        # Delegates to __getitem__ one index at a time.
        for i in xrange(len(self)):
            yield self[i]
### Special methods for arithmetic operations ###
def __add__(self, other):
'add another list-like object'
return self.__class__(list(self) + list(other))
def __radd__(self, other):
'add to another list-like object'
return other.__class__(list(other) + list(self))
def __iadd__(self, other):
'add another list-like object to self'
self.extend(list(other))
return self
def __mul__(self, n):
'multiply'
return self.__class__(list(self) * n)
def __rmul__(self, n):
'multiply'
return self.__class__(list(self) * n)
def __imul__(self, n):
'multiply'
if n <= 0:
del self[:]
else:
cache = list(self)
for i in range(n-1):
self.extend(cache)
return self
    def __eq__(self, other):
        # Equal when lengths match and every position compares equal.
        # Iterates over other's indices; if self turns out to be shorter,
        # indexing self raises self._IndexError, which means "not equal".
        olen = len(other)
        for i in range(olen):
            try:
                c = self[i] == other[i]
            except self._IndexError:
                # self must be shorter
                return False
            if not c:
                return False
        return len(self) == olen
    def __lt__(self, other):
        # Lexicographic less-than, mirroring list ordering: compare items
        # pairwise, and if one sequence is a prefix of the other the
        # shorter one sorts first.
        olen = len(other)
        for i in range(olen):
            try:
                c = self[i] < other[i]
            except self._IndexError:
                # self must be shorter
                return True
            if c:
                return c
            elif other[i] < self[i]:
                # self[i] > other[i]: self sorts after other.
                return False
            # Otherwise items are equal at position i; keep scanning.
        return len(self) < olen
### Public list interface Methods ###
## Non-mutating ##
def count(self, val):
"Standard list count method"
count = 0
for i in self:
if val == i: count += 1
return count
def index(self, val):
"Standard list index method"
for i in xrange(0, len(self)):
if self[i] == val: return i
raise ValueError('%s not found in object' % str(val))
## Mutating ##
def append(self, val):
"Standard list append method"
self[len(self):] = [val]
def extend(self, vals):
"Standard list extend method"
self[len(self):] = vals
def insert(self, index, val):
"Standard list insert method"
if not isinstance(index, six.integer_types):
raise TypeError("%s is not a legal index" % index)
self[index:index] = [val]
def pop(self, index=-1):
"Standard list pop method"
result = self[index]
del self[index]
return result
def remove(self, val):
"Standard list remove method"
del self[self.index(val)]
def reverse(self):
"Standard list reverse method"
self[:] = self[-1::-1]
    def sort(self, cmp=None, key=None, reverse=False):
        "Standard list sort method"
        # Sort a snapshot with list.sort, then write it back via slice
        # assignment so the subclass storage is rebuilt once.
        if key:
            # Decorate-sort-undecorate: compute each key exactly once.
            temp = [(key(v),v) for v in self]
            temp.sort(key=lambda x: x[0], reverse=reverse)
            self[:] = [v[1] for v in temp]
        else:
            temp = list(self)
            if cmp is not None:
                # NOTE: the `cmp` argument is Python 2 only; list.sort lost
                # it in Python 3.
                temp.sort(cmp=cmp, reverse=reverse)
            else:
                temp.sort(reverse=reverse)
            self[:] = temp
    ### Private routines ###
    def _rebuild(self, newLen, newItems):
        # Replace the entire contents with `newItems` (an iterable of
        # newLen items), enforcing the class's length bounds first.
        if newLen < self._minlength:
            raise ValueError('Must have at least %d items' % self._minlength)
        if self._maxlength is not None and newLen > self._maxlength:
            raise ValueError('Cannot have more than %d items' % self._maxlength)
        self._set_list(newLen, newItems)
    def _set_single_rebuild(self, index, value):
        # Fallback single-item setter: assign a length-1 extended slice,
        # which rebuilds the underlying sequence via _set_slice.
        self._set_slice(slice(index, index + 1, 1), [value])
def _checkindex(self, index, correct=True):
length = len(self)
if 0 <= index < length:
return index
if correct and -length <= index < 0:
return index + length
raise self._IndexError('invalid index: %s' % str(index))
def _check_allowed(self, items):
if hasattr(self, '_allowed'):
if False in [isinstance(val, self._allowed) for val in items]:
raise TypeError('Invalid type encountered in the arguments.')
    def _set_slice(self, index, values):
        "Assign values to a slice of the object"
        try:
            iter(values)
        except TypeError:
            raise TypeError('can only assign an iterable to a slice')
        self._check_allowed(values)
        origLen = len(self)
        valueList = list(values)
        start, stop, step = index.indices(origLen)
        # CAREFUL: index.step and step are not the same!
        # step will never be None
        if index.step is None:
            # Simple slice (a[i:j]): the assignment may change the length.
            self._assign_simple_slice(start, stop, valueList)
        else:
            # Extended slice (a[i:j:k]): value count must match exactly.
            self._assign_extended_slice(start, stop, step, valueList)
    def _assign_extended_slice_rebuild(self, start, stop, step, valueList):
        'Assign an extended slice by rebuilding entire list'
        indexList = range(start, stop, step)
        # extended slice, only allow assigning slice of same size
        if len(valueList) != len(indexList):
            raise ValueError('attempt to assign sequence of size %d '
                             'to extended slice of size %d'
                             % (len(valueList), len(indexList)))
        # we're not changing the length of the sequence
        newLen = len(self)
        newVals = dict(zip(indexList, valueList))
        def newItems():
            # Yield every position in order, substituting the new value
            # where the slice targets it and keeping the original item
            # elsewhere.
            for i in xrange(newLen):
                if i in newVals:
                    yield newVals[i]
                else:
                    yield self._get_single_internal(i)
        self._rebuild(newLen, newItems())
    def _assign_extended_slice(self, start, stop, step, valueList):
        'Assign an extended slice by re-assigning individual items'
        # Used instead of the _rebuild variant when the subclass supplies
        # an efficient _set_single (see __init__).
        indexList = range(start, stop, step)
        # extended slice, only allow assigning slice of same size
        if len(valueList) != len(indexList):
            raise ValueError('attempt to assign sequence of size %d '
                             'to extended slice of size %d'
                             % (len(valueList), len(indexList)))
        for i, val in zip(indexList, valueList):
            self._set_single(i, val)
    def _assign_simple_slice(self, start, stop, valueList):
        'Assign a simple slice; Can assign slice of any length'
        origLen = len(self)
        # Normalize reversed bounds (e.g. a[5:2] = ...) to an insertion
        # point, matching list semantics.
        stop = max(start, stop)
        newLen = origLen - stop + start + len(valueList)
        def newItems():
            # Yield the rebuilt contents: originals before `start`, then
            # the new values, then originals from `stop` onward.  Looping
            # to origLen + 1 handles insertion at the very end.
            for i in xrange(origLen + 1):
                if i == start:
                    for val in valueList:
                        yield val
                if i < origLen:
                    if i < start or i >= stop:
                        yield self._get_single_internal(i)
        self._rebuild(newLen, newItems())
|
apache-2.0
|
caisq/tensorflow
|
tensorflow/python/ops/linalg_ops.py
|
10
|
23635
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for linear algebra."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import linalg_ops_impl
from tensorflow.python.ops import math_ops
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_linalg_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# Names below are lower_case.
# pylint: disable=invalid-name
def _RegularizedGramianCholesky(matrix, l2_regularizer, first_kind):
  r"""Computes Cholesky factorization of regularized gramian matrix.
  Below we will use the following notation for each pair of matrix and
  right-hand sides in the batch:
  `matrix`=\\(A \in \Re^{m \times n}\\),
  `output`=\\(C \in \Re^{\min(m, n) \times \min(m,n)}\\),
  `l2_regularizer`=\\(\lambda\\).
  If `first_kind` is True, returns the Cholesky factorization \\(L\\) such that
  \\(L L^H = A^H A + \lambda I\\).
  If `first_kind` is False, returns the Cholesky factorization \\(L\\) such that
  \\(L L^H = A A^H + \lambda I\\).
  Args:
    matrix: `Tensor` of shape `[..., M, N]`.
    l2_regularizer: 0-D `double` `Tensor`. Ignored if `fast=False`.
    first_kind: bool. Controls what gramian matrix to factor.
  Returns:
    output: `Tensor` of shape `[..., min(M,N), min(M,N)]` whose inner-most 2
      dimensions contain the Cholesky factors \\(L\\) described above.
  """
  gramian = math_ops.matmul(
      matrix, matrix, adjoint_a=first_kind, adjoint_b=not first_kind)
  # Only build the scaled identity when regularization can be non-zero: a
  # Tensor regularizer is always applied, a Python number only if != 0.
  if isinstance(l2_regularizer, ops.Tensor) or l2_regularizer != 0:
    matrix_shape = array_ops.shape(matrix)
    batch_shape = matrix_shape[:-2]
    if first_kind:
      small_dim = matrix_shape[-1]
    else:
      small_dim = matrix_shape[-2]
    identity = eye(small_dim, batch_shape=batch_shape, dtype=matrix.dtype)
    # Re-attach static shape information lost through the dynamic shape
    # computation above, so downstream shape inference still works.
    small_dim_static = matrix.shape[-1 if first_kind else -2]
    identity.set_shape(
        matrix.shape[:-2].concatenate([small_dim_static, small_dim_static]))
    gramian += l2_regularizer * identity
  return gen_linalg_ops.cholesky(gramian)
@tf_export('cholesky_solve', 'linalg.cholesky_solve')
def cholesky_solve(chol, rhs, name=None):
  """Solves systems of linear equations `A X = RHS` given Cholesky factors.

  `chol` holds the Cholesky factorization of `A`, e.g.
  `chol = tf.cholesky(A)`. Only the lower triangular parts (including the
  diagonal) of the last two dimensions of `chol` are used; the strictly
  upper part is assumed to be zero and is never accessed.

  ```python
  # Solve 10 separate 2x2 linear systems:
  A = ... # shape 10 x 2 x 2
  RHS = ... # shape 10 x 2 x 1
  chol = tf.cholesky(A)  # shape 10 x 2 x 2
  X = tf.cholesky_solve(chol, RHS)  # shape 10 x 2 x 1
  # tf.matmul(A, X) ~ RHS
  X[3, :, 0]  # Solution to the linear system A[3, :, :] x = RHS[3, :, 0]
  # Multiple right-hand sides (K > 1) work the same way, e.g. RHS of
  # shape 10 x 2 x 5 yields X of shape 10 x 2 x 5.
  ```

  Args:
    chol: A `Tensor`. Must be `float32` or `float64`, shape is `[..., M, M]`.
    rhs: A `Tensor`, same type as `chol`, shape is `[..., M, K]`.
    name: A name to give this `Op`. Defaults to `cholesky_solve`.

  Returns:
    Solution to `A x = rhs`, shape `[..., M, K]`.
  """
  with ops.name_scope(name, 'cholesky_solve', [chol, rhs]):
    # With A = C C^H: first solve C y = rhs (forward substitution),
    # then C^H x = y (back substitution with the adjoint factor).
    intermediate = gen_linalg_ops.matrix_triangular_solve(
        chol, rhs, adjoint=False, lower=True)
    return gen_linalg_ops.matrix_triangular_solve(
        chol, intermediate, adjoint=True, lower=True)
@tf_export('eye', 'linalg.eye')
def eye(num_rows,
        num_columns=None,
        batch_shape=None,
        dtype=dtypes.float32,
        name=None):
  """Construct an identity matrix, or a batch of matrices.

  ```python
  # Construct one identity matrix.
  tf.eye(2)
  ==> [[1., 0.],
       [0., 1.]]
  # Construct a batch of 3 identity matrices, each 2 x 2.
  # batch_identity[i, :, :] is a 2 x 2 identity matrix, i = 0, 1, 2.
  batch_identity = tf.eye(2, batch_shape=[3])
  # Construct one 2 x 3 "identity" matrix
  tf.eye(2, num_columns=3)
  ==> [[ 1.,  0.,  0.],
       [ 0.,  1.,  0.]]
  ```

  Args:
    num_rows: Non-negative `int32` scalar `Tensor` giving the number of rows
      in each batch matrix.
    num_columns: Optional non-negative `int32` scalar `Tensor` giving the
      number of columns in each batch matrix. Defaults to `num_rows`.
    batch_shape: A list or tuple of Python integers or a 1-D `int32` `Tensor`.
      If provided, the returned `Tensor` will have leading batch dimensions
      of this shape.
    dtype: The type of an element in the resulting `Tensor`.
    name: A name for this `Op`. Defaults to "eye".

  Returns:
    A `Tensor` of shape `batch_shape + [num_rows, num_columns]`.
  """
  # This public symbol simply delegates; the construction logic lives in
  # linalg_ops_impl.eye.
  return linalg_ops_impl.eye(num_rows,
                             num_columns=num_columns,
                             batch_shape=batch_shape,
                             dtype=dtype,
                             name=name)
@tf_export('matrix_solve_ls', 'linalg.lstsq')
def matrix_solve_ls(matrix, rhs, l2_regularizer=0.0, fast=True, name=None):
  r"""Solves one or more linear least-squares problems.
  `matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions
  form `M`-by-`N` matrices. Rhs is a tensor of shape `[..., M, K]` whose
  inner-most 2 dimensions form `M`-by-`K` matrices. The computed output is a
  `Tensor` of shape `[..., N, K]` whose inner-most 2 dimensions form `M`-by-`K`
  matrices that solve the equations
  `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]` in the least squares
  sense.
  Below we will use the following notation for each pair of matrix and
  right-hand sides in the batch:
  `matrix`=\\(A \in \Re^{m \times n}\\),
  `rhs`=\\(B \in \Re^{m \times k}\\),
  `output`=\\(X \in \Re^{n \times k}\\),
  `l2_regularizer`=\\(\lambda\\).
  If `fast` is `True`, then the solution is computed by solving the normal
  equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then
  \\(X = (A^T A + \lambda I)^{-1} A^T B\\), which solves the least-squares
  problem \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||A Z - B||_F^2 +
  \lambda ||Z||_F^2\\). If \\(m \lt n\\) then `output` is computed as
  \\(X = A^T (A A^T + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is
  the minimum-norm solution to the under-determined linear system, i.e.
  \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k}} ||Z||_F^2 \\), subject to
  \\(A Z = B\\). Notice that the fast path is only numerically stable when
  \\(A\\) is numerically full rank and has a condition number
  \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach}}}\\) or\\(\lambda\\)
  is sufficiently large.
  If `fast` is `False` an algorithm based on the numerically robust complete
  orthogonal decomposition is used. This computes the minimum-norm
  least-squares solution, even when \\(A\\) is rank deficient. This path is
  typically 6-7 times slower than the fast path. If `fast` is `False` then
  `l2_regularizer` is ignored.
  Args:
    matrix: `Tensor` of shape `[..., M, N]`.
    rhs: `Tensor` of shape `[..., M, K]`.
    l2_regularizer: 0-D `double` `Tensor`. Ignored if `fast=False`.
    fast: bool. Defaults to `True`.
    name: string, optional name of the operation.
  Returns:
    output: `Tensor` of shape `[..., N, K]` whose inner-most 2 dimensions form
      `M`-by-`K` matrices that solve the equations
      `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]` in the least
      squares sense.
  Raises:
    NotImplementedError: matrix_solve_ls is currently disabled for complex128
    and  l2_regularizer != 0 due to poor accuracy.
  """
  # pylint: disable=long-lambda
  def _use_composite_impl(fast, tensor_shape):
    """Determines whether to use the composite or specialized CPU kernel.
    When the total size of the tensor is larger than the cache size and the
    batch size is large compared to the smallest matrix dimension, then the
    composite implementation is inefficient since it has to read the entire
    tensor from memory multiple times. In this case we fall back to the
    original CPU kernel, which does all the computational steps on each
    matrix separately.
    Only fast mode is supported by the composite impl, so `False` is returned
    if `fast` is `False`.
    Args:
      fast: bool indicating if fast mode in the solver was requested.
      tensor_shape: The shape of the tensor.
    Returns:
      True if the composite impl should be used. False otherwise.
    """
    if fast is False:
      return False
    batch_shape = tensor_shape[:-2]
    matrix_shape = tensor_shape[-2:]
    if not tensor_shape.is_fully_defined():
      # Static shape unknown: default to the composite (GPU-capable) path.
      return True
    tensor_size = tensor_shape.num_elements() * matrix.dtype.size
    is_io_bound = batch_shape.num_elements() > np.min(matrix_shape)
    # Rough per-core L2 cache size in bytes; above this, re-reading the
    # whole tensor multiple times becomes the bottleneck.
    L2_CACHE_SIZE_GUESSTIMATE = 256000
    if tensor_size > L2_CACHE_SIZE_GUESSTIMATE and is_io_bound:
      return False
    else:
      return True
  def _overdetermined(matrix, rhs, l2_regularizer):
    """Computes (A^H*A + l2_regularizer)^{-1} * A^H * rhs."""
    chol = _RegularizedGramianCholesky(
        matrix, l2_regularizer=l2_regularizer, first_kind=True)
    return cholesky_solve(chol, math_ops.matmul(matrix, rhs, adjoint_a=True))
  def _underdetermined(matrix, rhs, l2_regularizer):
    """Computes A^H * (A*A^H + l2_regularizer)^{-1} * rhs."""
    chol = _RegularizedGramianCholesky(
        matrix, l2_regularizer=l2_regularizer, first_kind=False)
    return math_ops.matmul(matrix, cholesky_solve(chol, rhs), adjoint_a=True)
  def _composite_impl(matrix, rhs, l2_regularizer):
    """Composite implementation of matrix_solve_ls that supports GPU."""
    with ops.name_scope(name, 'matrix_solve_ls', [matrix, rhs, l2_regularizer]):
      matrix_shape = matrix.get_shape()[-2:]
      if matrix_shape.is_fully_defined():
        if matrix_shape[-2] >= matrix_shape[-1]:
          return _overdetermined(matrix, rhs, l2_regularizer)
        else:
          return _underdetermined(matrix, rhs, l2_regularizer)
      else:
        # We have to defer determining the shape to runtime and use
        # conditional execution of the appropriate graph.
        matrix_shape = array_ops.shape(matrix)[-2:]
        return control_flow_ops.cond(
            matrix_shape[-2] >= matrix_shape[-1],
            lambda: _overdetermined(matrix, rhs, l2_regularizer),
            lambda: _underdetermined(matrix, rhs, l2_regularizer))
  matrix = ops.convert_to_tensor(matrix, name='matrix')
  if matrix.dtype == dtypes.complex128 and l2_regularizer != 0:
    # TODO(rmlarsen): Investigate and fix accuracy bug.
    raise NotImplementedError('matrix_solve_ls is currently disabled for '
                              'complex128 and l2_regularizer != 0 due to '
                              'poor accuracy.')
  tensor_shape = matrix.get_shape()
  if _use_composite_impl(fast, tensor_shape):
    return _composite_impl(matrix, rhs, l2_regularizer)
  else:
    return gen_linalg_ops.matrix_solve_ls(
        matrix, rhs, l2_regularizer, fast=fast, name=name)
@tf_export('self_adjoint_eig', 'linalg.eigh')
def self_adjoint_eig(tensor, name=None):
  """Computes the eigen decomposition of a batch of self-adjoint matrices.

  For the innermost N-by-N matrices in `tensor`, computes eigenvalues and
  eigenvectors satisfying
  `tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i]`, for i=0...N-1.

  Args:
    tensor: `Tensor` of shape `[..., N, N]`. Only the lower triangular part
      of each inner inner matrix is referenced.
    name: string, optional name of the operation.

  Returns:
    e: Eigenvalues. Shape is `[..., N]`. Sorted in non-decreasing order.
    v: Eigenvectors. Shape is `[..., N, N]`. The columns of the inner most
      matrices contain eigenvectors of the corresponding matrices in `tensor`
  """
  eigenvalues, eigenvectors = gen_linalg_ops.self_adjoint_eig_v2(
      tensor, compute_v=True, name=name)
  return eigenvalues, eigenvectors
@tf_export('self_adjoint_eigvals', 'linalg.eigvalsh')
def self_adjoint_eigvals(tensor, name=None):
  """Computes the eigenvalues of one or more self-adjoint matrices.

  Note: If your program backpropagates through this function, you should
  replace it with a call to tf.self_adjoint_eig (possibly ignoring the second
  output) to avoid computing the eigen decomposition twice. This is because
  the eigenvectors are used to compute the gradient w.r.t. the eigenvalues.
  See _SelfAdjointEigV2Grad in linalg_grad.py.

  Args:
    tensor: `Tensor` of shape `[..., N, N]`.
    name: string, optional name of the operation.

  Returns:
    e: Eigenvalues. Shape is `[..., N]`. The vector `e[..., :]` contains the
      `N` eigenvalues of `tensor[..., :, :]`.
  """
  eigenvalues, _ = gen_linalg_ops.self_adjoint_eig_v2(
      tensor, compute_v=False, name=name)
  return eigenvalues
@tf_export('svd', 'linalg.svd')
def svd(tensor, full_matrices=False, compute_uv=True, name=None):
  r"""Computes the singular value decompositions of one or more matrices.
  Computes the SVD of each inner matrix in `tensor` such that
  `tensor[..., :, :] = u[..., :, :] * diag(s[..., :, :]) *
   transpose(conj(v[..., :, :]))`
  ```python
  # a is a tensor.
  # s is a tensor of singular values.
  # u is a tensor of left singular vectors.
  # v is a tensor of right singular vectors.
  s, u, v = svd(a)
  s = svd(a, compute_uv=False)
  ```
  Args:
    tensor: `Tensor` of shape `[..., M, N]`. Let `P` be the minimum of `M` and
      `N`.
    full_matrices: If true, compute full-sized `u` and `v`. If false
      (the default), compute only the leading `P` singular vectors.
      Ignored if `compute_uv` is `False`.
    compute_uv: If `True` then left and right singular vectors will be
      computed and returned in `u` and `v`, respectively. Otherwise, only the
      singular values will be computed, which can be significantly faster.
    name: string, optional name of the operation.
  Returns:
    s: Singular values. Shape is `[..., P]`. The values are sorted in reverse
      order of magnitude, so s[..., 0] is the largest value, s[..., 1] is the
      second largest, etc.
    u: Left singular vectors. If `full_matrices` is `False` (default) then
      shape is `[..., M, P]`; if `full_matrices` is `True` then shape is
      `[..., M, M]`. Not returned if `compute_uv` is `False`.
    v: Right singular vectors. If `full_matrices` is `False` (default) then
      shape is `[..., N, P]`. If `full_matrices` is `True` then shape is
      `[..., N, N]`. Not returned if `compute_uv` is `False`.
  @compatibility(numpy)
  Mostly equivalent to numpy.linalg.svd, except that
    * The order of output  arguments here is `s`, `u`, `v` when `compute_uv`
      is `True`, as opposed to `u`, `s`, `v` for numpy.linalg.svd.
    * full_matrices is `False` by default as opposed to `True` for
       numpy.linalg.svd.
    * tf.linalg.svd uses the standard definition of the SVD
      \\(A = U \Sigma V^H\\), such that the left singular vectors of `a` are
      the columns of `u`, while the right singular vectors of `a` are the
      columns of `v`. On the other hand, numpy.linalg.svd returns the adjoint
      \\(V^H\\) as the third output argument.
  ```python
  import tensorflow as tf
  import numpy as np
  s, u, v = tf.linalg.svd(a)
  tf_a_approx = tf.matmul(u, tf.matmul(tf.linalg.diag(s), v, adjoint_v=True))
  u, s, v_adj = np.linalg.svd(a, full_matrices=False)
  np_a_approx = np.dot(u, np.dot(np.diag(s), v_adj))
  # tf_a_approx and np_a_approx should be numerically close.
  ```
  @end_compatibility
  """
  s, u, v = gen_linalg_ops.svd(
      tensor, compute_uv=compute_uv, full_matrices=full_matrices, name=name)
  # Singular values are mathematically real, so the kernel's output is cast
  # with math_ops.real before being returned.
  if compute_uv:
    return math_ops.real(s), u, v
  else:
    return math_ops.real(s)
# pylint: disable=redefined-builtin
@tf_export('norm', 'linalg.norm')
@deprecation.deprecated_args(
    None, 'keep_dims is deprecated, use keepdims instead', 'keep_dims')
def norm(tensor,
         ord='euclidean',
         axis=None,
         keepdims=None,
         name=None,
         keep_dims=None):
  r"""Computes the norm of vectors, matrices, and tensors.
  This function can compute several different vector norms (the 1-norm, the
  Euclidean or 2-norm, the inf-norm, and in general the p-norm for p > 0) and
  matrix norms (Frobenius, 1-norm, 2-norm and inf-norm).
  Args:
    tensor: `Tensor` of types `float32`, `float64`, `complex64`, `complex128`
    ord: Order of the norm. Supported values are 'fro', 'euclidean',
      `1`, `2`, `np.inf` and any positive real number yielding the corresponding
      p-norm. Default is 'euclidean' which is equivalent to Frobenius norm if
      `tensor` is a matrix and equivalent to 2-norm for vectors.
      Some restrictions apply:
        a) The Frobenius norm `fro` is not defined for vectors,
        b) If axis is a 2-tuple (matrix norm), only 'euclidean', 'fro', `1`,
           `2`, `np.inf` are supported.
      See the description of `axis` on how to compute norms for a batch of
      vectors or matrices stored in a tensor.
    axis: If `axis` is `None` (the default), the input is considered a vector
      and a single vector norm is computed over the entire set of values in the
      tensor, i.e. `norm(tensor, ord=ord)` is equivalent to
      `norm(reshape(tensor, [-1]), ord=ord)`.
      If `axis` is a Python integer, the input is considered a batch of vectors,
      and `axis` determines the axis in `tensor` over which to compute vector
      norms.
      If `axis` is a 2-tuple of Python integers it is considered a batch of
      matrices and `axis` determines the axes in `tensor` over which to compute
      a matrix norm.
      Negative indices are supported. Example: If you are passing a tensor that
      can be either a matrix or a batch of matrices at runtime, pass
      `axis=[-2,-1]` instead of `axis=None` to make sure that matrix norms are
      computed.
    keepdims: If True, the axis indicated in `axis` are kept with size 1.
      Otherwise, the dimensions in `axis` are removed from the output shape.
    name: The name of the op.
    keep_dims: Deprecated alias for `keepdims`.
  Returns:
    output: A `Tensor` of the same type as tensor, containing the vector or
      matrix norms. If `keepdims` is True then the rank of output is equal to
      the rank of `tensor`. Otherwise, if `axis` is none the output is a scalar,
      if `axis` is an integer, the rank of `output` is one less than the rank
      of `tensor`, if `axis` is a 2-tuple the rank of `output` is two less
      than the rank of `tensor`.
  Raises:
    ValueError: If `ord` or `axis` is invalid.
  @compatibility(numpy)
  Mostly equivalent to numpy.linalg.norm.
  Not supported: ord <= 0, 2-norm for matrices, nuclear norm.
  Other differences:
    a) If axis is `None`, treats the flattened `tensor` as a vector
     regardless of rank.
    b) Explicitly supports 'euclidean' norm as the default, including for
     higher order tensors.
  @end_compatibility
  """
  # Resolve the deprecated keep_dims alias before anything else.
  keepdims = deprecation.deprecated_argument_lookup('keepdims', keepdims,
                                                    'keep_dims', keep_dims)
  if keepdims is None:
    keepdims = False
  is_matrix_norm = ((isinstance(axis, tuple) or isinstance(axis, list)) and
                    len(axis) == 2)
  if is_matrix_norm:
    axis = tuple(axis)
    if (not isinstance(axis[0], int) or not isinstance(axis[1], int) or
        axis[0] == axis[1]):
      raise ValueError(
          "'axis' must be None, an integer, or a tuple of 2 unique integers")
    supported_matrix_norms = ['euclidean', 'fro', 1, 2, np.inf]
    if ord not in supported_matrix_norms:
      raise ValueError("'ord' must be a supported matrix norm in %s, got %s" %
                       (supported_matrix_norms, ord))
  else:
    if not (isinstance(axis, int) or axis is None):
      raise ValueError(
          "'axis' must be None, an integer, or a tuple of 2 unique integers")
    supported_vector_norms = ['euclidean', 1, 2, np.inf]
    if (not np.isreal(ord) or ord <= 0) and ord not in supported_vector_norms:
      raise ValueError("'ord' must be a supported vector norm, got %s" % ord)
    if axis is not None:
      # Normalize to a 1-tuple so the reduction code below is uniform.
      axis = (axis,)
  with ops.name_scope(name, 'norm', [tensor]):
    tensor = ops.convert_to_tensor(tensor)
    if ord in ['fro', 'euclidean', 2, 2.0]:
      if is_matrix_norm and ord in [2, 2.0]:
        # Matrix 2-norm (largest singular value): move the two norm axes
        # to the end, take the SVD there, reduce, and transpose back.
        rank = array_ops.rank(tensor)
        positive_axis = functional_ops.map_fn(
            lambda i: control_flow_ops.cond(i >= 0, lambda: i, lambda: i + rank),
            ops.convert_to_tensor(axis))
        axes = math_ops.range(rank)
        perm_before = array_ops.concat(
            [array_ops.setdiff1d(axes, positive_axis)[0], positive_axis],
            axis=0)
        perm_after = functional_ops.map_fn(
            lambda i: math_ops.cast(
                array_ops.squeeze(
                    array_ops.where(math_ops.equal(perm_before, i))),
                dtype=dtypes.int32), axes)
        permed = array_ops.transpose(tensor, perm=perm_before)
        matrix_2_norm = array_ops.expand_dims(
            math_ops.reduce_max(
                math_ops.abs(gen_linalg_ops.svd(permed, compute_uv=False)[0]),
                axis=-1,
                keepdims=True),
            axis=-1)
        result = array_ops.transpose(matrix_2_norm, perm=perm_after)
      else:
        # Frobenius / Euclidean norm: sqrt of the summed squared
        # magnitudes (conj handles complex inputs).
        result = math_ops.sqrt(
            math_ops.reduce_sum(
                tensor * math_ops.conj(tensor), axis, keepdims=True))
    else:
      result = math_ops.abs(tensor)
      if ord == 1:
        sum_axis = None if axis is None else axis[0]
        result = math_ops.reduce_sum(result, sum_axis, keepdims=True)
        if is_matrix_norm:
          result = math_ops.reduce_max(result, axis[-1], keepdims=True)
      elif ord == np.inf:
        if is_matrix_norm:
          result = math_ops.reduce_sum(result, axis[1], keepdims=True)
        max_axis = None if axis is None else axis[0]
        result = math_ops.reduce_max(result, max_axis, keepdims=True)
      else:
        # General p-norms (positive p only)
        result = math_ops.pow(
            math_ops.reduce_sum(math_ops.pow(result, ord), axis, keepdims=True),
            1.0 / ord)
    if not keepdims:
      result = array_ops.squeeze(result, axis)
    return result
# pylint: enable=invalid-name,redefined-builtin
|
apache-2.0
|
breathe/ansible-modules-extras
|
web_infrastructure/jira.py
|
78
|
10086
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Steve Smith <ssmith@atlassian.com>
# Atlassian open-source approval reference OSR-76.
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
module: jira
version_added: "1.6"
short_description: create and modify issues in a JIRA instance
description:
- Create and modify issues in a JIRA instance.
options:
uri:
required: true
description:
- Base URI for the JIRA instance
operation:
required: true
aliases: [ command ]
choices: [ create, comment, edit, fetch, transition ]
description:
- The operation to perform.
username:
required: true
description:
- The username to log-in with.
password:
required: true
description:
- The password to log-in with.
project:
aliases: [ prj ]
required: false
description:
- The project for this operation. Required for issue creation.
summary:
required: false
description:
- The issue summary, where appropriate.
description:
required: false
description:
- The issue description, where appropriate.
issuetype:
required: false
description:
- The issue type, for issue creation.
issue:
required: false
description:
- An existing issue key to operate on.
comment:
required: false
description:
- The comment text to add.
status:
required: false
description:
- The desired status; only relevant for the transition operation.
assignee:
required: false
description:
- Sets the assignee on create or transition operations. Note not all transitions will allow this.
fields:
required: false
description:
- This is a free-form data structure that can contain arbitrary data. This is passed directly to the JIRA REST API (possibly after merging with other required data, as when passed to create). See examples for more information, and the JIRA REST API for the structure required for various fields.
notes:
- "Currently this only works with basic-auth."
author: "Steve Smith (@tarka)"
"""
EXAMPLES = """
# Create a new issue and add a comment to it:
- name: Create an issue
jira: uri={{server}} username={{user}} password={{pass}}
project=ANS operation=create
summary="Example Issue" description="Created using Ansible" issuetype=Task
register: issue
- name: Comment on issue
jira: uri={{server}} username={{user}} password={{pass}}
issue={{issue.meta.key}} operation=comment
comment="A comment added by Ansible"
# Assign an existing issue using edit
- name: Assign an issue using free-form fields
jira: uri={{server}} username={{user}} password={{pass}}
issue={{issue.meta.key}} operation=edit
assignee=ssmith
# Create an issue with an existing assignee
- name: Create an assigned issue
jira: uri={{server}} username={{user}} password={{pass}}
project=ANS operation=create
summary="Assigned issue" description="Created and assigned using Ansible"
issuetype=Task assignee=ssmith
# Edit an issue using free-form fields
- name: Set the labels on an issue using free-form fields
jira: uri={{server}} username={{user}} password={{pass}}
issue={{issue.meta.key}} operation=edit
args: { fields: {labels: ["autocreated", "ansible"]}}
- name: Set the labels on an issue, YAML version
jira: uri={{server}} username={{user}} password={{pass}}
issue={{issue.meta.key}} operation=edit
args:
fields:
labels:
- "autocreated"
- "ansible"
- "yaml"
# Retrieve metadata for an issue and use it to create an account
- name: Get an issue
jira: uri={{server}} username={{user}} password={{pass}}
project=ANS operation=fetch issue="ANS-63"
register: issue
- name: Create a unix account for the reporter
sudo: true
user: name="{{issue.meta.fields.creator.name}}" comment="{{issue.meta.fields.creator.displayName}}"
# Transition an issue by target status
- name: Close the issue
jira: uri={{server}} username={{user}} password={{pass}}
issue={{issue.meta.key}} operation=transition status="Done"
"""
import json
import base64
def request(url, user, passwd, data=None, method=None):
    """Issue an authenticated JSON request to JIRA and decode the reply.

    Returns the decoded JSON body as a dict, or {} when the response body
    is empty.  Calls module.fail_json() on any status other than 200/204.
    """
    if data:
        data = json.dumps(data)

    # NOTE: fetch_url uses a password manager, which follows the
    # standard request-then-challenge basic-auth semantics. However as
    # JIRA allows some unauthorised operations it doesn't necessarily
    # send the challenge, so the request occurs as the anonymous user,
    # resulting in unexpected results. To work around this we manually
    # inject the basic-auth header up-front to ensure that JIRA treats
    # the requests as authorized for this user.
    # base64.b64encode never inserts newlines (unlike the deprecated
    # base64.encodestring), so no stripping is needed.
    auth = base64.b64encode('%s:%s' % (user, passwd))
    response, info = fetch_url(module, url, data=data, method=method,
                               headers={'Content-Type':'application/json',
                                        'Authorization':"Basic %s" % auth})

    if info['status'] not in (200, 204):
        module.fail_json(msg=info['msg'])

    body = response.read()

    if body:
        return json.loads(body)
    else:
        return {}
def post(url, user, passwd, data):
    # JSON POST helper (used by create, comment and transition).
    return request(url, user, passwd, data=data, method='POST')
def put(url, user, passwd, data):
    # JSON PUT helper (used by edit).
    return request(url, user, passwd, data=data, method='PUT')
def get(url, user, passwd):
    # Plain GET helper with basic-auth (used by fetch and transition).
    return request(url, user, passwd)
def create(restbase, user, passwd, params):
    """Create a new issue from the module params and return the JIRA
    REST response for the created issue."""
    issue_fields = {
        'project': {'key': params['project']},
        'summary': params['summary'],
        'description': params['description'],
        'issuetype': {'name': params['issuetype']},
    }
    # Caller-supplied fields override the defaults assembled above.
    extra = params['fields']
    if extra:
        issue_fields.update(extra)
    return post(restbase + '/issue/', user, passwd, {'fields': issue_fields})
def comment(restbase, user, passwd, params):
    """Append a comment to an existing issue."""
    target = restbase + '/issue/' + params['issue'] + '/comment'
    payload = {'body': params['comment']}
    return post(target, user, passwd, payload)
def edit(restbase, user, passwd, params):
    """Update fields on an existing issue via PUT."""
    target = restbase + '/issue/' + params['issue']
    payload = {'fields': params['fields']}
    return put(target, user, passwd, payload)
def fetch(restbase, user, passwd, params):
    """Retrieve the full JSON record for a single issue."""
    return get(restbase + '/issue/' + params['issue'], user, passwd)
def transition(restbase, user, passwd, params):
    """Move an issue to the workflow status named in params['status'].

    JIRA addresses transitions by id, so first fetch the transitions
    available for the issue, resolve the target status name to an id,
    then POST the transition.  Raises ValueError when the issue has no
    transition leading to the requested status.
    """
    # The same endpoint both lists and performs transitions.
    url = restbase + '/issue/' + params['issue'] + "/transitions"
    tmeta = get(url, user, passwd)
    target = params['status']
    tid = None
    for t in tmeta['transitions']:
        if t['name'] == target:
            tid = t['id']
            break
    if not tid:
        # Fixed wording (was "Failed find valid transition").
        raise ValueError("Failed to find valid transition for '%s'" % target)
    # Perform it
    data = {'transition': {"id": tid},
            'fields': params['fields']}
    return post(url, user, passwd, data)
# Some parameters are required depending on the operation.
# Bug fix: 'edit' and 'transition' both build a URL from params['issue'],
# so a missing issue previously crashed with a TypeError during string
# concatenation instead of producing the clean fail_json message.
OP_REQUIRED = dict(create=['project', 'issuetype', 'summary', 'description'],
                   comment=['issue', 'comment'],
                   edit=['issue'],
                   fetch=['issue'],
                   transition=['issue', 'status'])
def main():
    """Ansible entry point: parse arguments, validate the per-operation
    required parameters, then dispatch to the matching module-level
    function (create/comment/edit/fetch/transition)."""
    global module
    module = AnsibleModule(
        argument_spec=dict(
            uri=dict(required=True),
            operation=dict(choices=['create', 'comment', 'edit', 'fetch', 'transition'],
                           aliases=['command'], required=True),
            username=dict(required=True),
            password=dict(required=True),
            project=dict(),
            summary=dict(),
            description=dict(),
            issuetype=dict(),
            issue=dict(aliases=['ticket']),
            comment=dict(),
            status=dict(),
            assignee=dict(),
            fields=dict(default={})
        ),
        supports_check_mode=False
    )
    op = module.params['operation']
    # Check we have the necessary per-operation parameters
    missing = []
    for parm in OP_REQUIRED[op]:
        if not module.params[parm]:
            missing.append(parm)
    if missing:
        module.fail_json(msg="Operation %s require the following missing parameters: %s" % (op, ",".join(missing)))
    # Handle rest of parameters
    uri = module.params['uri']
    user = module.params['username']
    passwd = module.params['password']
    # A bare 'assignee' param is shorthand for the equivalent fields dict.
    if module.params['assignee']:
        module.params['fields']['assignee'] = { 'name': module.params['assignee'] }
    if not uri.endswith('/'):
        uri = uri+'/'
    restbase = uri + 'rest/api/2'
    # Dispatch
    try:
        # Lookup the corresponding method for this operation. This is
        # safe as the AnsibleModule should remove any unknown operations.
        thismod = sys.modules[__name__]
        method = getattr(thismod, op)
        ret = method(restbase, user, passwd, module.params)
    except Exception, e:
        return module.fail_json(msg=e.message)
    module.exit_json(changed=True, meta=ret)
# Ansible module boilerplate: these star-imports must sit at the bottom
# of the file so the Ansible module builder can splice its code in, and
# main() is invoked unconditionally when the module runs on the target.
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
|
gpl-3.0
|
enthought/shiboken
|
tests/samplebinding/lock_test.py
|
9
|
1608
|
#!/usr/bin/env python
'''Simple test with a blocking C++ method that should allow python
threads to run.'''
import unittest
import threading
from sample import Bucket
class Unlocker(threading.Thread):
    """Helper thread: busy-waits until `bucket` reports it is locked,
    then releases it, letting the blocked main thread proceed."""
    def __init__(self, bucket):
        threading.Thread.__init__(self)
        self.bucket = bucket
    def run(self):
        # Spin until the other side has actually acquired the lock.
        while True:
            if self.bucket.locked():
                break
        self.bucket.unlock()
class MyBucket(Bucket):
    """Bucket subclass whose virtual blocker method is reimplemented in
    Python, to exercise C++ -> Python virtual dispatch while blocking."""
    def __init__(self):
        super(MyBucket, self).__init__()
    def virtualBlockerMethod(self):
        # Acquire the lock from Python; the C++ caller must have released
        # the GIL so the unlocker thread can run.
        self.lock()
        return True
class TestLockUnlock(unittest.TestCase):
    """Checks that a blocking C++ call releases the GIL so Python
    threads keep running (basic, virtual, and reimplemented-virtual)."""
    def testBasic(self):
        '''Locking in C++ and releasing in a python thread'''
        bucket = Bucket()
        unlocker = Unlocker(bucket)
        unlocker.start()
        # Blocks until the Unlocker thread releases the lock.
        bucket.lock()
        unlocker.join()
    def testVirtualBlocker(self):
        '''Same as the basic case but blocker method is a C++ virtual called from C++.'''
        bucket = Bucket()
        unlocker = Unlocker(bucket)
        unlocker.start()
        result = bucket.callVirtualBlockerMethodButYouDontKnowThis()
        unlocker.join()
        # assertTrue replaces the deprecated assert_ alias (removed in
        # modern unittest).
        self.assertTrue(result)
    def testReimplementedVirtualBlocker(self):
        '''Same as the basic case but blocker method is a C++ virtual reimplemented in Python and called from C++.'''
        mybucket = MyBucket()
        unlocker = Unlocker(mybucket)
        unlocker.start()
        result = mybucket.callVirtualBlockerMethodButYouDontKnowThis()
        unlocker.join()
        self.assertTrue(result)
# Run the suite when the file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
gpl-2.0
|
etuna-SBF-kog/Stadsparken
|
env/lib/python2.7/site-packages/django/contrib/admin/helpers.py
|
84
|
13636
|
from django import forms
from django.contrib.admin.util import (flatten_fieldsets, lookup_field,
display_for_field, label_for_field, help_text_for_field)
from django.contrib.admin.templatetags.admin_static import static
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.fields.related import ManyToManyRel
from django.forms.util import flatatt
from django.template.defaultfilters import capfirst
from django.utils.encoding import force_unicode, smart_unicode
from django.utils.html import escape, conditional_escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
ACTION_CHECKBOX_NAME = '_selected_action'
class ActionForm(forms.Form):
    """Form rendered above the admin changelist for bulk actions."""
    # Choices are populated by the ModelAdmin at render time.
    action = forms.ChoiceField(label=_('Action:'))
    # Hidden flag toggled by JS when "select all across pages" is used.
    select_across = forms.BooleanField(label='', required=False, initial=0,
        widget=forms.HiddenInput({'class': 'select-across'}))
# Shared widget used to render the per-row selection checkbox.
checkbox = forms.CheckboxInput({'class': 'action-select'}, lambda value: False)
class AdminForm(object):
    """Wraps a Form together with fieldset, prepopulated-field and
    readonly-field metadata for rendering the admin add/change page."""
    def __init__(self, form, fieldsets, prepopulated_fields, readonly_fields=None, model_admin=None):
        self.form, self.fieldsets = form, normalize_fieldsets(fieldsets)
        # Resolve field names to BoundFields once, up front.
        self.prepopulated_fields = [{
            'field': form[field_name],
            'dependencies': [form[f] for f in dependencies]
        } for field_name, dependencies in prepopulated_fields.items()]
        self.model_admin = model_admin
        if readonly_fields is None:
            readonly_fields = ()
        self.readonly_fields = readonly_fields
    def __iter__(self):
        # One Fieldset wrapper per (name, options) pair.
        for name, options in self.fieldsets:
            yield Fieldset(self.form, name,
                readonly_fields=self.readonly_fields,
                model_admin=self.model_admin,
                **options
            )
    def first_field(self):
        """Return the first declared BoundField (for autofocus), falling
        back to the form's first field; None when the form is empty."""
        try:
            fieldset_name, fieldset_options = self.fieldsets[0]
            field_name = fieldset_options['fields'][0]
            # A fieldset entry may be a tuple of names (fields on one line).
            if not isinstance(field_name, basestring):
                field_name = field_name[0]
            return self.form[field_name]
        except (KeyError, IndexError):
            pass
        try:
            return iter(self.form).next()
        except StopIteration:
            return None
    def _media(self):
        # Combined media of the form plus every fieldset it renders.
        media = self.form.media
        for fs in self:
            media = media + fs.media
        return media
    media = property(_media)
class Fieldset(object):
    """One named group of fields (plus CSS classes and description) as
    declared in ModelAdmin.fieldsets."""
    def __init__(self, form, name=None, readonly_fields=(), fields=(), classes=(),
      description=None, model_admin=None):
        self.form = form
        self.name, self.fields = name, fields
        self.classes = u' '.join(classes)
        self.description = description
        self.model_admin = model_admin
        self.readonly_fields = readonly_fields
    def _media(self):
        # Collapsible fieldsets pull in the collapse JS (minified unless DEBUG).
        if 'collapse' in self.classes:
            extra = '' if settings.DEBUG else '.min'
            js = ['jquery%s.js' % extra,
                  'jquery.init.js',
                  'collapse%s.js' % extra]
            return forms.Media(js=[static('admin/js/%s' % url) for url in js])
        return forms.Media()
    media = property(_media)
    def __iter__(self):
        # One Fieldline per declared entry (an entry may be a tuple of names).
        for field in self.fields:
            yield Fieldline(self.form, field, self.readonly_fields, model_admin=self.model_admin)
class Fieldline(object):
    """One rendered line of the form: either a single field name or a
    tuple of names shown side by side."""
    def __init__(self, form, field, readonly_fields=None, model_admin=None):
        self.form = form # A django.forms.Form instance
        # Normalize a single name into a one-element list.
        if not hasattr(field, "__iter__"):
            self.fields = [field]
        else:
            self.fields = field
        self.model_admin = model_admin
        if readonly_fields is None:
            readonly_fields = ()
        self.readonly_fields = readonly_fields
    def __iter__(self):
        # Readonly fields render as static text, others as widgets.
        for i, field in enumerate(self.fields):
            if field in self.readonly_fields:
                yield AdminReadonlyField(self.form, field, is_first=(i == 0),
                    model_admin=self.model_admin)
            else:
                yield AdminField(self.form, field, is_first=(i == 0))
    def errors(self):
        # Concatenated error lists of the editable fields on this line.
        return mark_safe(u'\n'.join([self.form[f].errors.as_ul() for f in self.fields if f not in self.readonly_fields]).strip('\n'))
class AdminField(object):
    """Render-time wrapper around a single editable BoundField."""
    def __init__(self, form, field, is_first):
        self.field = form[field] # A django.forms.BoundField instance
        self.is_first = is_first # Whether this field is first on the line
        self.is_checkbox = isinstance(self.field.field.widget, forms.CheckboxInput)
    def label_tag(self):
        """Build the <label> with admin CSS classes (checkbox/required/inline)."""
        classes = []
        contents = conditional_escape(force_unicode(self.field.label))
        if self.is_checkbox:
            classes.append(u'vCheckboxLabel')
        else:
            # Non-checkbox labels get a trailing colon.
            contents += u':'
        if self.field.field.required:
            classes.append(u'required')
        if not self.is_first:
            classes.append(u'inline')
        attrs = classes and {'class': u' '.join(classes)} or {}
        return self.field.label_tag(contents=mark_safe(contents), attrs=attrs)
    def errors(self):
        return mark_safe(self.field.errors.as_ul())
class AdminReadonlyField(object):
    """Render-time wrapper for a readonly entry: the value is shown as
    static text and `field` may be a model field name, a callable, or a
    ModelAdmin method name."""
    def __init__(self, form, field, is_first, model_admin=None):
        label = label_for_field(field, form._meta.model, model_admin)
        # Make self.field look a little bit like a field. This means that
        # {{ field.name }} must be a useful class name to identify the field.
        # For convenience, store other field-related data here too.
        if callable(field):
            class_name = field.__name__ != '<lambda>' and field.__name__ or ''
        else:
            class_name = field
        self.field = {
            'name': class_name,
            'label': label,
            'field': field,
            'help_text': help_text_for_field(class_name, form._meta.model)
        }
        self.form = form
        self.model_admin = model_admin
        self.is_first = is_first
        self.is_checkbox = False
        self.is_readonly = True
    def label_tag(self):
        """Build the static <label>; non-first fields get class="inline"."""
        attrs = {}
        if not self.is_first:
            attrs["class"] = "inline"
        label = self.field['label']
        contents = capfirst(force_unicode(escape(label))) + u":"
        return mark_safe('<label%(attrs)s>%(contents)s</label>' % {
            "attrs": flatatt(attrs),
            "contents": contents,
        })
    def contents(self):
        """Resolve and format the readonly value for display, escaping
        unless the attribute opted into allow_tags."""
        # Imported locally to avoid circular imports at module load time.
        from django.contrib.admin.templatetags.admin_list import _boolean_icon
        from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
        field, obj, model_admin = self.field['field'], self.form.instance, self.model_admin
        try:
            f, attr, value = lookup_field(field, obj, model_admin)
        except (AttributeError, ValueError, ObjectDoesNotExist):
            result_repr = EMPTY_CHANGELIST_VALUE
        else:
            if f is None:
                # Not a model field: a callable or admin attribute.
                boolean = getattr(attr, "boolean", False)
                if boolean:
                    result_repr = _boolean_icon(value)
                else:
                    result_repr = smart_unicode(value)
                    if getattr(attr, "allow_tags", False):
                        result_repr = mark_safe(result_repr)
            else:
                if value is None:
                    result_repr = EMPTY_CHANGELIST_VALUE
                elif isinstance(f.rel, ManyToManyRel):
                    result_repr = ", ".join(map(unicode, value.all()))
                else:
                    result_repr = display_for_field(value, f)
        return conditional_escape(result_repr)
class InlineAdminFormSet(object):
    """
    A wrapper around an inline formset for use in the admin system.
    """
    def __init__(self, inline, formset, fieldsets, prepopulated_fields=None,
      readonly_fields=None, model_admin=None):
        self.opts = inline
        self.formset = formset
        self.fieldsets = fieldsets
        self.model_admin = model_admin
        if readonly_fields is None:
            readonly_fields = ()
        self.readonly_fields = readonly_fields
        if prepopulated_fields is None:
            prepopulated_fields = {}
        self.prepopulated_fields = prepopulated_fields
    def __iter__(self):
        # Bound forms paired with their existing objects first...
        for form, original in zip(self.formset.initial_forms, self.formset.get_queryset()):
            yield InlineAdminForm(self.formset, form, self.fieldsets,
                self.prepopulated_fields, original, self.readonly_fields,
                model_admin=self.opts)
        # ...then the blank extra forms...
        for form in self.formset.extra_forms:
            yield InlineAdminForm(self.formset, form, self.fieldsets,
                self.prepopulated_fields, None, self.readonly_fields,
                model_admin=self.opts)
        # ...and finally the JS template form (empty_form).
        yield InlineAdminForm(self.formset, self.formset.empty_form,
            self.fieldsets, self.prepopulated_fields, None,
            self.readonly_fields, model_admin=self.opts)
    def fields(self):
        """Yield header metadata for each column, skipping the FK back to
        the parent object."""
        fk = getattr(self.formset, "fk", None)
        for i, field in enumerate(flatten_fieldsets(self.fieldsets)):
            if fk and fk.name == field:
                continue
            if field in self.readonly_fields:
                yield {
                    'label': label_for_field(field, self.opts.model, self.opts),
                    'widget': {
                        'is_hidden': False
                    },
                    'required': False
                }
            else:
                yield self.formset.form.base_fields[field]
    def _media(self):
        # Inline options media + formset media + per-form media.
        media = self.opts.media + self.formset.media
        for fs in self:
            media = media + fs.media
        return media
    media = property(_media)
class InlineAdminForm(AdminForm):
    """
    A wrapper around an inline form for use in the admin system.
    """
    def __init__(self, formset, form, fieldsets, prepopulated_fields, original,
      readonly_fields=None, model_admin=None):
        self.formset = formset
        self.model_admin = model_admin
        self.original = original
        # original is None for extra/empty forms, so the content-type id
        # is only available on bound forms.
        if original is not None:
            self.original_content_type_id = ContentType.objects.get_for_model(original).pk
        self.show_url = original and hasattr(original, 'get_absolute_url')
        super(InlineAdminForm, self).__init__(form, fieldsets, prepopulated_fields,
            readonly_fields, model_admin)
    def __iter__(self):
        for name, options in self.fieldsets:
            yield InlineFieldset(self.formset, self.form, name,
                self.readonly_fields, model_admin=self.model_admin, **options)
    def has_auto_field(self):
        """True when the model (or any concrete parent) has an AutoField."""
        if self.form._meta.model._meta.has_auto_field:
            return True
        # Also search any parents for an auto field.
        for parent in self.form._meta.model._meta.get_parent_list():
            if parent._meta.has_auto_field:
                return True
        return False
    def field_count(self):
        # tabular.html uses this function for colspan value.
        num_of_fields = 0
        if self.has_auto_field():
            num_of_fields += 1
        num_of_fields += len(self.fieldsets[0][1]["fields"])
        if self.formset.can_order:
            num_of_fields += 1
        if self.formset.can_delete:
            num_of_fields += 1
        return num_of_fields
    def pk_field(self):
        return AdminField(self.form, self.formset._pk_field.name, False)
    def fk_field(self):
        # The FK back to the parent object, when the formset has one.
        fk = getattr(self.formset, "fk", None)
        if fk:
            return AdminField(self.form, fk.name, False)
        else:
            return ""
    def deletion_field(self):
        from django.forms.formsets import DELETION_FIELD_NAME
        return AdminField(self.form, DELETION_FIELD_NAME, False)
    def ordering_field(self):
        from django.forms.formsets import ORDERING_FIELD_NAME
        return AdminField(self.form, ORDERING_FIELD_NAME, False)
class InlineFieldset(Fieldset):
    """Fieldset variant for inlines that hides the foreign key pointing
    back to the parent object (it is rendered separately)."""
    def __init__(self, formset, *args, **kwargs):
        self.formset = formset
        super(InlineFieldset, self).__init__(*args, **kwargs)
    def __iter__(self):
        fk = getattr(self.formset, "fk", None)
        for field in self.fields:
            # Skip the parent-link FK; fk_field() renders it instead.
            if fk and field == fk.name:
                continue
            yield Fieldline(self.form, field, self.readonly_fields,
                            model_admin=self.model_admin)
class AdminErrorList(forms.util.ErrorList):
    """
    Stores all errors for the form/formsets in an add/change stage view.
    """
    def __init__(self, form, inline_formsets):
        # Collect errors only when the main form was actually submitted.
        if form.is_bound:
            self.extend(form.errors.values())
            for inline_formset in inline_formsets:
                # Formset-level errors plus each inline form's field errors.
                self.extend(inline_formset.non_form_errors())
                for errors_in_inline_form in inline_formset.errors:
                    self.extend(errors_in_inline_form.values())
def normalize_fieldsets(fieldsets):
    """
    Make sure the keys in fieldset dictionaries are strings. Returns the
    normalized data as a list of (name, options) pairs.
    """
    return [(name, normalize_dictionary(options))
            for name, options in fieldsets]
def normalize_dictionary(data_dict):
    """
    Converts all the keys in "data_dict" to strings, in place, and
    returns the same dictionary. The keys must be convertible using
    str().
    """
    # Snapshot the items first: deleting and re-inserting keys while
    # iterating the live items() view works on Python 2 (where items()
    # returns a list) but raises RuntimeError on Python 3 and is fragile.
    for key, value in list(data_dict.items()):
        if not isinstance(key, str):
            del data_dict[key]
            data_dict[str(key)] = value
    return data_dict
|
gpl-3.0
|
kmee/account-invoicing
|
stock_picking_invoicing/tests/test_picking_invoicing.py
|
21
|
3137
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Lorenzo Battistini <lorenzo.battistini@agilebg.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp.tests.common as test_common
class TestPickingInvoicing(test_common.SingleTransactionCase):
    """End-to-end check: transferring a picking flagged for invoicing
    creates an invoice addressed to the commercial partner."""
    def setUp(self):
        super(TestPickingInvoicing, self).setUp()
        # Cache the model proxies used throughout the test.
        self.picking_model = self.env['stock.picking']
        self.move_model = self.env['stock.move']
        self.invoice_wizard = self.env['stock.invoice.onshipping']
        self.invoice_model = self.env['account.invoice']
        self.partner_model = self.env['res.partner']
    def test_0_picking_invoicing(self):
        """Create a picking, transfer it, invoice it via the wizard and
        verify state and invoice partner."""
        agrolait = self.partner_model.browse(self.ref('base.res_partner_2'))
        # setting Agrolait type to default, because it's 'contact' in demo data
        agrolait.write({'type': 'default'})
        picking = self.picking_model.create({
            # using Agrolait, Michel Fletcher
            'partner_id': self.ref('base.res_partner_address_4'),
            'picking_type_id': self.ref('stock.picking_type_in'),
        })
        prod_id = self.ref('product.product_product_10')
        # Seed the move values from the product onchange, then complete them.
        move_vals = self.move_model.onchange_product_id(
            prod_id=prod_id)['value']
        move_vals['product_id'] = prod_id
        move_vals['picking_id'] = picking.id
        move_vals['location_dest_id'] = self.ref(
            'stock.stock_location_customers')
        move_vals['location_id'] = self.ref(
            'stock.stock_location_stock')
        self.move_model.create(move_vals)
        # Run the full transfer workflow.
        picking.set_to_be_invoiced()
        picking.action_confirm()
        picking.action_assign()
        picking.do_prepare_partial()
        picking.do_transfer()
        self.assertEqual(picking.state, 'done')
        # The wizard reads the picking from the active_* context keys.
        wizard = self.invoice_wizard.with_context(
            {
                'active_ids': [picking.id],
                'active_model': 'stock.picking',
                'active_id': picking.id,
            }
        ).create({'journal_id': self.ref('account.sales_journal')})
        invoices = wizard.create_invoice()
        self.assertEqual(picking.invoice_state, 'invoiced')
        invoice = self.invoice_model.browse(invoices[0])
        # invoice partner must be Agrolait
        self.assertEqual(invoice.partner_id.id, self.ref('base.res_partner_2'))
|
agpl-3.0
|
RevelSystems/django
|
django/contrib/auth/models.py
|
104
|
17967
|
from __future__ import unicode_literals
from django.contrib import auth
from django.contrib.auth.hashers import (
check_password, is_password_usable, make_password,
)
from django.contrib.auth.signals import user_logged_in
from django.contrib.contenttypes.models import ContentType
from django.core import validators
from django.core.exceptions import PermissionDenied
from django.core.mail import send_mail
from django.db import models
from django.db.models.manager import EmptyManager
from django.utils import six, timezone
from django.utils.crypto import get_random_string, salted_hmac
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
def update_last_login(sender, user, **kwargs):
    """
    Signal receiver that stamps ``user.last_login`` with the current
    time whenever the ``user_logged_in`` signal fires.
    """
    now = timezone.now()
    user.last_login = now
    # Persist only the touched column to avoid clobbering other fields.
    user.save(update_fields=['last_login'])
user_logged_in.connect(update_last_login)
class PermissionManager(models.Manager):
    """Manager providing natural-key lookup for Permission fixtures."""
    use_in_migrations = True
    def get_by_natural_key(self, codename, app_label, model):
        # Resolve the content type on the same database as this manager.
        return self.get(
            codename=codename,
            content_type=ContentType.objects.db_manager(self.db).get_by_natural_key(app_label, model),
        )
@python_2_unicode_compatible
class Permission(models.Model):
    """
    The permissions system provides a way to assign permissions to specific
    users and groups of users.
    The permission system is used by the Django admin site, but may also be
    useful in your own code. The Django admin site uses permissions as follows:
        - The "add" permission limits the user's ability to view the "add" form
          and add an object.
        - The "change" permission limits a user's ability to view the change
          list, view the "change" form and change an object.
        - The "delete" permission limits the ability to delete an object.
    Permissions are set globally per type of object, not per specific object
    instance. It is possible to say "Mary may change news stories," but it's
    not currently possible to say "Mary may change news stories, but only the
    ones she created herself" or "Mary may only change news stories that have a
    certain status or publication date."
    Three basic permissions -- add, change and delete -- are automatically
    created for each Django model.
    """
    name = models.CharField(_('name'), max_length=255)
    content_type = models.ForeignKey(ContentType)
    codename = models.CharField(_('codename'), max_length=100)
    objects = PermissionManager()
    class Meta:
        verbose_name = _('permission')
        verbose_name_plural = _('permissions')
        # A codename is unique per content type, not globally.
        unique_together = (('content_type', 'codename'),)
        ordering = ('content_type__app_label', 'content_type__model',
                    'codename')
    def __str__(self):
        return "%s | %s | %s" % (
            six.text_type(self.content_type.app_label),
            six.text_type(self.content_type),
            six.text_type(self.name))
    def natural_key(self):
        # Mirrors get_by_natural_key: (codename, app_label, model).
        return (self.codename,) + self.content_type.natural_key()
    natural_key.dependencies = ['contenttypes.contenttype']
class GroupManager(models.Manager):
    """
    The manager for the auth's Group model.  Groups use their unique
    name as natural key.
    """
    use_in_migrations = True
    def get_by_natural_key(self, name):
        return self.get(name=name)
@python_2_unicode_compatible
class Group(models.Model):
    """
    Groups are a generic way of categorizing users to apply permissions, or
    some other label, to those users. A user can belong to any number of
    groups.
    A user in a group automatically has all the permissions granted to that
    group. For example, if the group Site editors has the permission
    can_edit_home_page, any user in that group will have that permission.
    Beyond permissions, groups are a convenient way to categorize users to
    apply some label, or extended functionality, to them. For example, you
    could create a group 'Special users', and you could write code that would
    do special things to those users -- such as giving them access to a
    members-only portion of your site, or sending them members-only email
    messages.
    """
    name = models.CharField(_('name'), max_length=80, unique=True)
    permissions = models.ManyToManyField(Permission,
        verbose_name=_('permissions'), blank=True)
    objects = GroupManager()
    class Meta:
        verbose_name = _('group')
        verbose_name_plural = _('groups')
    def __str__(self):
        return self.name
    def natural_key(self):
        # The unique name doubles as the natural key for serialization.
        return (self.name,)
class BaseUserManager(models.Manager):
    """Manager base shared by custom user models: email normalization,
    random passwords and USERNAME_FIELD-based natural keys."""
    @classmethod
    def normalize_email(cls, email):
        """
        Normalize the address by lowercasing the domain part of the email
        address.  Values without an '@' are returned unchanged.
        """
        email = email or ''
        try:
            email_name, domain_part = email.strip().rsplit('@', 1)
        except ValueError:
            # No '@' present: leave the value as-is.
            pass
        else:
            email = '@'.join([email_name, domain_part.lower()])
        return email
    def make_random_password(self, length=10,
      allowed_chars='abcdefghjkmnpqrstuvwxyz'
                    'ABCDEFGHJKLMNPQRSTUVWXYZ'
                    '23456789'):
        """
        Generates a random password with the given length and given
        allowed_chars. Note that the default value of allowed_chars does not
        have "I" or "O" or letters and digits that look similar -- just to
        avoid confusion.
        """
        return get_random_string(length, allowed_chars)
    def get_by_natural_key(self, username):
        # Look up by whatever field the concrete model declares as username.
        return self.get(**{self.model.USERNAME_FIELD: username})
class UserManager(BaseUserManager):
    """Default manager for the stock User model."""
    use_in_migrations = True
    def _create_user(self, username, email, password,
      is_staff, is_superuser, **extra_fields):
        """
        Creates and saves a User with the given username, email and password.
        """
        now = timezone.now()
        if not username:
            raise ValueError('The given username must be set')
        email = self.normalize_email(email)
        user = self.model(username=username, email=email,
            is_staff=is_staff, is_active=True,
            is_superuser=is_superuser,
            date_joined=now, **extra_fields)
        # Hashes the raw password (or marks it unusable when None).
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_user(self, username, email=None, password=None, **extra_fields):
        # Regular user: neither staff nor superuser.
        return self._create_user(username, email, password, False, False,
            **extra_fields)
    def create_superuser(self, username, email, password, **extra_fields):
        # Superuser: both staff and superuser flags set.
        return self._create_user(username, email, password, True, True,
            **extra_fields)
@python_2_unicode_compatible
class AbstractBaseUser(models.Model):
    """Minimal abstract user: password handling, last_login and the
    authentication protocol; concrete models add USERNAME_FIELD."""
    password = models.CharField(_('password'), max_length=128)
    last_login = models.DateTimeField(_('last login'), blank=True, null=True)
    # Plain class attribute; concrete subclasses may override with a field.
    is_active = True
    REQUIRED_FIELDS = []
    class Meta:
        abstract = True
    def get_username(self):
        "Return the identifying username for this User"
        return getattr(self, self.USERNAME_FIELD)
    def __str__(self):
        return self.get_username()
    def natural_key(self):
        return (self.get_username(),)
    def is_anonymous(self):
        """
        Always returns False. This is a way of comparing User objects to
        anonymous users.
        """
        return False
    def is_authenticated(self):
        """
        Always return True. This is a way to tell if the user has been
        authenticated in templates.
        """
        return True
    def set_password(self, raw_password):
        # Stores the salted hash, never the raw password.
        self.password = make_password(raw_password)
    def check_password(self, raw_password):
        """
        Returns a boolean of whether the raw_password was correct. Handles
        hashing formats behind the scenes.
        """
        def setter(raw_password):
            # Called by check_password to upgrade outdated hashes in place.
            self.set_password(raw_password)
            self.save(update_fields=["password"])
        return check_password(raw_password, self.password, setter)
    def set_unusable_password(self):
        # Sets a value that will never be a valid hash
        self.password = make_password(None)
    def has_usable_password(self):
        return is_password_usable(self.password)
    def get_full_name(self):
        raise NotImplementedError('subclasses of AbstractBaseUser must provide a get_full_name() method')
    def get_short_name(self):
        raise NotImplementedError('subclasses of AbstractBaseUser must provide a get_short_name() method.')
    def get_session_auth_hash(self):
        """
        Returns an HMAC of the password field.
        """
        key_salt = "django.contrib.auth.models.AbstractBaseUser.get_session_auth_hash"
        return salted_hmac(key_salt, self.password).hexdigest()
# A few helper functions for common logic between User and AnonymousUser.
def _user_get_all_permissions(user, obj):
permissions = set()
for backend in auth.get_backends():
if hasattr(backend, "get_all_permissions"):
permissions.update(backend.get_all_permissions(user, obj))
return permissions
def _user_has_perm(user, perm, obj):
    """
    A backend can raise `PermissionDenied` to short-circuit permission checking.
    """
    for backend in auth.get_backends():
        if hasattr(backend, 'has_perm'):
            try:
                if backend.has_perm(user, perm, obj):
                    return True
            except PermissionDenied:
                # Explicit denial wins over any later backend.
                return False
    return False
def _user_has_module_perms(user, app_label):
    """
    A backend can raise `PermissionDenied` to short-circuit permission checking.
    """
    for backend in auth.get_backends():
        if hasattr(backend, 'has_module_perms'):
            try:
                if backend.has_module_perms(user, app_label):
                    return True
            except PermissionDenied:
                # Explicit denial wins over any later backend.
                return False
    return False
class PermissionsMixin(models.Model):
    """
    A mixin class that adds the fields and methods necessary to support
    Django's Group and Permission model using the ModelBackend.
    """
    is_superuser = models.BooleanField(_('superuser status'), default=False,
        help_text=_('Designates that this user has all permissions without '
                    'explicitly assigning them.'))
    groups = models.ManyToManyField(Group, verbose_name=_('groups'),
        blank=True, help_text=_('The groups this user belongs to. A user will '
                                'get all permissions granted to each of '
                                'their groups.'),
        related_name="user_set", related_query_name="user")
    user_permissions = models.ManyToManyField(Permission,
        verbose_name=_('user permissions'), blank=True,
        help_text=_('Specific permissions for this user.'),
        related_name="user_set", related_query_name="user")
    class Meta:
        abstract = True
    def get_group_permissions(self, obj=None):
        """
        Returns a list of permission strings that this user has through their
        groups. This method queries all available auth backends. If an object
        is passed in, only permissions matching this object are returned.
        """
        permissions = set()
        for backend in auth.get_backends():
            if hasattr(backend, "get_group_permissions"):
                permissions.update(backend.get_group_permissions(self, obj))
        return permissions
    def get_all_permissions(self, obj=None):
        # Delegates to the module-level helper shared with AnonymousUser.
        return _user_get_all_permissions(self, obj)
    def has_perm(self, perm, obj=None):
        """
        Returns True if the user has the specified permission. This method
        queries all available auth backends, but returns immediately if any
        backend returns True. Thus, a user who has permission from a single
        auth backend is assumed to have permission in general. If an object is
        provided, permissions for this specific object are checked.
        """
        # Active superusers have all permissions.
        if self.is_active and self.is_superuser:
            return True
        # Otherwise we need to check the backends.
        return _user_has_perm(self, perm, obj)
    def has_perms(self, perm_list, obj=None):
        """
        Returns True if the user has each of the specified permissions. If
        object is passed, it checks if the user has all required perms for this
        object.
        """
        for perm in perm_list:
            if not self.has_perm(perm, obj):
                return False
        return True
    def has_module_perms(self, app_label):
        """
        Returns True if the user has any permissions in the given app label.
        Uses pretty much the same logic as has_perm, above.
        """
        # Active superusers have all permissions.
        if self.is_active and self.is_superuser:
            return True
        return _user_has_module_perms(self, app_label)
class AbstractUser(AbstractBaseUser, PermissionsMixin):
    """
    An abstract base class implementing a fully featured User model with
    admin-compliant permissions.
    Username, password and email are required. Other fields are optional.
    """
    username = models.CharField(_('username'), max_length=30, unique=True,
        help_text=_('Required. 30 characters or fewer. Letters, digits and '
                    '@/./+/-/_ only.'),
        validators=[
            validators.RegexValidator(r'^[\w.@+-]+$',
                _('Enter a valid username. '
                  'This value may contain only letters, numbers '
                  'and @/./+/-/_ characters.'), 'invalid'),
        ],
        error_messages={
            'unique': _("A user with that username already exists."),
        })
    first_name = models.CharField(_('first name'), max_length=30, blank=True)
    last_name = models.CharField(_('last name'), max_length=30, blank=True)
    email = models.EmailField(_('email address'), blank=True)
    is_staff = models.BooleanField(_('staff status'), default=False,
        help_text=_('Designates whether the user can log into this admin '
                    'site.'))
    # Overrides the plain class attribute on AbstractBaseUser with a field.
    is_active = models.BooleanField(_('active'), default=True,
        help_text=_('Designates whether this user should be treated as '
                    'active. Unselect this instead of deleting accounts.'))
    date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
    objects = UserManager()
    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = ['email']
    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')
        abstract = True
    def get_full_name(self):
        """
        Returns the first_name plus the last_name, with a space in between.
        """
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()
    def get_short_name(self):
        "Returns the short name for the user."
        return self.first_name
    def email_user(self, subject, message, from_email=None, **kwargs):
        """
        Sends an email to this User.
        """
        send_mail(subject, message, from_email, [self.email], **kwargs)
class User(AbstractUser):
    """
    Users within the Django authentication system are represented by this
    model.
    Username, password and email are required. Other fields are optional.
    """
    class Meta(AbstractUser.Meta):
        # Allows projects to substitute their own model via AUTH_USER_MODEL.
        swappable = 'AUTH_USER_MODEL'
@python_2_unicode_compatible
class AnonymousUser(object):
    """Stateless stand-in used for unauthenticated requests; mirrors the
    User API but has no database representation."""
    id = None
    pk = None
    username = ''
    is_staff = False
    is_active = False
    is_superuser = False
    # Managers that always return empty querysets.
    _groups = EmptyManager(Group)
    _user_permissions = EmptyManager(Permission)
    def __init__(self):
        pass
    def __str__(self):
        return 'AnonymousUser'
    def __eq__(self, other):
        # All AnonymousUser instances compare equal to each other.
        return isinstance(other, self.__class__)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __hash__(self):
        return 1 # instances always return the same hash value
    def save(self):
        raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")
    def delete(self):
        raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")
    def set_password(self, raw_password):
        raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")
    def check_password(self, raw_password):
        raise NotImplementedError("Django doesn't provide a DB representation for AnonymousUser.")
    def _get_groups(self):
        return self._groups
    groups = property(_get_groups)
    def _get_user_permissions(self):
        return self._user_permissions
    user_permissions = property(_get_user_permissions)
    def get_group_permissions(self, obj=None):
        return set()
    def get_all_permissions(self, obj=None):
        # Backends may still grant permissions to anonymous users.
        return _user_get_all_permissions(self, obj=obj)
    def has_perm(self, perm, obj=None):
        return _user_has_perm(self, perm, obj=obj)
    def has_perms(self, perm_list, obj=None):
        for perm in perm_list:
            if not self.has_perm(perm, obj):
                return False
        return True
    def has_module_perms(self, module):
        return _user_has_module_perms(self, module)
    def is_anonymous(self):
        return True
    def is_authenticated(self):
        return False
    def get_username(self):
        return self.username
|
bsd-3-clause
|
NProfileAnalysisComputationalTool/npact
|
pynpact/pynpact/util.py
|
1
|
9619
|
import errno
import os
import os.path
import logging
import time
import hashlib
import tempfile
from contextlib import contextmanager
from functools import wraps
from path import Path
def reducehashdict(dict, keys):
    """Pull the given keys out of the dictionary; return the reduced
    dictionary and the sha1 hash of that set of key values.

    Keys that are absent or whose value is None are skipped.  Returns
    (outdict, hexdigest), or (outdict, None) when no key matched.
    """
    outdict = {}
    h = hashlib.sha1()
    # We go through in sorted order to ensure stability of the
    # ordering between runs.
    for k in sorted(keys):
        val = dict.get(k)
        if val is not None:
            # Encode before hashing: sha1.update requires bytes on
            # Python 3.  ASCII keys/values keep the historical digest.
            h.update(str(k).encode('utf-8'))
            h.update(str(val).encode('utf-8'))
            outdict[k] = val
    if outdict:
        return outdict, h.hexdigest()
    return outdict, None
def reducedict(dict_, keys):
    """Return a copy of dict_ restricted to the given keys; keys that
    are missing from dict_ are simply omitted."""
    return dict((k, dict_[k]) for k in keys if k in dict_)
def hashdict(dict_):
    """Return the sha1 hex digest of the dict's non-None items,
    visiting keys in sorted order for run-to-run stability."""
    h = hashlib.sha1()
    for k in sorted(dict_.keys()):
        val = dict_.get(k)
        if val is not None:
            # Encode for Python 3 compatibility (update requires bytes
            # there); ASCII input keeps the historical digest unchanged.
            h.update(str(k).encode('utf-8'))
            h.update(str(val).encode('utf-8'))
    return h.hexdigest()
class Hasher(object):
    """Incremental sha1 builder with a fluent interface: each hash*
    method folds more data into the digest and returns self."""

    def __init__(self):
        self.state = hashlib.sha1()

    def _update(self, value):
        # Central encode step: sha1.update requires bytes on Python 3;
        # ASCII input produces the same digest as the old str path.
        self.state.update(str(value).encode('utf-8'))

    def hashdict(self, dict_):
        """Fold the dict's non-None items, in sorted-key order."""
        for k in sorted(dict_.keys()):
            val = dict_.get(k)
            if val is not None:
                self._update(k)
                self._update(val)
        return self

    def hashfiletime(self, filename):
        """Fold the file's mtime so the digest changes when it does."""
        self._update(os.path.getmtime(filename))
        return self

    def hashlist(self, lst):
        """Fold every list item, in order."""
        for item in lst:
            self._update(item)
        return self

    def hash(self, str_):
        """Fold a single value."""
        self._update(str_)
        return self

    def hexdigest(self):
        """Return the hex digest of everything folded so far."""
        return self.state.hexdigest()
def ensure_dir(dir, logger=None):
    """Create directory `dir` (and any parents) if it does not exist.

    Losing a creation race to another process is tolerated: if makedirs
    fails but the directory exists afterwards, that is success.
    (Also modernizes the Python-2-only `except OSError, e` syntax.)
    """
    if os.path.exists(dir):
        return
    try:
        if logger: logger.debug("Making dir: %s", dir)
        os.makedirs(dir)
        if logger: logger.info("Created dir: %s", dir)
    except OSError as e:
        # makedirs can raise even though the directory now exists (e.g.
        # another process created it between our check and the call);
        # http://docs.python.org/library/os.path.html#os.path.exists
        if os.path.exists(dir):
            if logger:
                logger.debug("Erred, already exists: e.errno: %s", e.errno)
        else:
            raise
def withDir(dir, fn, *args, **kwargs):
    """Call fn(*args, **kwargs) with the process chdir'd into `dir`,
    restoring the previous working directory afterwards."""
    previous = os.getcwd()
    os.chdir(dir)
    try:
        return fn(*args, **kwargs)
    finally:
        os.chdir(previous)
def pprint_bytes(bytes):
    """Format a byte count as a human-readable string (B/KB/MB/GB/TB),
    always with two decimal places."""
    bytes = float(bytes)
    suffix = 'B'
    for unit in ('KB', 'MB', 'GB', 'TB'):
        if bytes < 1024:
            break
        bytes = bytes / 1024
        suffix = unit
    return '%.2f%s' % (bytes, suffix)
def which(program):
    """Locate `program` like the shell's `which`: if it carries a
    directory part, check that path directly; otherwise scan $PATH.
    Return the full path, or None when nothing executable is found."""
    def executable(candidate):
        return os.path.exists(candidate) and os.access(candidate, os.X_OK)

    directory, _basename = os.path.split(program)
    if directory:
        if executable(program):
            return program
        return None
    for entry in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(entry, program)
        if executable(candidate):
            return candidate
    return None
def stream_to_handle(stream, handle, bufsize=8192):
    """Copy `stream` into `handle` in bufsize-sized chunks and return
    the number of bytes (or characters) copied."""
    copied = 0
    while True:
        buf = stream.read(bufsize)
        # Test the buffer's own falsiness rather than `buf == ""`:
        # binary streams return b'' at EOF, which is != "" on Python 3
        # and previously caused an infinite loop there.
        if not buf:
            break
        copied += len(buf)
        handle.write(buf)
    return copied
def stream_to_file(stream, path, bufsize=8192):
    """Copy `stream` into `path`.  `path` may be an already-open
    file-like object (anything with .write) or a filename to create."""
    if hasattr(path, 'write'):
        return stream_to_handle(stream, path, bufsize)
    with open(path, "wb") as handle:
        return stream_to_handle(stream, handle, bufsize)
@contextmanager
def mkstemp_rename(destination, **kwargs):
    """Write to a temporary file, then move it on top of a (possibly)
    existing `destination` only when finished, so other readers always
    see a consistent version during long-running writes.

    Extra keyword arguments are passed to tempfile.mkstemp; `dir`
    defaults to destination's directory so the final rename stays on
    one filesystem.

    Example::

        with mkstemp_rename('foobar.txt') as f:
            f.write('stuff\n')
    """
    log = kwargs.pop('log', None)
    kwargs.setdefault('dir', os.path.dirname(destination))
    fd, tmpname = tempfile.mkstemp(**kwargs)
    tmpname = Path(tmpname)
    try:
        handle = os.fdopen(fd, 'wb')
        yield handle
        handle.close()
        tmpname.chmod(0o0644)
        tmpname.rename(destination)
    finally:
        # Removes the temp file if the rename never happened.
        tmpname.remove_p()
@contextmanager
def mkdtemp_rename(destination, chmod=None, **kwargs):
    """Do work in a temporary directory, then rename it on top of
    `destination` on success; the temp directory is always cleaned up.

    Extra keyword arguments go to tempfile.mkdtemp (`dir` defaults to
    destination's parent so the rename stays on one filesystem).
    """
    log = kwargs.pop('log', None)
    dest = Path(destination).normpath()
    kwargs.setdefault('dir', dest.parent)
    tmppath = Path(tempfile.mkdtemp(**kwargs))
    try:
        yield tmppath
        try:
            tmppath.chmod(0o0755)
            tmppath.rename(dest)
        except OSError as e:
            # Guard every log call: `log` is None unless the caller
            # passed one, and this previously crashed with
            # AttributeError exactly when an error needed reporting.
            if e.errno == errno.ENOENT:
                # the tmppath didn't exist?!
                if log:
                    log.exception("Shouldn't be here %r", tmppath)
                raise
            elif e.errno == errno.ENOTEMPTY:
                if log:
                    log.debug("Target already existed")
            else:
                raise
    finally:
        tmppath.rmtree_p()
def replace_ext(base, newext):
    """Return `base` with its extension replaced by `newext`
    (a leading dot on newext is optional)."""
    ext = newext[1:] if newext[0] == '.' else newext
    return Path(base).stripext() + '.' + ext
def is_outofdate(filename, *dependencies):
    """Return True if `filename` is missing or not newer than every one
    of its (non-falsy) dependencies."""
    if not os.path.exists(filename):
        return True
    own_mtime = os.path.getmtime(filename)
    for dep in dependencies:
        if dep and os.path.getmtime(dep) > own_mtime:
            return True
    return False
def derivative_filename(base, part, replace_ext=True, outputdir=None):
    """Build the filename of a derivative product of the original file:
    base's name (optionally minus its extension) plus `.part`, placed
    in `outputdir` (defaults to base's own directory)."""
    suffix = part if part[0] == "." else "." + part
    directory = os.path.dirname(base) if outputdir is None else outputdir
    stem = os.path.basename(base)
    if replace_ext:
        stem = os.path.splitext(stem)[0]
    return os.path.join(directory, stem + suffix)
def safe_produce_new(outfilename, func, force=False, dependencies=None, **kwargs):
    """Regenerate `outfilename` via func(handle) when forced or when it
    is stale relative to `dependencies`, writing through a temp file
    (mkstemp_rename) so readers never see a partial result.

    Returns outfilename either way.  The default for `dependencies`
    changed from [] to None to avoid the shared-mutable-default
    pitfall; semantics are unchanged (None means "no dependencies").
    """
    logger = kwargs.get('logger')
    deps = dependencies if dependencies is not None else []
    if force or is_outofdate(outfilename, *deps):
        if logger:
            logger.debug(
                "Regenerating, checked:%d force:%r", len(deps), force)
        with mkstemp_rename(outfilename, **kwargs) as f:
            func(f)
    return outfilename
def log_time(logger=logging, level=logging.INFO):
    """Decorator factory: log how long each call of the wrapped function
    took, at `level` on `logger`."""
    def decorator(func):
        def wrapper(*args, **kwargs):
            start = time.time()
            result = func(*args, **kwargs)
            elapsed = time.time() - start
            # func.__name__ works on Python 2 and 3 alike; the old
            # func.func_name attribute was removed in Python 3.
            # Lazy %-args avoid formatting when the level is disabled.
            logger.log(level, "%s done, took %fs", func.__name__, elapsed)
            return result
        return wraps(func)(wrapper)
    return decorator
class Task(object):
    """A picklable deferred call: capture a function plus its arguments
    now, invoke them when the instance is called.

    E.g.::

        def adder(a, b):
            return a + b
        Task(adder, 1, 2)() == 3
    """
    func = None
    args = None
    kwargs = None

    def __init__(self, func, *args, **kwargs):
        self.func, self.args, self.kwargs = func, args, kwargs

    def __call__(self):
        return self.func(*self.args, **self.kwargs)
def delay(fn):
    """Wrap `fn` so that calling it builds a serializable Task instead
    of executing immediately.

    E.g. delay(sum)([1, 2]) yields a callable Task object, and
    delay(sum)([1, 2])() == 3.
    """
    @wraps(fn)
    def deferred(*args, **kwargs):
        return Task(fn, *args, **kwargs)
    return deferred
# Copright (c) 2011,2012,2013,2014 Accelerated Data Works
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
|
bsd-3-clause
|
hcjcch/blogbyflask
|
app/api_1_0/users.py
|
104
|
1702
|
from flask import jsonify, request, current_app, url_for
from . import api
from ..models import User, Post
@api.route('/users/<int:id>')
def get_user(id):
    """Return the user with the given id as JSON (404 when absent)."""
    found = User.query.get_or_404(id)
    return jsonify(found.to_json())
@api.route('/users/<int:id>/posts/')
def get_user_posts(id):
    """Return one page of the given user's posts as JSON.

    Bug fix: the prev/next pagination links previously pointed at
    'api.get_posts' (the global post listing) and dropped the user id;
    they now point back at this endpoint with the id preserved.
    """
    user = User.query.get_or_404(id)
    page = request.args.get('page', 1, type=int)
    pagination = user.posts.order_by(Post.timestamp.desc()).paginate(
        page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
        error_out=False)
    posts = pagination.items
    prev = None
    if pagination.has_prev:
        prev = url_for('api.get_user_posts', id=id, page=page-1, _external=True)
    next = None
    if pagination.has_next:
        next = url_for('api.get_user_posts', id=id, page=page+1, _external=True)
    return jsonify({
        'posts': [post.to_json() for post in posts],
        'prev': prev,
        'next': next,
        'count': pagination.total
    })
@api.route('/users/<int:id>/timeline/')
def get_user_followed_posts(id):
    """Return one page of the posts followed by the given user as JSON.

    Bug fix: the prev/next pagination links previously pointed at
    'api.get_posts' and dropped the user id; they now point back at
    this timeline endpoint with the id preserved.
    """
    user = User.query.get_or_404(id)
    page = request.args.get('page', 1, type=int)
    pagination = user.followed_posts.order_by(Post.timestamp.desc()).paginate(
        page, per_page=current_app.config['FLASKY_POSTS_PER_PAGE'],
        error_out=False)
    posts = pagination.items
    prev = None
    if pagination.has_prev:
        prev = url_for('api.get_user_followed_posts', id=id, page=page-1, _external=True)
    next = None
    if pagination.has_next:
        next = url_for('api.get_user_followed_posts', id=id, page=page+1, _external=True)
    return jsonify({
        'posts': [post.to_json() for post in posts],
        'prev': prev,
        'next': next,
        'count': pagination.total
    })
|
mit
|
sacsant/avocado-misc-tests
|
generic/ipistorm.py
|
4
|
4063
|
#!/usr/bin/env python
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2020 IBM
# Author: Harish <harish@linux.vnet.ibm.com>
#
import os
import time
import platform
from avocado import Test
from avocado import skipIf
from avocado.utils import archive, build, cpu, genio, linux_modules, process
from avocado.utils.software_manager import SoftwareManager
IS_POWER_NV = 'PowerNV' in genio.read_file('/proc/cpuinfo')
class DBLIPIStrom(Test):

    """
    Storm IPIs to ensure DBL interrupts/XIVE-IPIs are triggered on XIVE

    :avocado: tags=ipi,power,xive
    """

    @skipIf(IS_POWER_NV, "This test is not supported on PowerNV platform")
    def setUp(self):
        """
        Install necessary packages to build the linux module
        """
        if 'power' not in cpu.get_family():
            self.cancel('Test Only supported on Power')
        pkgs = ['gcc', 'make', 'kernel-devel']
        smm = SoftwareManager()
        for package in pkgs:
            if not smm.check_installed(package) and not smm.install(package):
                self.cancel('%s is needed for the test to be run' % package)
        # Fetch and unpack the ipistorm kernel-module sources (cached 7 days).
        tarball = self.fetch_asset("ipistorm.zip", locations=[
            "https://github.com/antonblanchard/ipistorm"
            "/archive/master.zip"], expire='7d')
        archive.extract(tarball, self.teststmpdir)
        teststmpdir = os.path.join(self.teststmpdir, "ipistorm-master")
        os.chdir(teststmpdir)
        # Building an out-of-tree module needs the running kernel's headers.
        kernel_version = platform.uname()[2]
        if not os.path.exists(os.path.join("/lib/modules", kernel_version)):
            self.cancel(
                "Modules of running kernel missing to build ipistorm module")
        build.make(teststmpdir)
        if not os.path.isfile(os.path.join(teststmpdir, 'ipistorm.ko')):
            self.fail("No ipistorm.ko found, module build failed")
        # The DBL counters only exist under the XIVE interrupt controller.
        int_op = genio.read_file("/proc/interrupts")
        if "XIVE" not in int_op:
            self.cancel("Test is supported only with XIVE")

    @staticmethod
    def get_interrupts(string):
        """
        Find the string and return a list of CPU stats for it
        """
        int_op = genio.read_file("/proc/interrupts")
        for line in int_op.splitlines():
            if string in line:
                # Columns 1..online_count are the per-CPU counters.
                line = line.split()[1: cpu.online_count() + 1]
                return line
        return []

    def test(self):
        """
        Check for the IPIs before and after ipistorm module
        """
        # Snapshot counters, insert the module to storm IPIs, then compare.
        pre_dbl_val = self.get_interrupts("DBL")
        pre_ipi_val = self.get_interrupts("IPI")
        if not linux_modules.module_is_loaded("ipistorm"):
            if process.system(
                    "insmod ./ipistorm.ko", ignore_status=True, shell=True, sudo=True):
                self.fail("Failed to insert ipistorm module")
        else:
            self.cancel(
                "Cannot verify the DBL interrupt with module already loaded")
        time.sleep(5)
        process.system("rmmod ipistorm", ignore_status=True, sudo=True)
        post_dbl_val = self.get_interrupts("DBL")
        post_ipi_val = self.get_interrupts("IPI")
        # Every CPU must show strictly increasing DBL and IPI counts.
        for idx, _ in enumerate(post_dbl_val):
            if (int(post_dbl_val[idx]) <= int(pre_dbl_val[idx])) or\
               (int(post_ipi_val[idx]) <= int(pre_ipi_val[idx])):
                self.fail("Interrupts does not seemed to be used")
            else:
                self.log.info("Old DBL %s, New DBL: %s",
                              pre_dbl_val[idx], post_dbl_val[idx])
                self.log.info("Old IPI %s, New IPI: %s",
                              pre_ipi_val[idx], post_ipi_val[idx])
|
gpl-2.0
|
jjdmol/LOFAR
|
CEP/PyBDSM/src/python/plotresults.py
|
1
|
29164
|
"""Plotting module
This module is used to display fits results.
"""
from image import *
from . import has_pl
if has_pl:
import matplotlib.pyplot as pl
import matplotlib.cm as cm
import matplotlib.patches as mpatches
from matplotlib.widgets import Button
from matplotlib.patches import Ellipse
from matplotlib.lines import Line2D
from matplotlib import collections
from math import log10
import functions as func
from const import fwsig
import os
import numpy as N
def plotresults(img, ch0_image=True, rms_image=True, mean_image=True,
                ch0_islands=True, gresid_image=True, sresid_image=False,
                gmodel_image=True, smodel_image=False, pyramid_srcs=False,
                source_seds=False, ch0_flagged=False, pi_image=False,
                psf_major=False, psf_minor=False, psf_pa=False, broadcast=False):
    """Show the results of a fit.

    Builds an interactive matplotlib figure: each boolean flag selects
    one panel (image, islands, residuals, PSF variation, SEDs, ...).
    State is shared with the event handlers (on_press/on_pick) and the
    per-panel coordinate formatters through module globals.
    """
    global img_ch0, img_rms, img_mean, img_gaus_mod, img_shap_mod
    global img_gaus_resid, img_shap_resid, pixels_per_beam, pix2sky
    global vmin, vmax, vmin_cur, vmax_cur, ch0min, ch0max, img_pi
    global low, fig, images, src_list, srcid_cur, sky2pix, markers
    global img_psf_maj, img_psf_min, img_psf_pa, do_broadcast, samp_client
    global samp_key, samp_gaul_table_url, samp_srl_table_url
    if not has_pl:
        print "\033[31;1mWARNING\033[0m: Matplotlib not found. Plotting is disabled."
        return
    # Pick up SAMP (VO interop) connection details if the image has them.
    if hasattr(img, 'samp_client'):
        samp_client = img.samp_client
        samp_key = img.samp_key
        if hasattr(img, 'samp_srl_table_url'):
            samp_srl_table_url = img.samp_srl_table_url
        else:
            samp_srl_table_url = None
        if hasattr(img, 'samp_gaul_table_url'):
            samp_gaul_table_url = img.samp_gaul_table_url
        else:
            samp_gaul_table_url = None
    else:
        # NOTE(review): 'samp_clent' looks like a typo for 'samp_client';
        # as written the global samp_client is never reset here — confirm.
        samp_clent = None
        samp_key = None
        samp_srl_table_url = None
        samp_gaul_table_url = None
    do_broadcast = broadcast
    # Define the images. The images are used both by imshow and by the
    # on_press() and coord_format event handlers
    pix2sky = img.pix2sky
    sky2pix = img.sky2pix
    # Beam area in pixels (gfactor converts FWHM to the Gaussian sigma).
    gfactor = 2.0 * N.sqrt(2.0 * N.log(2.0))
    pixels_per_beam = 2.0 * N.pi * (img.beam2pix(img.beam)[0]
                                    * img.beam2pix(img.beam)[1]) / gfactor**2
    # Construct lists of images, titles, etc.
    images = []
    titles = []
    names = []
    markers = []
    img_gaus_mod = None  # default needed for key press event
    img_shap_mod = None  # default needed for key press event
    if ch0_image:
        img_ch0 = img.ch0_arr
        images.append(img_ch0)
        titles.append('Original (ch0) Image\n(arbitrary logarithmic scale)')
        names.append('ch0')
    if ch0_islands:
        img_ch0 = img.ch0_arr
        images.append(img_ch0)
        if hasattr(img, 'ngaus'):
            if hasattr(img, 'ch0_pi_arr'):
                ch0_str = 'Islands (hatched boundaries; red = PI only) and\nGaussians'
            else:
                ch0_str = 'Islands (hatched boundaries) and\nGaussians'
            if hasattr(img, 'atrous_gaussians'):
                ch0_str += ' (red = wavelet)'
            titles.append(ch0_str)
        else:
            titles.append('Islands (hatched boundaries)')
        names.append('ch0')
    if ch0_flagged:
        if not hasattr(img, 'ngaus'):
            print 'Image was not fit with Gaussians. Skipping display of flagged Gaussians.'
        else:
            img_ch0 = img.ch0_arr
            images.append(img_ch0)
            titles.append('Flagged Gaussians')
            names.append('ch0')
    if pi_image:
        if not hasattr(img, 'ch0_pi_arr'):
            print 'Polarization module not run. Skipping PI image.'
        else:
            img_pi = img.ch0_pi_arr
            images.append(img_pi)
            titles.append('Polarized Intensity Image')
            names.append('ch0_pi')
    if rms_image:
        img_rms = img.rms_arr
        images.append(img_rms)
        titles.append('Background rms Image')
        names.append('rms')
    if gresid_image:
        if not hasattr(img, 'ngaus'):
            print 'Image was not fit with Gaussians. Skipping residual Gaussian image.'
        else:
            img_gaus_resid = img.resid_gaus_arr
            images.append(img_gaus_resid)
            titles.append('Gaussian Residual Image')
            names.append('gaus_resid')
    if gmodel_image:
        if not hasattr(img, 'ngaus'):
            print 'Image was not fit with Gaussians. Skipping model Gaussian image.'
        else:
            img_gaus_mod = img.model_gaus_arr
            images.append(img_gaus_mod)
            titles.append('Gaussian Model Image')
            names.append('gaus_mod')
    if mean_image:
        img_mean = img.mean_arr
        images.append(img_mean)
        titles.append('Background mean Image')
        names.append('mean')
    if sresid_image:
        if img.opts.shapelet_do == False:
            print 'Image was not decomposed into shapelets. Skipping residual shapelet image.'
        else:
            img_shap_resid = img.ch0_arr - img.model_shap_arr
            images.append(img_shap_resid)
            titles.append('Shapelet Residual Image')
            names.append('shap_resid')
    if smodel_image:
        if img.opts.shapelet_do == False:
            print 'Image was not decomposed into shapelets. Skipping model shapelet image.'
        else:
            img_shap_mod = img.model_shap_arr
            images.append(img_shap_mod)
            titles.append('Shapelet Model Image')
            names.append('shap_mod')
    if source_seds:
        if img.opts.spectralindex_do == False:
            print 'Source SEDs were not fit. Skipping source SED plots.'
        else:
            src_list = img.sources
            sed_src = get_src(src_list, 0)
            if sed_src is None:
                print 'No sources found. Skipping source SED plots.'
            else:
                # 'seds' is a placeholder string, not an array.
                images.append('seds')
                titles.append('')
                names.append('seds')
                srcid_cur = 0
    if pyramid_srcs:
        if img.opts.atrous_do == False:
            print 'Image was not decomposed into wavelets. Skipping wavelet images.'
        else:
            # Get the unique j levels and store them. Only make subplots for
            # occupied j levels
            print 'Pyramidal source plots not yet supported.'
            # j_list = []
            # for p in img.pyrsrcs:
            #     for l in p.jlevels:
            #         j_list.append(l)
            # j_set = set(j_list)
            # j_with_gaus = list(j_set)
            # index_first_waveplot = len(images)
            # for i in range(len(j_with_gaus)):
            #     images.append('wavelets')
            #     names.append('pyrsrc'+str(i))
    if psf_major or psf_minor or psf_pa:
        if img.opts.psf_vary_do == False:
            print 'PSF variation not calculated. Skipping PSF variation images.'
        else:
            # fwsig converts the stored sigma maps to FWHM for display.
            if psf_major:
                img_psf_maj = img.psf_vary_maj_arr*fwsig
                images.append(img_psf_maj)
                titles.append('PSF Major Axis FWHM (pixels)')
                names.append('psf_maj')
            if psf_minor:
                img_psf_min = img.psf_vary_min_arr*fwsig
                images.append(img_psf_min)
                titles.append('PSF Minor Axis FWHM (pixels)')
                names.append('psf_min')
            if psf_pa:
                img_psf_pa = img.psf_vary_pa_arr
                images.append(img_psf_pa)
                titles.append('PSF Pos. Angle FWhM (degrees)')
                names.append('psf_pa')
    if images == []:
        print 'No images to display.'
        return
    # Display scaling: images are shown as log10(image + low), where
    # `low` offsets the data so the argument stays positive.
    im_mean = img.clipped_mean
    im_rms = img.clipped_rms
    if img.resid_gaus is None:
        low = 1.1*abs(img.min_value)
    else:
        low = N.max([1.1*abs(img.min_value),1.1*abs(N.nanmin(img.resid_gaus))])
    if low <= 0.0:
        low = 1E-6
    vmin_est = im_mean - im_rms*5.0 + low
    if vmin_est <= 0.0:
        vmin = N.log10(low)
    else:
        vmin = N.log10(vmin_est)
    vmax = N.log10(im_mean + im_rms*30.0 + low)
    ch0min = vmin
    ch0max = N.log10(img.max_value + low)
    vmin_cur = vmin
    vmax_cur = vmax
    origin = 'lower'
    colours = ['m', 'b', 'c', 'g', 'y', 'k']  # reserve red ('r') for wavelets
    styles = ['-', '-.', '--']
    # Interactive-usage help banner.
    print '=' * 72
    print 'NOTE -- With the mouse pointer in plot window:'
    print ' Press "i" ........ : Get integrated flux densities and mean rms'
    print ' values for the visible portion of the image'
    print ' Press "m" ........ : Change min and max scaling values'
    print ' Press "n" ........ : Show / hide island IDs'
    print ' Press "0" ........ : Reset scaling to default'
    if 'seds' in images:
        print ' Press "c" ........ : Change source for SED plot'
    if ch0_islands and hasattr(img, 'ngaus'):
        print ' Click Gaussian ... : Print Gaussian and source IDs (zoom_rect mode, '
        print ' toggled with the "zoom" button and indicated in '
        print ' the lower right corner, must be off)'
        if 'seds' in images:
            print ' The SED plot will also show the chosen source.'
    print '_' * 72
    # Subplot grid: two columns once there is more than one panel.
    if len(images) > 1:
        numx = 2
    else:
        numx = 1
    numy = int(N.ceil(float(len(images))/float(numx)))
    fig = pl.figure(figsize=(max(15, 10.0*float(numy)/float(numx)), 10.0))
    fig.canvas.set_window_title('PyBDSM Fit Results for '+ img.filename)
    gray_palette = cm.gray
    gray_palette.set_bad('k')
    for i, image in enumerate(images):
        if image != 'wavelets' and image != 'seds':
            # Axes are created via exec so each gets a numbered name
            # (ax1, ax2, ...) that later exec'd commands can refer to;
            # all panels share ax1's zoom/pan.
            if i == 0:
                cmd = 'ax' + str(i+1) + ' = pl.subplot(' + str(numx) + \
                    ', ' + str(numy) + ', ' + str(i+1) + ')'
            else:
                cmd = 'ax' + str(i+1) + ' = pl.subplot(' + str(numx) + \
                    ', ' + str(numy) + ', ' + str(i+1) + ', sharex=ax1' + \
                    ', sharey=ax1)'
            exec cmd
            if 'PSF' in titles[i]:
                im = image
            else:
                im = N.log10(image + low)
            if 'Islands' in titles[i]:
                island_offsets_x = []
                island_offsets_y = []
                border_color = []
                ax = pl.gca()
                for iisl, isl in enumerate(img.islands):
                    xb, yb = isl.border
                    # Red borders mark PI-only islands.
                    if hasattr(isl, '_pi'):
                        for c in range(len(xb)):
                            border_color.append('r')
                    else:
                        for c in range(len(xb)):
                            border_color.append('#afeeee')
                    island_offsets_x += xb.tolist()
                    island_offsets_y += yb.tolist()
                    # Island-ID labels start hidden; toggled by the 'n' key.
                    marker = ax.text(N.max(xb)+2, N.max(yb), str(isl.island_id),
                                     color='#afeeee', clip_on=True)
                    marker.set_visible(not marker.get_visible())
                    markers.append(marker)
                    # draw the gaussians with one colour per source or island
                    # (if gaul2srl was not run)
                    if hasattr(img, 'nsrc'):
                        nsrc = len(isl.sources)
                        for isrc in range(nsrc):
                            col = colours[isrc % 6]
                            # NOTE: isrc/6 is Python-2 integer division.
                            style = styles[isrc/6 % 3]
                            src = isl.sources[isrc]
                            for g in src.gaussians:
                                if hasattr(g, 'valid'):
                                    valid = g.valid
                                else:
                                    valid = True
                                if g.jlevel == 0 and valid and g.gaus_num >= 0:
                                    gidx = g.gaus_num
                                    e = Ellipse(xy=g.centre_pix, width=g.size_pix[0],
                                                height=g.size_pix[1], angle=g.size_pix[2]+90.0)
                                    ax.add_artist(e)
                                    # Picker + stashed IDs let on_pick report
                                    # this Gaussian when clicked.
                                    e.set_picker(3)
                                    e.set_clip_box(ax.bbox)
                                    e.set_facecolor(col)
                                    e.set_alpha(0.5)
                                    e.gaus_id = gidx
                                    e.src_id = src.source_id
                                    e.jlevel = g.jlevel
                                    e.isl_id = g.island_id
                                    e.tflux = g.total_flux
                                    e.pflux = g.peak_flux
                                    e.centre_sky = g.centre_sky
                if len(img.islands) > 0:
                    island_offsets = zip(N.array(island_offsets_x), N.array(island_offsets_y))
                    isl_borders = collections.AsteriskPolygonCollection(4, offsets=island_offsets, color=border_color,
                                                                        transOffset=ax.transData, sizes=(10.0,))
                    ax.add_collection(isl_borders)
                # Wavelet (atrous) Gaussians are drawn as open red ellipses.
                if hasattr(img, 'gaussians'):
                    for atrg in img.gaussians:
                        if atrg.jlevel > 0 and atrg.gaus_num >= 0:
                            col = 'r'
                            style = '-'
                            gidx = atrg.gaus_num
                            e = Ellipse(xy=atrg.centre_pix, width=atrg.size_pix[0], height=atrg.size_pix[1], angle=atrg.size_pix[2]+90.0)
                            ax.add_artist(e)
                            e.set_picker(3)
                            e.set_clip_box(ax.bbox)
                            e.set_edgecolor(col)
                            e.set_facecolor('none')
                            e.set_alpha(0.8)
                            e.gaus_id = gidx
                            e.src_id = atrg.source_id
                            e.jlevel = atrg.jlevel
                            e.isl_id = atrg.island_id
                            e.tflux = atrg.total_flux
                            e.pflux = atrg.peak_flux
                            e.centre_sky = atrg.centre_sky
            if 'Flagged' in titles[i]:
                for iisl, isl in enumerate(img.islands):
                    ax = pl.gca()
                    style = '-'
                    for ig, g in enumerate(isl.fgaul):
                        col = colours[ig % 6]
                        ellx, elly = func.drawellipse(g)
                        gline, = ax.plot(ellx, elly, color = col,
                                         linestyle = style, picker=3)
                        gline.flag = g.flag
            # PSF panels show linear values; everything else log-scaled.
            if 'PSF' in titles[i]:
                cmd = 'ax' + str(i+1) + ".imshow(N.transpose(im), origin=origin, "\
                    "interpolation='nearest', cmap=gray_palette)"
            else:
                cmd = 'ax' + str(i+1) + ".imshow(N.transpose(im), origin=origin, "\
                    "interpolation='nearest',vmin=vmin, vmax=vmax, cmap=gray_palette)"
            exec cmd
            # Attach the per-panel coordinate formatter by name.
            cmd = 'ax' + str(i+1) + '.format_coord = format_coord_'+names[i]
            exec cmd
            pl.title(titles[i])
        elif image == 'seds':
            cmd = 'ax' + str(i+1) + ' = pl.subplot(' + str(numx) + \
                ', ' + str(numy) + ', ' + str(i+1) + ')'
            exec cmd
            ax = pl.gca()
            plot_sed(sed_src, ax)
        elif image == 'wavelets':
            # Dead branch at present: 'wavelets' is never appended above
            # (pyramid plotting is commented out as unsupported).
            if i == index_first_waveplot:
                for j in range(len(j_with_gaus)):
                    cmd = 'ax' + str(j+i+1) + ' = pl.subplot(' + str(numx) + \
                        ', ' + str(numy) + ', ' + str(j+i+1) + ', sharex=ax1, '+\
                        'sharey=ax1)'
                    exec cmd
                    pl.title('Pyramidal Sources for\nWavelet Scale J = ' +
                             str(j_with_gaus[j]))
                for pyr in img.pyrsrcs:
                    for iisl, isl in enumerate(pyr.islands):
                        jj = pyr.jlevels[iisl]
                        jindx = j_with_gaus.index(jj)
                        col = colours[pyr.pyr_id % 6]
                        ind = N.where(~isl.mask_active)
                        cmd = "ax" + str(jindx + index_first_waveplot + 1) + \
                            ".plot(ind[0]+isl.origin[0], "\
                            "ind[1]+isl.origin[1], '.', color=col)"
                        exec cmd
    fig.canvas.mpl_connect('key_press_event', on_press)
    fig.canvas.mpl_connect('pick_event', on_pick)
    pl.show()
    pl.close('all')
def on_pick(event):
    """Mouse-pick handler: report the clicked Gaussian, optionally
    broadcast it over SAMP, and switch the SED panel to its source."""
    global images, srcid_cur, samp_client, samp_key, do_broadcast, samp_gaul_table_url, samp_srl_table_url
    g = event.artist
    if hasattr(g, 'gaus_id'):
        # IDs and fluxes were stashed on the artist by plotresults().
        gaus_id = g.gaus_id
        src_id = g.src_id
        isl_id = g.isl_id
        tflux = g.tflux
        pflux = g.pflux
        wav_j = g.jlevel
        if wav_j == 0:
            print 'Gaussian #' + str(gaus_id) + ' (in src #' + str(src_id) + \
                ', isl #' + str(isl_id) + '): F_tot = ' + str(round(tflux,4)) + \
                ' Jy, F_peak = ' + str(round(pflux,4)) + ' Jy/beam'
        else:
            print 'Gaussian #' + str(gaus_id) + ' (in src #' + str(src_id) + \
                ', isl #' + str(isl_id) + ', wav #' + str(wav_j) + \
                '): F_tot = ' + str(round(tflux,3)) + ' Jy, F_peak = ' + \
                str(round(pflux,4)) + ' Jy/beam'
        # Transmit src_id, gaus_id, and coordinates to SAMP Hub (if we are connected)
        if do_broadcast and samp_key is not None:
            if samp_gaul_table_url is not None:
                func.send_highlight_row(samp_client, samp_key, samp_gaul_table_url, gaus_id)
            if samp_srl_table_url is not None:
                func.send_highlight_row(samp_client, samp_key, samp_srl_table_url, src_id)
            func.send_coords(samp_client, samp_key, g.centre_sky)
        # Change source SED
        # First check that SEDs are being plotted and that the selected Gaussian
        # is from the zeroth wavelet image
        has_sed = False
        if 'seds' in images and wav_j == 0:
            has_sed = True
        if not has_sed:
            return
        ax_indx = images.index('seds')
        sed_src = get_src(src_list, src_id)
        if srcid_cur == src_id:
            return
        srcid_cur = src_id
        axes_list = fig.get_axes()
        for axindx, ax in enumerate(axes_list):
            if images[axindx] == 'seds':
                plot_sed(sed_src, ax)
    else:
        # Artists without gaus_id are flagged-Gaussian outlines.
        print 'Flagged Gaussian (flag = ' + str(g.flag) + '; use "' + \
            "help 'flagging_opts'" + '" for flag meanings)'
    pl.draw()
def on_press(event):
    """Handle keypresses

    '0' resets the colour scaling, 'm' prompts for new min/max limits,
    'c' switches the SED panel to another source, 'i' prints statistics
    for the visible region, and 'n' toggles the island-ID labels.
    """
    from interface import raw_input_no_history
    import numpy
    global img_ch0, img_rms, img_mean, img_gaus_mod, img_shap_mod
    global pixels_per_beam, vmin, vmax, vmin_cur, vmax_cur, img_pi
    global ch0min, ch0max, low, fig, images, src_list, srcid_cur
    global markers
    if event.key == '0':
        print 'Resetting limits to defaults (%.4f -- %.4f Jy/beam)' \
            % (pow(10, vmin)-low,
               pow(10, vmax)-low)
        axes_list = fig.get_axes()
        for axindx, ax in enumerate(axes_list):
            if images[axindx] != 'wavelets' and images[axindx] != 'seds':
                im = ax.get_images()[0]
                im.set_clim(vmin, vmax)
        vmin_cur = vmin
        vmax_cur = vmax
        pl.draw()
    if event.key == 'm':
        # Modify scaling
        # First check that there are images to modify
        has_image = False
        for im in images:
            if isinstance(im, numpy.ndarray):
                has_image = True
        if not has_image:
            return
        # Prompt loop: keep asking until the input parses as a float;
        # an empty answer keeps the current limit.
        minscl = 'a'
        while isinstance(minscl, str):
            try:
                if minscl == '':
                    minscl = pow(10, vmin_cur) - low
                    break
                minscl = float(minscl)
            except ValueError:
                prompt = "Enter min value (current = %.4f Jy/beam) : " % (pow(10, vmin_cur)-low,)
                try:
                    minscl = raw_input_no_history(prompt)
                except RuntimeError:
                    print 'Sorry, unable to change scaling.'
                    return
        minscl = N.log10(minscl + low)
        maxscl = 'a'
        while isinstance(maxscl, str):
            try:
                if maxscl == '':
                    maxscl = pow(10, vmax_cur) - low
                    break
                maxscl = float(maxscl)
            except ValueError:
                prompt = "Enter max value (current = %.4f Jy/beam) : " % (pow(10, vmax_cur)-low,)
                try:
                    maxscl = raw_input_no_history(prompt)
                except RuntimeError:
                    print 'Sorry, unable to change scaling.'
                    return
        maxscl = N.log10(maxscl + low)
        if maxscl <= minscl:
            print 'Max value must be greater than min value!'
            return
        axes_list = fig.get_axes()
        for axindx, ax in enumerate(axes_list):
            if images[axindx] != 'wavelets' and images[axindx] != 'seds':
                im = ax.get_images()[0]
                im.set_clim(minscl, maxscl)
        vmin_cur = minscl
        vmax_cur = maxscl
        pl.draw()
    if event.key == 'c':
        # Change source SED
        # First check that SEDs are being plotted
        has_sed = False
        if 'seds' in images:
            has_sed = True
        if not has_sed:
            return
        # Prompt loop: empty answer keeps the current source id.
        srcid = 'a'
        while isinstance(srcid, str):
            try:
                if srcid == '':
                    srcid = srcid_cur
                    break
                srcid = int(srcid)
            except ValueError:
                prompt = "Enter source ID (current = %i) : " % (srcid_cur,)
                try:
                    srcid = raw_input_no_history(prompt)
                except RuntimeError:
                    print 'Sorry, unable to change source.'
                    return
        ax_indx = images.index('seds')
        sed_src = get_src(src_list, srcid)
        if sed_src is None:
            print 'Source not found!'
            return
        srcid_cur = srcid
        axes_list = fig.get_axes()
        for axindx, ax in enumerate(axes_list):
            if images[axindx] == 'seds':
                plot_sed(sed_src, ax)
        pl.draw()
    if event.key == 'i':
        # Print info about visible region
        has_image = False
        axes_list = fig.get_axes()
        # Get limits of visible region
        for axindx, ax in enumerate(axes_list):
            if images[axindx] != 'wavelets' and images[axindx] != 'seds':
                xmin, xmax = ax.get_xlim()
                ymin, ymax = ax.get_ylim()
                has_image = True
                break
        if not has_image:
            return
        # Clamp the view limits to the image bounds before slicing.
        if xmin < 0:
            xmin = 0
        if xmax > img_ch0.shape[0]:
            xmax = img_ch0.shape[0]
        if ymin < 0:
            ymin = 0
        if ymax > img_ch0.shape[1]:
            ymax = img_ch0.shape[1]
        # Sums of pixel values are converted to flux densities by
        # dividing out the beam area (pixels_per_beam).
        flux = N.nansum(img_ch0[xmin:xmax, ymin:ymax])/pixels_per_beam
        mask = N.isnan(img_ch0[xmin:xmax, ymin:ymax])
        num_pix_unmasked = float(N.size(N.where(mask == False), 1))
        mean_rms = N.nansum(img_rms[xmin:xmax, ymin:ymax])/num_pix_unmasked
        mean_map_flux = N.nansum(img_mean[xmin:xmax, ymin:ymax])/pixels_per_beam
        if img_gaus_mod is None:
            gaus_mod_flux = 0.0
        else:
            gaus_mod_flux = N.nansum(img_gaus_mod[xmin:xmax, ymin:ymax])/pixels_per_beam
        print 'Visible region (%i:%i, %i:%i) :' % (xmin, xmax, ymin, ymax)
        print ' ch0 flux density from sum of pixels ... : %f Jy'\
            % (flux,)
        print ' Background mean map flux density ...... : %f Jy'\
            % (mean_map_flux,)
        print ' Gaussian model flux density ........... : %f Jy'\
            % (gaus_mod_flux,)
        if img_shap_mod is not None:
            shap_mod_flux = N.nansum(img_shap_mod[xmin:xmax, ymin:ymax])/pixels_per_beam
            print ' Shapelet model flux density ........... : %f Jy'\
                % (shap_mod_flux,)
        print ' Mean rms (from rms map) ............... : %f Jy/beam'\
            % (mean_rms,)
    if event.key == 'n':
        # Show/Hide island numbers
        if markers:
            for marker in markers:
                marker.set_visible(not marker.get_visible())
            pl.draw()
# The following functions add ra, dec and flux density to the
# coordinates in the lower-right-hand corner of the figure window.
# Since each axis needs its own function (to return its particular
# flux), we need a separate function for each subplot.
def format_coord_ch0(x, y):
    """Return the status-bar coordinate string for the ch0 image."""
    # Module-level image is only read, so no `global` statement is needed.
    return make_coord_str(x, y, img_ch0)
def format_coord_ch0_pi(x, y):
    """Return the status-bar coordinate string for the polarized-intensity image."""
    return make_coord_str(x, y, img_pi)
def format_coord_rms(x, y):
    """Return the status-bar coordinate string for the rms image."""
    return make_coord_str(x, y, img_rms)
def format_coord_mean(x, y):
    """Return the status-bar coordinate string for the mean image."""
    return make_coord_str(x, y, img_mean)
def format_coord_gaus_mod(x, y):
    """Return the status-bar coordinate string for the Gaussian model image."""
    return make_coord_str(x, y, img_gaus_mod)
def format_coord_shap_mod(x, y):
    """Return the status-bar coordinate string for the shapelet model image."""
    return make_coord_str(x, y, img_shap_mod)
def format_coord_gaus_resid(x, y):
    """Return the status-bar coordinate string for the Gaussian residual image."""
    return make_coord_str(x, y, img_gaus_resid)
def format_coord_shap_resid(x, y):
    """Return the status-bar coordinate string for the shapelet residual image."""
    return make_coord_str(x, y, img_shap_resid)
def format_coord_psf_maj(x, y):
    """Return the status-bar coordinate string for the PSF major-axis image."""
    return make_coord_str(x, y, img_psf_maj, unit='arcsec')
def format_coord_psf_min(x, y):
    """Return the status-bar coordinate string for the PSF minor-axis image."""
    return make_coord_str(x, y, img_psf_min, unit='arcsec')
def format_coord_psf_pa(x, y):
    """Return the status-bar coordinate string for the PSF position-angle image."""
    return make_coord_str(x, y, img_psf_pa, unit='degrees')
def xy_to_radec_str(x, y):
    """Convert x, y in image coordinates to sexagesimal RA/Dec strings.

    Returns a (ra_string, dec_string) pair formatted as hh:mm:ss.s and
    +dd:mm:ss.s. Relies on the module-level ``pix2sky`` transform set up
    when the figure was created.
    """
    from output import ra2hhmmss, dec2ddmmss
    global pix2sky
    ra, dec = pix2sky([x, y])
    # Zero-pad each field; seconds keep one decimal place.
    ra = ra2hhmmss(ra)
    sra = str(ra[0]).zfill(2)+':'+str(ra[1]).zfill(2)+':'+str("%.1f" % (ra[2])).zfill(3)
    dec = dec2ddmmss(dec)
    # dec[3] carries the sign returned by dec2ddmmss.
    decsign = ('-' if dec[3] < 0 else '+')
    sdec = decsign+str(dec[0]).zfill(2)+':'+str(dec[1]).zfill(2)+':'+str("%.1f" % (dec[2])).zfill(3)
    return sra, sdec
def make_coord_str(x, y, im, unit='Jy/beam'):
    """Build the 'x, y, RA, Dec, flux' string shown in the figure window."""
    rastr, decstr = xy_to_radec_str(x, y)
    # Round to the nearest pixel before indexing the image.
    col, row = int(x + 0.5), int(y + 0.5)
    numcols, numrows = im.shape
    if 0 <= col < numcols and 0 <= row < numrows:
        flux = im[col, row]
        return 'x=%1.1f, y=%1.1f, RA=%s, Dec=%s, F=%+1.4f %s' % (x, y, rastr, decstr, flux, unit)
    return 'x=%1.1f, y=%1.1f' % (x, y)
def plot_sed(src, ax):
    """Plots the SED for source 'src' to axis 'ax'

    Data points and the fitted power law are drawn in log-log space
    (frequency in MHz, flux density in Jy).
    """
    global sky2pix
    global fig
    ax.cla()
    norm = src.spec_norm
    spin = src.spec_indx
    espin = src.e_spec_indx  # NOTE(review): not used in the plot below — confirm intended
    y = N.array(src.specin_flux)
    ey = N.array(src.specin_fluxE)
    x = N.array(src.specin_freq)
    # Fractional flux errors are used directly as log-space error bars.
    ax.errorbar(N.log10(x/1e6), N.log10(y), yerr=ey/y, fmt='bo')
    # Fitted power law: log(F) = log(norm) + alpha * log(nu/nu0).
    ax.plot(N.log10(x/1e6), N.log10(norm)+N.log10(x/src.specin_freq0)*spin,
            '-g', label="alpha = %.2f" % (spin,))
    pos = sky2pix(src.posn_sky_centroid)
    xpos = int(pos[0])
    ypos = int(pos[1])
    pl.title('SED of source #'+str(src.source_id)+'\n'
             +'(x = '+str(xpos)+', y = '+str(ypos)+')')
    pl.xlabel('log Frequency (MHz)')
    pl.ylabel('log Flux Density (Jy)')
    pl.legend()
def get_src(src_list, srcid):
    """Returns the source for srcid or None if not found"""
    return next((src for src in src_list if src.source_id == srcid), None)
|
gpl-3.0
|
thinkopensolutions/geraldo
|
site/newsite/django_1_0/django/db/backends/postgresql_psycopg2/base.py
|
14
|
3076
|
"""
PostgreSQL database backend for Django.
Requires psycopg 2: http://initd.org/projects/psycopg2
"""
from django.db.backends import BaseDatabaseWrapper, BaseDatabaseFeatures
from django.db.backends.postgresql.operations import DatabaseOperations as PostgresqlDatabaseOperations
from django.utils.safestring import SafeUnicode
try:
    import psycopg2 as Database
    import psycopg2.extensions
except ImportError, e:
    from django.core.exceptions import ImproperlyConfigured
    raise ImproperlyConfigured("Error loading psycopg2 module: %s" % e)

# Re-export the DB-API exception classes so callers can catch them via
# this backend module without importing psycopg2 directly.
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError

# Return text columns as unicode objects, and quote SafeUnicode values
# like ordinary strings when used as query parameters.
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_adapter(SafeUnicode, psycopg2.extensions.QuotedString)
class DatabaseFeatures(BaseDatabaseFeatures):
    """Feature flags for the psycopg2 PostgreSQL backend."""
    # psycopg2 returns real datetime objects, so no string-to-datetime
    # cast is needed (unlike the psycopg1 backend).
    needs_datetime_string_cast = False
class DatabaseOperations(PostgresqlDatabaseOperations):
    """PostgreSQL operations, specialized for psycopg2 cursors."""
    def last_executed_query(self, cursor, sql, params):
        # With psycopg2, cursor objects have a "query" attribute that is the
        # exact query sent to the database. See docs here:
        # http://www.initd.org/tracker/psycopg/wiki/psycopg2_documentation#postgresql-status-message-and-executed-query
        return cursor.query
class DatabaseWrapper(BaseDatabaseWrapper):
    """Database connection wrapper for PostgreSQL via psycopg2."""
    features = DatabaseFeatures()
    ops = DatabaseOperations()
    # SQL operator fragment for each queryset lookup type; the %s is the
    # parameter placeholder filled in by the query layer.
    operators = {
        'exact': '= %s',
        'iexact': 'ILIKE %s',
        'contains': 'LIKE %s',
        'icontains': 'ILIKE %s',
        'regex': '~ %s',
        'iregex': '~* %s',
        'gt': '> %s',
        'gte': '>= %s',
        'lt': '< %s',
        'lte': '<= %s',
        'startswith': 'LIKE %s',
        'endswith': 'LIKE %s',
        'istartswith': 'ILIKE %s',
        'iendswith': 'ILIKE %s',
    }

    def _cursor(self, settings):
        """Return a cursor, creating and configuring the connection on
        first use.

        The libpq connection string is assembled from the DATABASE_*
        settings; the time zone is set only on the first cursor of a
        fresh connection.
        """
        set_tz = False
        if self.connection is None:
            set_tz = True
            if settings.DATABASE_NAME == '':
                from django.core.exceptions import ImproperlyConfigured
                raise ImproperlyConfigured("You need to specify DATABASE_NAME in your Django settings file.")
            conn_string = "dbname=%s" % settings.DATABASE_NAME
            if settings.DATABASE_USER:
                conn_string = "user=%s %s" % (settings.DATABASE_USER, conn_string)
            if settings.DATABASE_PASSWORD:
                # NOTE(review): only the password is quoted here — a user or
                # db name containing spaces would break the string; confirm.
                conn_string += " password='%s'" % settings.DATABASE_PASSWORD
            if settings.DATABASE_HOST:
                conn_string += " host=%s" % settings.DATABASE_HOST
            if settings.DATABASE_PORT:
                conn_string += " port=%s" % settings.DATABASE_PORT
            self.connection = Database.connect(conn_string, **self.options)
            self.connection.set_isolation_level(1) # make transactions transparent to all cursors
            self.connection.set_client_encoding('UTF8')
        cursor = self.connection.cursor()
        # Return naive datetimes; Django handles time zones itself.
        cursor.tzinfo_factory = None
        if set_tz:
            cursor.execute("SET TIME ZONE %s", [settings.TIME_ZONE])
        return cursor
|
lgpl-3.0
|
Anik1199/android_kernel_mediatek_sprout
|
scripts/rt-tester/rt-tester.py
|
11005
|
5307
|
#!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <tglx@linutronix.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0       # 1 = suppress progress output (-q)
test = 0        # 1 = syntax-check only, print target filenames (-t)
comments = 0    # 0 = skip comments, 1 = print after first command, 2 = printing

# sysfs interface of the in-kernel rt-mutex tester; per-thread files are
# <sysfsprefix><tid><statusfile|commandfile>.
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"

# Command opcodes: script keyword -> numeric opcode written to the
# tester's command file.
cmd_opcodes = {
    "schedother" : "1",
    "schedfifo" : "2",
    "lock" : "3",
    "locknowait" : "4",
    "lockint" : "5",
    "lockintnowait" : "6",
    "lockcont" : "7",
    "unlock" : "8",
    "signal" : "11",
    "resetevent" : "98",
    "reset" : "99",
    }
# Test opcodes: script keyword -> [status-field letter, comparison,
# fixed expected value or None when the value comes from the test line].
# P = priority, N = normal priority, M = mutex state digit,
# O = opcode, E = event counter.
test_opcodes = {
    "prioeq" : ["P" , "eq" , None],
    "priolt" : ["P" , "lt" , None],
    "priogt" : ["P" , "gt" , None],
    "nprioeq" : ["N" , "eq" , None],
    "npriolt" : ["N" , "lt" , None],
    "npriogt" : ["N" , "gt" , None],
    "unlocked" : ["M" , "eq" , 0],
    "trylock" : ["M" , "eq" , 1],
    "blocked" : ["M" , "eq" , 2],
    "blockedwake" : ["M" , "eq" , 3],
    "locked" : ["M" , "eq" , 4],
    "opcodeeq" : ["O" , "eq" , None],
    "opcodelt" : ["O" , "lt" , None],
    "opcodegt" : ["O" , "gt" , None],
    "eventeq" : ["E" , "eq" , None],
    "eventlt" : ["E" , "lt" , None],
    "eventgt" : ["E" , "gt" , None],
    }
# Print usage information
def usage():
    """Print command-line usage information to stdout."""
    print "rt-tester.py <-c -h -q -t> <testfile>"
    print " -c display comments after first command"
    print " -h help"
    print " -q quiet mode"
    print " -t test mode (syntax check)"
    print " testfile: read test specification from testfile"
    print " otherwise from stdin"
    return
# Print progress when not in quiet mode
def progress(str):
    """Print *str* unless quiet mode (-q) is enabled."""
    if not quiet:
        print str
# Analyse a status value
def analyse(val, top, arg):
    """Compare one status value against a test opcode.

    val: status value string read from the tester's status file.
    top: [field letter, comparison ("eq"/"lt"/"gt"), fixed value or None].
    arg: data field from the test line (digit index for "M", opcode name
         or number for "O", plain integer otherwise).
    Returns 1 when the comparison holds, 0 otherwise.
    """
    intval = int(val)
    if top[0] == "M":
        # Extract the decimal digit selected by *arg* from the mutex state.
        # BUG FIX: use floor division; plain '/' yields a float under
        # Python 3, which breaks the digit extraction. '//' is identical
        # for Python 2 ints.
        intval = intval // (10 ** int(arg))
        intval = intval % 10
        argval = top[2]
    elif top[0] == "O":
        # Opcode may be given symbolically or numerically.
        argval = int(cmd_opcodes.get(arg, arg))
    else:
        argval = int(arg)
    # progress("%d %s %d" %(intval, top[1], argval))
    if top[1] == "eq" and intval == argval:
        return 1
    if top[1] == "lt" and intval < argval:
        return 1
    if top[1] == "gt" and intval > argval:
        return 1
    return 0
# Parse the commandline
try:
    (options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
    usage()
    sys.exit(1)

# Parse commandline options
for option, value in options:
    if option == "-c":
        comments = 1
    elif option == "-q":
        quiet = 1
    elif option == "-t":
        test = 1
    elif option == '-h':
        usage()
        sys.exit(0)

# Select the input source: first positional argument or stdin.
if arguments:
    try:
        fd = open(arguments[0])
    except Exception,ex:
        sys.stderr.write("File not found %s\n" %(arguments[0]))
        sys.exit(1)
else:
    fd = sys.stdin

linenr = 0

# Read the test patterns.  Each line is "cmd:opcode:threadid:data".
while 1:
    linenr = linenr + 1
    line = fd.readline()
    if not len(line):
        break
    line = line.strip()
    parts = line.split(":")
    if not parts or len(parts) < 1:
        continue
    if len(parts[0]) == 0:
        continue
    if parts[0].startswith("#"):
        # Comment line; echoed only once a command was seen (-c).
        if comments > 1:
            progress(line)
        continue
    if comments == 1:
        comments = 2
    progress(line)
    cmd = parts[0].strip().lower()
    opc = parts[1].strip().lower()
    tid = parts[2].strip()
    dat = parts[3].strip()
    try:
        # Test ("t") or wait for ("w") a status value
        if cmd == "t" or cmd == "w":
            testop = test_opcodes[opc]
            fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
            if test:
                print fname
                continue
            # Poll the status file; "t" checks once, "w" loops until
            # the condition is met.
            while 1:
                query = 1
                fsta = open(fname, 'r')
                status = fsta.readline().strip()
                fsta.close()
                stat = status.split(",")
                for s in stat:
                    s = s.strip()
                    if s.startswith(testop[0]):
                        # Separate status value
                        val = s[2:].strip()
                        query = analyse(val, testop, dat)
                        break
                if query or cmd == "t":
                    break
                progress("   " + status)
            if not query:
                sys.stderr.write("Test failed in line %d\n" %(linenr))
                sys.exit(1)
        # Issue a command ("c") to the tester
        elif cmd == "c":
            cmdnr = cmd_opcodes[opc]
            # Build command string and sys filename
            cmdstr = "%s:%s" %(cmdnr, dat)
            fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
            if test:
                print fname
                continue
            fcmd = open(fname, 'w')
            fcmd.write(cmdstr)
            fcmd.close()
    except Exception,ex:
        sys.stderr.write(str(ex))
        sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
        if not test:
            fd.close()
        sys.exit(1)

# Normal exit pass
print "Pass"
sys.exit(0)
|
gpl-2.0
|
schets/scikit-learn
|
sklearn/utils/_scipy_sparse_lsqr_backport.py
|
378
|
18021
|
"""Sparse Equations and Least Squares.
The original Fortran code was written by C. C. Paige and M. A. Saunders as
described in
C. C. Paige and M. A. Saunders, LSQR: An algorithm for sparse linear
equations and sparse least squares, TOMS 8(1), 43--71 (1982).
C. C. Paige and M. A. Saunders, Algorithm 583; LSQR: Sparse linear
equations and least-squares problems, TOMS 8(2), 195--209 (1982).
It is licensed under the following BSD license:
Copyright (c) 2006, Systems Optimization Laboratory
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Stanford University nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The Fortran code was translated to Python for use in CVXOPT by Jeffery
Kline with contributions by Mridul Aanjaneya and Bob Myhill.
Adapted for SciPy by Stefan van der Walt.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['lsqr']
import numpy as np
from math import sqrt
from scipy.sparse.linalg.interface import aslinearoperator
eps = np.finfo(np.float64).eps
def _sym_ortho(a, b):
"""
Stable implementation of Givens rotation.
Notes
-----
The routine 'SymOrtho' was added for numerical stability. This is
recommended by S.-C. Choi in [1]_. It removes the unpleasant potential of
``1/eps`` in some important places (see, for example text following
"Compute the next plane rotation Qk" in minres.py).
References
----------
.. [1] S.-C. Choi, "Iterative Methods for Singular Linear Equations
and Least-Squares Problems", Dissertation,
http://www.stanford.edu/group/SOL/dissertations/sou-cheng-choi-thesis.pdf
"""
if b == 0:
return np.sign(a), 0, abs(a)
elif a == 0:
return 0, np.sign(b), abs(b)
elif abs(b) > abs(a):
tau = a / b
s = np.sign(b) / sqrt(1 + tau * tau)
c = s * tau
r = b / s
else:
tau = b / a
c = np.sign(a) / sqrt(1+tau*tau)
s = c * tau
r = a / c
return c, s, r
def lsqr(A, b, damp=0.0, atol=1e-8, btol=1e-8, conlim=1e8,
         iter_lim=None, show=False, calc_var=False):
    """Find the least-squares solution to a large, sparse, linear system
    of equations.

    The function solves ``Ax = b`` or ``min ||b - Ax||^2`` or
    ``min ||Ax - b||^2 + d^2 ||x||^2``.

    The matrix A may be square or rectangular (over-determined or
    under-determined), and may have any rank.

    ::

      1. Unsymmetric equations -- solve  A*x = b

      2. Linear least squares  -- solve  A*x = b
                                  in the least-squares sense

      3. Damped least squares  -- solve  (   A    )*x = ( b )
                                         ( damp*I )     ( 0 )
                                  in the least-squares sense

    Parameters
    ----------
    A : {sparse matrix, ndarray, LinearOperatorLinear}
        Representation of an m-by-n matrix. It is required that
        the linear operator can produce ``Ax`` and ``A^T x``.
    b : (m,) ndarray
        Right-hand side vector ``b``.
    damp : float
        Damping coefficient.
    atol, btol : float, default 1.0e-8
        Stopping tolerances. If both are 1.0e-9 (say), the final
        residual norm should be accurate to about 9 digits. (The
        final x will usually have fewer correct digits, depending on
        cond(A) and the size of damp.)
    conlim : float
        Another stopping tolerance. lsqr terminates if an estimate of
        ``cond(A)`` exceeds `conlim`. For compatible systems ``Ax =
        b``, `conlim` could be as large as 1.0e+12 (say). For
        least-squares problems, conlim should be less than 1.0e+8.
        Maximum precision can be obtained by setting ``atol = btol =
        conlim = zero``, but the number of iterations may then be
        excessive.
    iter_lim : int
        Explicit limitation on number of iterations (for safety).
    show : bool
        Display an iteration log.
    calc_var : bool
        Whether to estimate diagonals of ``(A'A + damp^2*I)^{-1}``.

    Returns
    -------
    x : ndarray of float
        The final solution.
    istop : int
        Gives the reason for termination.
        1 means x is an approximate solution to Ax = b.
        2 means x approximately solves the least-squares problem.
    itn : int
        Iteration number upon termination.
    r1norm : float
        ``norm(r)``, where ``r = b - Ax``.
    r2norm : float
        ``sqrt( norm(r)^2 + damp^2 * norm(x)^2 )``. Equal to `r1norm` if
        ``damp == 0``.
    anorm : float
        Estimate of Frobenius norm of ``Abar = [[A]; [damp*I]]``.
    acond : float
        Estimate of ``cond(Abar)``.
    arnorm : float
        Estimate of ``norm(A'*r - damp^2*x)``.
    xnorm : float
        ``norm(x)``
    var : ndarray of float
        If ``calc_var`` is True, estimates all diagonals of
        ``(A'A)^{-1}`` (if ``damp == 0``) or more generally ``(A'A +
        damp^2*I)^{-1}``. This is well defined if A has full column
        rank or ``damp > 0``. (Not sure what var means if ``rank(A)
        < n`` and ``damp = 0.``)

    Notes
    -----
    LSQR uses an iterative method to approximate the solution. The
    number of iterations required to reach a certain accuracy depends
    strongly on the scaling of the problem. Poor scaling of the rows
    or columns of A should therefore be avoided where possible.

    For example, in problem 1 the solution is unaltered by
    row-scaling. If a row of A is very small or large compared to
    the other rows of A, the corresponding row of ( A b ) should be
    scaled up or down.

    In problems 1 and 2, the solution x is easily recovered
    following column-scaling. Unless better information is known,
    the nonzero columns of A should be scaled so that they all have
    the same Euclidean norm (e.g., 1.0).

    In problem 3, there is no freedom to re-scale if damp is
    nonzero. However, the value of damp should be assigned only
    after attention has been paid to the scaling of A.

    The parameter damp is intended to help regularize
    ill-conditioned systems, by preventing the true solution from
    being very large. Another aid to regularization is provided by
    the parameter acond, which may be used to terminate iterations
    before the computed solution becomes very large.

    If some initial estimate ``x0`` is known and if ``damp == 0``,
    one could proceed as follows:

    1. Compute a residual vector ``r0 = b - A*x0``.
    2. Use LSQR to solve the system ``A*dx = r0``.
    3. Add the correction dx to obtain a final solution ``x = x0 + dx``.

    This requires that ``x0`` be available before and after the call
    to LSQR. To judge the benefits, suppose LSQR takes k1 iterations
    to solve A*x = b and k2 iterations to solve A*dx = r0.
    If x0 is "good", norm(r0) will be smaller than norm(b).
    If the same stopping tolerances atol and btol are used for each
    system, k1 and k2 will be similar, but the final solution x0 + dx
    should be more accurate. The only way to reduce the total work
    is to use a larger stopping tolerance for the second system.
    If some value btol is suitable for A*x = b, the larger value
    btol*norm(b)/norm(r0) should be suitable for A*dx = r0.

    Preconditioning is another way to reduce the number of iterations.
    If it is possible to solve a related system ``M*x = b``
    efficiently, where M approximates A in some helpful way (e.g. M -
    A has low rank or its elements are small relative to those of A),
    LSQR may converge more rapidly on the system ``A*M(inverse)*z =
    b``, after which x can be recovered by solving M*x = z.

    If A is symmetric, LSQR should not be used!

    Alternatives are the symmetric conjugate-gradient method (cg)
    and/or SYMMLQ. SYMMLQ is an implementation of symmetric cg that
    applies to any symmetric A and will converge more rapidly than
    LSQR. If A is positive definite, there are other implementations
    of symmetric cg that require slightly less work per iteration than
    SYMMLQ (but will take the same number of iterations).

    References
    ----------
    .. [1] C. C. Paige and M. A. Saunders (1982a).
           "LSQR: An algorithm for sparse linear equations and
           sparse least squares", ACM TOMS 8(1), 43-71.
    .. [2] C. C. Paige and M. A. Saunders (1982b).
           "Algorithm 583. LSQR: Sparse linear equations and least
           squares problems", ACM TOMS 8(2), 195-209.
    .. [3] M. A. Saunders (1995). "Solution of sparse rectangular
           systems using LSQR and CRAIG", BIT 35, 588-604.

    """
    A = aslinearoperator(A)
    if len(b.shape) > 1:
        b = b.squeeze()

    m, n = A.shape
    if iter_lim is None:
        iter_lim = 2 * n
    var = np.zeros(n)

    # Human-readable stopping reasons, indexed by istop.
    msg = ('The exact solution is x = 0 ',
           'Ax - b is small enough, given atol, btol ',
           'The least-squares solution is good enough, given atol ',
           'The estimate of cond(Abar) has exceeded conlim ',
           'Ax - b is small enough for this machine ',
           'The least-squares solution is good enough for this machine',
           'Cond(Abar) seems to be too large for this machine ',
           'The iteration limit has been reached ')

    if show:
        print(' ')
        print('LSQR Least-squares solution of Ax = b')
        str1 = 'The matrix A has %8g rows and %8g cols' % (m, n)
        str2 = 'damp = %20.14e calc_var = %8g' % (damp, calc_var)
        str3 = 'atol = %8.2e conlim = %8.2e' % (atol, conlim)
        str4 = 'btol = %8.2e iter_lim = %8g' % (btol, iter_lim)
        print(str1)
        print(str2)
        print(str3)
        print(str4)

    itn = 0
    istop = 0
    nstop = 0
    ctol = 0
    if conlim > 0:
        ctol = 1/conlim
    anorm = 0
    acond = 0
    dampsq = damp**2
    ddnorm = 0
    res2 = 0
    xnorm = 0
    xxnorm = 0
    z = 0
    cs2 = -1
    sn2 = 0

    """
    Set up the first vectors u and v for the bidiagonalization.
    These satisfy beta*u = b, alfa*v = A'u.
    """
    __xm = np.zeros(m)  # a matrix for temporary holding
    __xn = np.zeros(n)  # a matrix for temporary holding
    v = np.zeros(n)
    u = b
    x = np.zeros(n)
    alfa = 0
    beta = np.linalg.norm(u)
    w = np.zeros(n)

    if beta > 0:
        u = (1/beta) * u
        v = A.rmatvec(u)
        alfa = np.linalg.norm(v)

    if alfa > 0:
        v = (1/alfa) * v
        w = v.copy()

    rhobar = alfa
    phibar = beta
    bnorm = beta
    rnorm = beta
    r1norm = rnorm
    r2norm = rnorm

    # Reverse the order here from the original matlab code because
    # there was an error on return when arnorm==0
    arnorm = alfa * beta
    if arnorm == 0:
        print(msg[0])
        return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var

    head1 = ' Itn x[0] r1norm r2norm '
    head2 = ' Compatible LS Norm A Cond A'

    if show:
        print(' ')
        print(head1, head2)
        test1 = 1
        test2 = alfa / beta
        str1 = '%6g %12.5e' % (itn, x[0])
        str2 = ' %10.3e %10.3e' % (r1norm, r2norm)
        str3 = ' %8.1e %8.1e' % (test1, test2)
        print(str1, str2, str3)

    # Main iteration loop.
    while itn < iter_lim:
        itn = itn + 1
        """
        % Perform the next step of the bidiagonalization to obtain the
        % next beta, u, alfa, v. These satisfy the relations
        % beta*u = a*v - alfa*u,
        % alfa*v = A'*u - beta*v.
        """
        u = A.matvec(v) - alfa * u
        beta = np.linalg.norm(u)

        if beta > 0:
            u = (1/beta) * u
            anorm = sqrt(anorm**2 + alfa**2 + beta**2 + damp**2)
            v = A.rmatvec(u) - beta * v
            alfa = np.linalg.norm(v)
            if alfa > 0:
                v = (1 / alfa) * v

        # Use a plane rotation to eliminate the damping parameter.
        # This alters the diagonal (rhobar) of the lower-bidiagonal matrix.
        rhobar1 = sqrt(rhobar**2 + damp**2)
        cs1 = rhobar / rhobar1
        sn1 = damp / rhobar1
        psi = sn1 * phibar
        phibar = cs1 * phibar

        # Use a plane rotation to eliminate the subdiagonal element (beta)
        # of the lower-bidiagonal matrix, giving an upper-bidiagonal matrix.
        cs, sn, rho = _sym_ortho(rhobar1, beta)

        theta = sn * alfa
        rhobar = -cs * alfa
        phi = cs * phibar
        phibar = sn * phibar
        tau = sn * phi

        # Update x and w.
        t1 = phi / rho
        t2 = -theta / rho
        dk = (1 / rho) * w

        x = x + t1 * w
        w = v + t2 * w
        ddnorm = ddnorm + np.linalg.norm(dk)**2

        if calc_var:
            var = var + dk**2

        # Use a plane rotation on the right to eliminate the
        # super-diagonal element (theta) of the upper-bidiagonal matrix.
        # Then use the result to estimate norm(x).
        delta = sn2 * rho
        gambar = -cs2 * rho
        rhs = phi - delta * z
        zbar = rhs / gambar
        xnorm = sqrt(xxnorm + zbar**2)
        gamma = sqrt(gambar**2 + theta**2)
        cs2 = gambar / gamma
        sn2 = theta / gamma
        z = rhs / gamma
        xxnorm = xxnorm + z**2

        # Test for convergence.
        # First, estimate the condition of the matrix Abar,
        # and the norms of rbar and Abar'rbar.
        acond = anorm * sqrt(ddnorm)
        res1 = phibar**2
        res2 = res2 + psi**2
        rnorm = sqrt(res1 + res2)
        arnorm = alfa * abs(tau)

        # Distinguish between
        # r1norm = ||b - Ax|| and
        # r2norm = rnorm in current code
        # = sqrt(r1norm^2 + damp^2*||x||^2).
        # Estimate r1norm from
        # r1norm = sqrt(r2norm^2 - damp^2*||x||^2).
        # Although there is cancellation, it might be accurate enough.
        r1sq = rnorm**2 - dampsq * xxnorm
        r1norm = sqrt(abs(r1sq))
        if r1sq < 0:
            r1norm = -r1norm
        r2norm = rnorm

        # Now use these norms to estimate certain other quantities,
        # some of which will be small near a solution.
        test1 = rnorm / bnorm
        test2 = arnorm / (anorm * rnorm + eps)
        test3 = 1 / (acond + eps)
        t1 = test1 / (1 + anorm * xnorm / bnorm)
        rtol = btol + atol * anorm * xnorm / bnorm

        # The following tests guard against extremely small values of
        # atol, btol or ctol. (The user may have set any or all of
        # the parameters atol, btol, conlim to 0.)
        # The effect is equivalent to the normal tests using
        # atol = eps, btol = eps, conlim = 1/eps.
        if itn >= iter_lim:
            istop = 7
        if 1 + test3 <= 1:
            istop = 6
        if 1 + test2 <= 1:
            istop = 5
        if 1 + t1 <= 1:
            istop = 4

        # Allow for tolerances set by the user.
        if test3 <= ctol:
            istop = 3
        if test2 <= atol:
            istop = 2
        if test1 <= rtol:
            istop = 1

        # See if it is time to print something.
        prnt = False
        if n <= 40:
            prnt = True
        if itn <= 10:
            prnt = True
        if itn >= iter_lim-10:
            prnt = True
        # if itn%10 == 0: prnt = True
        if test3 <= 2*ctol:
            prnt = True
        if test2 <= 10*atol:
            prnt = True
        if test1 <= 10*rtol:
            prnt = True
        if istop != 0:
            prnt = True

        if prnt:
            if show:
                str1 = '%6g %12.5e' % (itn, x[0])
                str2 = ' %10.3e %10.3e' % (r1norm, r2norm)
                str3 = ' %8.1e %8.1e' % (test1, test2)
                str4 = ' %8.1e %8.1e' % (anorm, acond)
                print(str1, str2, str3, str4)

        if istop != 0:
            break

    # End of iteration loop.
    # Print the stopping condition.
    if show:
        print(' ')
        print('LSQR finished')
        print(msg[istop])
        print(' ')
        str1 = 'istop =%8g r1norm =%8.1e' % (istop, r1norm)
        str2 = 'anorm =%8.1e arnorm =%8.1e' % (anorm, arnorm)
        str3 = 'itn =%8g r2norm =%8.1e' % (itn, r2norm)
        str4 = 'acond =%8.1e xnorm =%8.1e' % (acond, xnorm)
        print(str1 + ' ' + str2)
        print(str3 + ' ' + str4)
        print(' ')

    return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var
|
bsd-3-clause
|
jteehan/cfme_tests
|
cfme/intelligence/reports/menus.py
|
5
|
4810
|
# -*- coding: utf-8 -*-
"""Module handling report menus contents"""
from contextlib import contextmanager
from . import Report
from cfme.fixtures import pytest_selenium as sel
from cfme.intelligence.reports.ui_elements import FolderManager
from cfme.web_ui import Region, BootstrapTreeview, Tree, accordion, form_buttons
from cfme.web_ui.multibox import MultiBoxSelect
from utils import version
from utils.appliance.implementations.ui import navigate_to
from utils.log import logger
def reports_tree():
    """Return the roles tree widget; 5.7+ appliances use a Bootstrap treeview."""
    if version.current_version() < '5.7':
        return Tree("//div[@id='menu_roles_treebox']/ul")
    return BootstrapTreeview("menu_roles_treebox")
# Folder manager bound to the left-hand folder listing table.
manager = FolderManager("//div[@id='folder_lists']/table")

# Two-pane selector moving reports between "available" and "selected".
report_select = MultiBoxSelect(
    "//select[@id='available_reports']",
    "//select[@id='selected_reports']",
    "//a[@title='Move selected reports right']/img",
    "//a[@title='Move selected reports left']/img",
)

# Commit/discard controls for report-management changes.
buttons = Region(locators=dict(
    commit="//a[@title='Commit report management changes']/img",
    discard="//a[@title='Discard report management changes']/img",
))

# Button that resets all report menus to the CFME defaults.
default_button = form_buttons.FormButton("Reset All menus to CFME defaults")
def go_to_group(group_name):
    """Navigate to Edit Report Menus and select *group_name* in the accordion."""
    navigate_to(Report, 'EditReportMenus')
    accordion.tree("Edit Report Menus", "All EVM Groups", group_name)
def get_folders(group):
    """Returns list of folders for given user group.

    Args:
        group: User group to check.
    """
    go_to_group(group)
    # Selecting "Top Level" populates the folder manager's field list.
    reports_tree().click_path("Top Level")
    return manager.fields
def get_subfolders(group, folder):
    """Returns list of sub-folders for given user group and folder.

    Args:
        group: User group to check.
        folder: Folder to read.
    """
    go_to_group(group)
    reports_tree().click_path("Top Level", folder)
    return manager.fields
def add_folder(group, folder):
    """Adds a folder under top-level.

    Args:
        group: User group.
        folder: Name of the new folder.
    """
    # BUG FIX: manage_folder(group, folder=None) requires the user group as
    # its first argument; calling it with no arguments raised TypeError.
    with manage_folder(group) as top_level:
        top_level.add(folder)
def add_subfolder(group, folder, subfolder):
    """Adds a subfolder under specified folder.

    Args:
        group: User group.
        folder: Name of the folder.
        subfolder: Name of the new subdfolder.
    """
    # BUG FIX: the group argument was omitted, so the folder name was being
    # passed as the group and the subfolder was managed at the wrong path.
    with manage_folder(group, folder) as fldr:
        fldr.add(subfolder)
def reset_to_default(group):
    """Clicks the `Default` button.

    Args:
        group: Group to set to Default
    """
    go_to_group(group)
    sel.click(default_button)
    # The reset only takes effect after an explicit save.
    sel.click(form_buttons.save)
@contextmanager
def manage_folder(group, folder=None):
    """Context manager to use when modifying the folder contents.

    You can use manager's :py:meth:`FolderManager.bail_out` classmethod to end and discard the
    changes done inside the with block. This context manager does not give the manager as a value to
    the with block so you have to import and use the :py:class:`FolderManager` class manually.

    Args:
        group: User group.
        folder: Which folder to manage. If None, top-level will be managed.

    Returns: Context-managed :py:class:`cfme.intelligence.reports.ui_elements.FolderManager` inst.
    """
    go_to_group(group)
    if folder is None:
        reports_tree().click_path("Top Level")
    else:
        reports_tree().click_path("Top Level", folder)
    try:
        yield manager
    except FolderManager._BailOut:
        # Deliberate bail-out: throw away any staged changes.
        logger.info("Discarding editation modifications on %s", str(repr(manager)))
        manager.discard()
    except:
        # In case of any exception, nothing will be saved
        manager.discard()
        raise  # And reraise the exception
    else:
        # If no exception happens, save!
        manager.commit()
        form_buttons.save()
@contextmanager
def manage_subfolder(group, folder, subfolder):
    """Context manager to use when modifying the subfolder contents.

    You can use manager's :py:meth:`FolderManager.bail_out` classmethod to end and discard the
    changes done inside the with block.

    Args:
        group: User group.
        folder: Parent folder name.
        subfolder: Subfodler name to manage.

    Returns: Context-managed :py:class:`cfme.intelligence.reports.ui_elements.FolderManager` inst.
    """
    go_to_group(group)
    reports_tree().click_path("Top Level", folder, subfolder)
    try:
        # NOTE(review): this yields `report_select` (the report multibox),
        # yet the cleanup below discards/commits via `manager`, unlike
        # manage_folder which yields `manager` — confirm this is intended.
        yield report_select
    except FolderManager._BailOut:
        logger.info("Discarding editation modifications on %s", str(repr(manager)))
        manager.discard()
    except:
        # In case of any exception, nothing will be saved
        manager.discard()
        raise  # And reraise the exception
    else:
        # If no exception happens, save!
        manager.commit()
        form_buttons.save()
|
gpl-2.0
|
ofayans/freeipa
|
ipaserver/plugins/pkinit.py
|
1
|
2832
|
# Authors:
# Simo Sorce <ssorce@redhat.com>
#
# Copyright (C) 2010 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ipalib import api, errors
from ipalib import Str
from ipalib import Object, Command
from ipalib import _
from ipalib.plugable import Registry
from ipapython.dn import DN
__doc__ = _("""
Kerberos pkinit options
Enable or disable anonymous pkinit using the principal
WELLKNOWN/ANONYMOUS@REALM. The server must have been installed with
pkinit support.
EXAMPLES:
Enable anonymous pkinit:
ipa pkinit-anonymous enable
Disable anonymous pkinit:
ipa pkinit-anonymous disable
For more information on anonymous pkinit see:
http://k5wiki.kerberos.org/wiki/Projects/Anonymous_pkinit
""")
register = Registry()
@register()
class pkinit(Object):
    """
    PKINIT Options
    """
    # Framework metadata: internal object name and UI label.
    object_name = _('pkinit')
    label = _('PKINIT')
def valid_arg(ugettext, action):
    """
    Accepts only Enable/Disable.
    """
    if action.lower() not in ('enable', 'disable'):
        raise errors.ValidationError(
            name='action',
            error=_('Unknown command %s') % action
        )
@register()
class pkinit_anonymous(Command):
    __doc__ = _('Enable or Disable Anonymous PKINIT.')

    # The well-known anonymous principal; its nsaccountlock attribute
    # controls whether anonymous PKINIT is allowed.
    princ_name = 'WELLKNOWN/ANONYMOUS@%s' % api.env.realm
    default_dn = DN(('krbprincipalname', princ_name), ('cn', api.env.realm), ('cn', 'kerberos'), api.env.basedn)

    takes_args = (
        Str('action', valid_arg),
    )

    def execute(self, action, **options):
        """Toggle the lock on the anonymous principal entry.

        'enable' clears nsaccountlock; 'disable' sets it to 'TRUE'.
        The LDAP entry is only updated when the state actually changes.
        """
        ldap = self.api.Backend.ldap2
        set_lock = False
        lock = None

        entry_attrs = ldap.get_entry(self.default_dn, ['nsaccountlock'])
        if 'nsaccountlock' in entry_attrs:
            lock = entry_attrs['nsaccountlock'][0].lower()

        if action.lower() == 'enable':
            if lock == 'true':
                set_lock = True
                # NOTE(review): lock is set to None here and written back
                # below — presumably ldap2 deletes the attribute on None;
                # confirm against the backend's update semantics.
                lock = None
        elif action.lower() == 'disable':
            if lock != 'true':
                set_lock = True
                lock = 'TRUE'

        if set_lock:
            entry_attrs['nsaccountlock'] = lock
            ldap.update_entry(entry_attrs)

        return dict(result=True)
|
gpl-3.0
|
feist/pcs
|
pcs/lib/env_tools.py
|
1
|
1365
|
from pcs.lib.cib.resource import remote_node, guest_node
from pcs.lib.xml_tools import get_root
def get_existing_nodes_names(corosync_conf=None, cib=None):
    """Return names of all known corosync, remote and guest nodes."""
    corosync_nodes, remote_and_guest_nodes = __get_nodes(corosync_conf, cib)
    return __get_nodes_names(corosync_nodes, remote_and_guest_nodes)
def get_existing_nodes_names_addrs(corosync_conf=None, cib=None):
    """Return (names, addrs) of all known corosync, remote and guest nodes."""
    corosync_nodes, remote_and_guest_nodes = __get_nodes(corosync_conf, cib)
    names = __get_nodes_names(corosync_nodes, remote_and_guest_nodes)
    addrs = __get_nodes_addrs(corosync_nodes, remote_and_guest_nodes)
    return (names, addrs)
def __get_nodes(corosync_conf=None, cib=None):
    """Collect (corosync nodes, remote+guest nodes) from the given configs."""
    corosync_nodes = []
    if corosync_conf:
        corosync_nodes = corosync_conf.get_nodes()
    remote_and_guest_nodes = []
    if cib is not None:
        root = get_root(cib)
        remote_and_guest_nodes = (
            remote_node.find_node_list(root) + guest_node.find_node_list(root)
        )
    return corosync_nodes, remote_and_guest_nodes
def __get_nodes_names(corosync_nodes, remote_and_guest_nodes):
    """Names of all nodes; corosync nodes without a name are skipped."""
    names = [node.name for node in corosync_nodes if node.name]
    names.extend(node.name for node in remote_and_guest_nodes)
    return names
def __get_nodes_addrs(corosync_nodes, remote_and_guest_nodes):
    """Addresses of all nodes (one addr per remote/guest, all per corosync)."""
    addrs = [node.addr for node in remote_and_guest_nodes]
    for node in corosync_nodes:
        addrs.extend(node.addrs_plain)
    return addrs
|
gpl-2.0
|
nanolearningllc/edx-platform-cypress
|
lms/djangoapps/instructor_analytics/tests/test_distributions.py
|
165
|
5109
|
""" Tests for analytics.distributions """
from django.test import TestCase
from nose.tools import raises
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from instructor_analytics.distributions import profile_distribution, AVAILABLE_PROFILE_FEATURES
class TestAnalyticsDistributions(TestCase):
    '''Test analytics distribution gathering.'''

    def setUp(self):
        super(TestAnalyticsDistributions, self).setUp()
        self.course_id = SlashSeparatedCourseKey('robot', 'course', 'id')
        # 30 users cycling through the three gender / education values, so
        # each value occurs exactly 10 times; years of birth are all unique.
        self.users = [UserFactory(
            profile__gender=['m', 'f', 'o'][i % 3],
            profile__level_of_education=['a', 'hs', 'el'][i % 3],
            profile__year_of_birth=i + 1930
        ) for i in xrange(30)]
        self.ces = [CourseEnrollment.enroll(user, self.course_id)
                    for user in self.users]

    @raises(ValueError)
    def test_profile_distribution_bad_feature(self):
        # Asking for an unsupported feature must raise ValueError.
        feature = 'robot-not-a-real-feature'
        self.assertNotIn(feature, AVAILABLE_PROFILE_FEATURES)
        profile_distribution(self.course_id, feature)

    def test_profile_distribution_easy_choice(self):
        # 'gender' has a closed set of choices -> EASY_CHOICE distribution.
        feature = 'gender'
        self.assertIn(feature, AVAILABLE_PROFILE_FEATURES)
        distribution = profile_distribution(self.course_id, feature)
        self.assertEqual(distribution.type, 'EASY_CHOICE')
        self.assertEqual(distribution.data['no_data'], 0)
        self.assertEqual(distribution.data['m'], len(self.users) / 3)
        self.assertEqual(distribution.choices_display_names['m'], 'Male')

    def test_profile_distribution_open_choice(self):
        # 'year_of_birth' is free-form -> OPEN_CHOICE with no display names.
        feature = 'year_of_birth'
        self.assertIn(feature, AVAILABLE_PROFILE_FEATURES)
        distribution = profile_distribution(self.course_id, feature)
        print distribution
        self.assertEqual(distribution.type, 'OPEN_CHOICE')
        self.assertTrue(hasattr(distribution, 'choices_display_names'))
        self.assertEqual(distribution.choices_display_names, None)
        self.assertNotIn('no_data', distribution.data)
        self.assertEqual(distribution.data[1930], 1)

    def test_gender_count(self):
        # Deactivating an enrollment must remove it from the gender counts.
        course_enrollments = CourseEnrollment.objects.filter(
            course_id=self.course_id, user__profile__gender='m'
        )
        distribution = profile_distribution(self.course_id, "gender")
        self.assertEqual(distribution.data['m'], len(course_enrollments))
        course_enrollments[0].deactivate()
        distribution = profile_distribution(self.course_id, "gender")
        self.assertEqual(distribution.data['m'], len(course_enrollments) - 1)

    def test_level_of_education_count(self):
        # Same as test_gender_count, but for level_of_education.
        course_enrollments = CourseEnrollment.objects.filter(
            course_id=self.course_id, user__profile__level_of_education='hs'
        )
        distribution = profile_distribution(self.course_id, "level_of_education")
        self.assertEqual(distribution.data['hs'], len(course_enrollments))
        course_enrollments[0].deactivate()
        distribution = profile_distribution(self.course_id, "level_of_education")
        self.assertEqual(distribution.data['hs'], len(course_enrollments) - 1)
class TestAnalyticsDistributionsNoData(TestCase):
    '''Test analytics distribution gathering.'''

    def setUp(self):
        super(TestAnalyticsDistributionsNoData, self).setUp()
        self.course_id = SlashSeparatedCourseKey('robot', 'course', 'id')
        # 5 users with data, plus 4 users with missing year_of_birth and a
        # missing/blank gender -- both None and '' must count as "no data".
        self.users = [UserFactory(
            profile__year_of_birth=i + 1930,
        ) for i in xrange(5)]
        self.nodata_users = [UserFactory(
            profile__year_of_birth=None,
            profile__gender=[None, ''][i % 2]
        ) for i in xrange(4)]
        self.users += self.nodata_users
        self.ces = tuple(CourseEnrollment.enroll(user, self.course_id)
                         for user in self.users)

    def test_profile_distribution_easy_choice_nodata(self):
        # EASY_CHOICE features report missing values under the 'no_data' key.
        feature = 'gender'
        self.assertIn(feature, AVAILABLE_PROFILE_FEATURES)
        distribution = profile_distribution(self.course_id, feature)
        print distribution
        self.assertEqual(distribution.type, 'EASY_CHOICE')
        self.assertTrue(hasattr(distribution, 'choices_display_names'))
        self.assertNotEqual(distribution.choices_display_names, None)
        self.assertIn('no_data', distribution.data)
        self.assertEqual(distribution.data['no_data'], len(self.nodata_users))

    def test_profile_distribution_open_choice_nodata(self):
        # OPEN_CHOICE features also get a 'no_data' bucket when values are missing.
        feature = 'year_of_birth'
        self.assertIn(feature, AVAILABLE_PROFILE_FEATURES)
        distribution = profile_distribution(self.course_id, feature)
        print distribution
        self.assertEqual(distribution.type, 'OPEN_CHOICE')
        self.assertTrue(hasattr(distribution, 'choices_display_names'))
        self.assertEqual(distribution.choices_display_names, None)
        self.assertIn('no_data', distribution.data)
        self.assertEqual(distribution.data['no_data'], len(self.nodata_users))
|
agpl-3.0
|
klahnakoski/TestLog-ETL
|
vendor/mo_hg/relay/app.py
|
2
|
3084
|
# encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
from __future__ import absolute_import, division, unicode_literals
import os
import flask
from flask import Flask, Response
from mo_hg.relay.cache import Cache
from mo_json import value2json
from mo_logs import Except, Log, constants, startup
from pyLibrary.env.flask_wrappers import cors_wrapper
APP_NAME = "HG Relay"
class RelayApp(Flask):
    """Flask app that guarantees logging is flushed and stopped on shutdown."""

    def run(self, *args, **kwargs):
        # ENSURE THE LOGGING IS CLEANED UP
        try:
            Flask.run(self, *args, **kwargs)
        except BaseException as cause:  # MUST CATCH BaseException BECAUSE argparse LIKES TO EXIT THAT WAY, AND gunicorn WILL NOT REPORT
            Log.warning(APP_NAME + " service shutdown!", cause=cause)
        finally:
            Log.stop()
# Populated in the __main__ block / add(): the Flask app instance, the parsed
# settings, and the shared Cache used by the request handlers below.
flask_app = None
config = None
cache = None
@cors_wrapper
def relay_get(path):
    """Forward a GET request for `path` through the cache; 400 on failure."""
    try:
        return cache.request("get", path, flask.request.headers)
    except Exception as cause:
        cause = Except.wrap(cause)
        Log.warning("could not handle request", cause=cause)
        body = value2json(cause, pretty=True).encode('utf8')
        return Response(body, status=400, headers={"Content-Type": "text/html"})
@cors_wrapper
def relay_post(path):
    """Forward a POST request for `path` through the cache; 400 on failure."""
    try:
        return cache.request("post", path, flask.request.headers)
    except Exception as cause:
        cause = Except.wrap(cause)
        Log.warning("could not handle request", cause=cause)
        body = value2json(cause, pretty=True).encode('utf8')
        return Response(body, status=400, headers={"Content-Type": "text/html"})
def add(any_flask_app):
    """Build the shared cache and attach the relay endpoints to the app."""
    global cache
    cache = Cache(config.cache)
    # Same registration order as before: GET then POST for each rule.
    for url_rule in (str("/<path:path>"), str("/")):
        any_flask_app.add_url_rule(url_rule, None, relay_get, methods=[str("GET")])
        any_flask_app.add_url_rule(url_rule, None, relay_post, methods=[str("POST")])
if __name__ in ("__main__",):
    Log.note("Starting " + APP_NAME + " Service App...")
    flask_app = RelayApp(__name__)
    try:
        # Settings file path comes from the HG_RELAY_CONFIG env variable.
        config = startup.read_settings(filename=os.environ.get("HG_RELAY_CONFIG"))
        constants.set(config.constants)
        Log.start(config.debug)
        add(flask_app)
        Log.note("Started " + APP_NAME + " Service")
    except BaseException as e:  # MUST CATCH BaseException BECAUSE argparse LIKES TO EXIT THAT WAY, AND gunicorn WILL NOT REPORT
        try:
            Log.error(
                "Serious problem with " + APP_NAME + " service construction! Shutdown!", cause=e
            )
        finally:
            Log.stop()
    # NOTE(review): if read_settings() failed above, `config` is still None and
    # the next line raises AttributeError -- presumably acceptable because
    # Log.error aborts first; confirm Log.error always raises.
    if config.flask:
        # Shard the port by process number when running multiple workers.
        if config.flask.port and config.args.process_num:
            config.flask.port += config.args.process_num
        Log.note("Running Flask...")
        flask_app.run(**config.flask)
|
mpl-2.0
|
heke123/chromium-crosswalk
|
tools/flakiness/is_flaky.py
|
84
|
2006
|
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs a test repeatedly to measure its flakiness. The return code is non-zero
if the failure rate is higher than the specified threshold, but is not 100%."""
import argparse
import multiprocessing.dummy
import subprocess
import sys
import time
def load_options():
    """Parse command-line options for the flakiness runner."""
    arg_parser = argparse.ArgumentParser(description=__doc__)
    arg_parser.add_argument('--retries', default=1000, type=int,
                            help='Number of test retries to measure flakiness.')
    arg_parser.add_argument('--threshold', default=0.05, type=float,
                            help='Minimum flakiness level at which test is '
                                 'considered flaky.')
    arg_parser.add_argument('--jobs', '-j', type=int, default=1,
                            help='Number of parallel jobs to run tests.')
    arg_parser.add_argument('command', nargs='+', help='Command to run test.')
    return arg_parser.parse_args()
def run_test(job):
    """Run one retry of the test command and return its exit code.

    Bug fix: this used subprocess.check_call, which *raises*
    CalledProcessError on any non-zero exit. That aborts the whole
    pool.map in main() on the first failure, so a failing run could
    never be counted toward the flakiness ratio. subprocess.call
    returns the exit code instead, which main() then tallies.
    """
    print('Starting retry attempt %d out of %d' % (job['index'] + 1,
                                                   job['retries']))
    return subprocess.call(job['cmd'], stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)
def main():
    """Run the command --retries times in a thread pool and report flakiness.

    Returns 1 (flaky) when the failure ratio exceeds --threshold but the
    test is not failing 100% of the time; otherwise 0.
    """
    options = load_options()
    num_passed = num_failed = 0
    running = []  # NOTE(review): unused local, kept as-is
    pool = multiprocessing.dummy.Pool(processes=options.jobs)
    args = [{'index': index, 'retries': options.retries, 'cmd': options.command}
            for index in range(options.retries)]
    results = pool.map(run_test, args)
    num_passed = len([retcode for retcode in results if retcode == 0])
    num_failed = len(results) - num_passed
    # A test that *always* fails is consistently broken, not flaky, so report
    # 0 in that case (see the module docstring: "but is not 100%").
    if num_passed == 0:
        flakiness = 0
    else:
        flakiness = num_failed / float(len(results))
    print 'Flakiness is %.2f' % flakiness
    if flakiness > options.threshold:
        return 1
    else:
        return 0
if __name__ == '__main__':
    # Propagate main()'s status code to the shell.
    sys.exit(main())
|
bsd-3-clause
|
tectronics/lector
|
lector/editor/spellchecker.py
|
6
|
1392
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Lector: spellchecker.py
Copyright (C) 2009, John Schember
Modified for Lector by Zdenko Podobný
This code is released under MIT licence
"""
import re
from PyQt4.Qt import Qt, QAction
from PyQt4.Qt import QSyntaxHighlighter, QTextCharFormat
from PyQt4.QtCore import pyqtSignal
class Highlighter(QSyntaxHighlighter):
    """Syntax highlighter that underlines misspelled words in red."""

    # Word-matching regex: unicode + case-insensitive; the apostrophe counts
    # as a word character so contractions are checked as a single word.
    WORDS = u'(?iu)[\w\']+'

    def __init__(self, *args):
        QSyntaxHighlighter.__init__(self, *args)
        # Spellcheck dictionary (an object with a .check(word) method,
        # enchant-style); None disables highlighting until setDict is called.
        self.dict = None

    def setDict(self, dict):
        # NOTE: parameter shadows the builtin `dict`; kept for API compat.
        self.dict = dict

    def highlightBlock(self, text):
        """Underline every word in `text` the dictionary does not know."""
        if not self.dict:
            return
        text = unicode(text)
        format = QTextCharFormat()
        format.setUnderlineColor(Qt.red)
        format.setUnderlineStyle(QTextCharFormat.SpellCheckUnderline)
        for word_object in re.finditer(self.WORDS, text):
            if not self.dict.check(word_object.group()):
                self.setFormat(word_object.start(),
                               word_object.end() - word_object.start(), format)
class SpellAction(QAction):
    '''
    A special QAction that returns the text in a signal.
    '''
    # Emitted with the action's label text when the action is triggered
    # (used to apply the chosen spelling correction).
    correct = pyqtSignal(unicode)

    def __init__(self, *args):
        QAction.__init__(self, *args)
        self.triggered.connect(lambda x: self.correct.emit(
            unicode(self.text())))
|
gpl-2.0
|
tuxfux-hlp-notes/python-batches
|
archieves/batch-64/15-files/myenv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/escprober.py
|
2936
|
3187
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .escsm import (HZSMModel, ISO2022CNSMModel, ISO2022JPSMModel,
ISO2022KRSMModel)
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .compat import wrap_ord
class EscCharSetProber(CharSetProber):
    """Detects charsets that signal themselves with escape sequences
    (HZ-GB-2312 and the ISO-2022 family) by running one coding state
    machine per candidate model over the input."""

    def __init__(self):
        CharSetProber.__init__(self)
        # One state machine per candidate escape-based encoding.
        self._mCodingSM = [
            CodingStateMachine(HZSMModel),
            CodingStateMachine(ISO2022CNSMModel),
            CodingStateMachine(ISO2022JPSMModel),
            CodingStateMachine(ISO2022KRSMModel)
        ]
        self.reset()

    def reset(self):
        CharSetProber.reset(self)
        for codingSM in self._mCodingSM:
            if not codingSM:
                continue
            codingSM.active = True
            codingSM.reset()
        # Number of state machines still in the running.
        self._mActiveSM = len(self._mCodingSM)
        self._mDetectedCharset = None

    def get_charset_name(self):
        return self._mDetectedCharset

    def get_confidence(self):
        # An escape-sequence match is essentially unambiguous, hence 0.99.
        if self._mDetectedCharset:
            return 0.99
        else:
            return 0.00

    def feed(self, aBuf):
        """Feed bytes to all active state machines until one identifies
        itself (eItsMe) or all are eliminated (eError)."""
        for c in aBuf:
            # PY3K: aBuf is a byte array, so c is an int, not a byte
            for codingSM in self._mCodingSM:
                if not codingSM:
                    continue
                if not codingSM.active:
                    continue
                codingState = codingSM.next_state(wrap_ord(c))
                if codingState == constants.eError:
                    # This model cannot explain the input; retire it.
                    codingSM.active = False
                    self._mActiveSM -= 1
                    if self._mActiveSM <= 0:
                        self._mState = constants.eNotMe
                        return self.get_state()
                elif codingState == constants.eItsMe:
                    # Unique escape sequence matched: charset identified.
                    self._mState = constants.eFoundIt
                    self._mDetectedCharset = codingSM.get_coding_state_machine()  # nopep8
                    return self.get_state()
        return self.get_state()
|
gpl-3.0
|
abtink/openthread
|
tools/harness-automation/cases_R140/leader_7_1_6.py
|
9
|
1876
|
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from autothreadharness.harness_case import HarnessCase
import unittest
class Leader_7_1_6(HarnessCase):
    """Thread test-harness certification case 7.1.6 with the DUT as Leader."""
    role = HarnessCase.ROLE_LEADER
    case = '7 1 6'
    # Number of golden (reference) devices the harness must have connected.
    golden_devices_required = 4

    def on_dialog(self, dialog, title):
        # No harness dialogs require special handling for this case.
        pass
if __name__ == '__main__':
    # Allow running this case directly through unittest's CLI runner.
    unittest.main()
|
bsd-3-clause
|
sgraham/nope
|
tools/prepare-bisect-perf-regression.py
|
84
|
2403
|
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Prepare Performance Test Bisect Tool
This script is used by a try bot to create a working directory and sync an
initial copy of the depot for use in bisecting performance regressions.
An example usage:
./tools/prepare-bisect-perf-regressions.py --working_directory "~/builds"
--output_buildbot_annotations
Would result in creating ~/builds/bisect and then populating it with a copy of
the depot.
"""
import optparse
import sys
from auto_bisect import bisect_utils
def main():
    """Does an initial checkout of Chromium then exits.

    Returns 0 on success, 1 on a missing --working_directory or a failed
    depot setup.
    """
    usage = ('%prog [options] [-- chromium-options]\n'
             'Prepares a temporary depot for use on a try bot.')
    parser = optparse.OptionParser(usage=usage)
    parser.add_option('-w', '--working_directory',
                      type='str',
                      help='Path to the working directory where the script will '
                      'do an initial checkout of the chromium depot. The '
                      'files will be placed in a subdirectory "bisect" under '
                      'working_directory and that will be used to perform the '
                      'bisection.')
    parser.add_option('--output_buildbot_annotations',
                      action='store_true',
                      help='Add extra annotation output for buildbot.')
    parser.add_option('--target_platform',
                      type='choice',
                      choices=['chromium', 'cros', 'android'],
                      default='chromium',
                      help='The target platform. Choices are "chromium" (current '
                      'platform), "cros", or "android". If you specify something '
                      'other than "chromium", you must be properly set up to '
                      'build that platform.')
    opts, _ = parser.parse_args()
    # --working_directory is effectively required; bail out with usage info.
    if not opts.working_directory:
        print 'Error: missing required parameter: --working_directory'
        print
        parser.print_help()
        return 1
    # Only create and populate the depot when it does not already exist.
    if not bisect_utils.CheckIfBisectDepotExists(opts):
        try:
            bisect_utils.CreateBisectDirectoryAndSetupDepot(
                opts, bisect_utils.DEFAULT_GCLIENT_CUSTOM_DEPS)
        except RuntimeError:
            return 1
    return 0
if __name__ == '__main__':
    # Propagate main()'s status code to the try bot.
    sys.exit(main())
|
bsd-3-clause
|
sunu/oh-missions-oppia-beta
|
extensions/rules/tar_file_string_test.py
|
4
|
5041
|
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, softwar
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for classification of TarFileStrings."""
__author__ = 'Tarashish Mishra'
import base64
import os
import unittest
from core.domain import fs_domain
from extensions.rules import tar_file_string
import utils
class TarFileStringRuleUnitTests(unittest.TestCase):
    """Tests for rules operating on UnicodeString objects.

    Refactor: the six tests shared an identical load/encode/assert
    pattern; it now lives in the _encode_fixture/_check_rule helpers, and
    the data-dir constants are class attributes instead of per-method
    locals. Test method names and behavior are unchanged.
    """

    # Directory holding the .tar.gz fixture archives used by these tests.
    TEST_DATA_DIR = 'extensions/rules/testdata'
    # Directory holding the canonical (expected) file contents.
    CANONICAL_DATA_DIR = 'extensions/rules/testdata/canonical'

    def _encode_fixture(self, file_name):
        """Return the base64-encoded raw bytes of a fixture archive."""
        return base64.b64encode(utils.get_file_contents(
            os.path.join(self.TEST_DATA_DIR, file_name), raw_bytes=True))

    def _check_rule(self, rule, bad_file_name):
        """Assert `rule` matches `bad_file_name` but not good.tar.gz."""
        self.assertTrue(rule.eval(self._encode_fixture(bad_file_name)))
        self.assertFalse(rule.eval(self._encode_fixture('good.tar.gz')))

    def test_wrapper_name_rule(self):
        self._check_rule(
            tar_file_string.ChecksWrapperDirName('myproject-0.1'),
            'wrong-wrapper-name.tar.gz')

    def test_wrapper_presence_rule(self):
        self._check_rule(
            tar_file_string.ChecksWrapperDirPresence(),
            'no-wrapper-dir.tar.gz')

    def test_unexpected_file_rule(self):
        self._check_rule(
            tar_file_string.HasUnexpectedFile(
                ["myproject-0.1", "myproject-0.1/hello.c",
                 "myproject-0.1/Makefile"]),
            'unexpected-file.tar.gz')

    def test_unexpected_content_rule(self):
        # This rule compares archive contents against canonical files read
        # through a file-system abstraction, so it gets its own wiring.
        fs = fs_domain.AbstractFileSystem(
            fs_domain.DiskBackedFileSystem(self.TEST_DATA_DIR))
        canonical_fs = fs_domain.AbstractFileSystem(
            fs_domain.DiskBackedFileSystem(self.CANONICAL_DATA_DIR))
        rule = tar_file_string.HasUnexpectedContent(
            ['hello.c', 'Makefile']).set_fs(canonical_fs)
        self.assertTrue(
            rule.eval(base64.b64encode(fs.get('incorrect-contents.tar.gz'))))
        self.assertFalse(
            rule.eval(base64.b64encode(fs.get('good.tar.gz'))))

    def test_missing_expected_file_rule(self):
        self._check_rule(
            tar_file_string.MissingExpectedFile(
                ["myproject-0.1", "myproject-0.1/hello.c",
                 "myproject-0.1/Makefile"]),
            'missing-expected-file.tar.gz')

    def test_apple_double_file_rule(self):
        self._check_rule(
            tar_file_string.HasAppleDoubleFile(),
            'apple-double.tar.gz')
|
apache-2.0
|
saurabh6790/test-frappe
|
frappe/model/__init__.py
|
28
|
2474
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# model __init__.py
from __future__ import unicode_literals
import frappe
import json
# Fieldtypes that hold no user data (layout / markup only).
no_value_fields = ('Section Break', 'Column Break', 'HTML', 'Table', 'Button', 'Image', 'Fold', 'Heading')
# Purely presentational fieldtypes (no_value_fields minus 'Table').
display_fieldtypes = ('Section Break', 'Column Break', 'HTML', 'Button', 'Image', 'Fold', 'Heading')
# Columns present on every DocType table.
default_fields = ('doctype','name','owner','creation','modified','modified_by',
    'parent','parentfield','parenttype','idx','docstatus')
# DocField properties stored as integers (0/1 flags or small ints).
integer_docfield_properties = ("reqd", "search_index", "in_list_view", "permlevel",
    "hidden", "read_only", "ignore_user_permissions", "allow_on_submit", "report_hide",
    "in_filter", "no_copy", "print_hide", "unique")
# Optional per-document metadata columns that may or may not exist.
optional_fields = ("_user_tags", "_comments", "_assign", "_starred_by")
def rename(doctype, old, new, debug=False):
    """Rename document `old` of `doctype` to `new` (thin wrapper)."""
    from frappe.model.rename_doc import rename_doc
    rename_doc(doctype, old, new, debug)
def copytables(srctype, src, srcfield, tartype, tar, tarfield, srcfields, tarfields=None):
    """Copy rows of child table `srcfield` on `src` into child table
    `tarfield` on `tar`, mapping `srcfields` values onto `tarfields`.

    Fix: `tarfields` previously used a mutable default argument (`[]`),
    a classic Python pitfall; it now defaults to None and falls back to
    `srcfields`, which is backward-compatible for all callers.

    Args:
        srctype / tartype: doctype names (unused here; kept for API compat).
        src / tar: source and target documents.
        srcfield / tarfield: child-table fieldnames on src / tar.
        srcfields: fieldnames to copy from each source row.
        tarfields: fieldnames to write on each new row; defaults to srcfields.

    Returns:
        list of the newly appended target rows.
    """
    if not tarfields:
        tarfields = srcfields
    copied_rows = []
    for source_row in src.get(srcfield):
        new_row = tar.append(tarfield)
        # Preserve the source row's position in the child table.
        new_row.idx = source_row.idx
        for src_key, tar_key in zip(srcfields, tarfields):
            new_row.set(tar_key, source_row.get(src_key))
        copied_rows.append(new_row)
    return copied_rows
def db_exists(dt, dn):
    # Thin backward-compatibility wrapper; prefer frappe.db.exists directly.
    return frappe.db.exists(dt, dn)
def delete_fields(args_dict, delete=0):
    """
    Delete a field.
    * Deletes record from `tabDocField`
    * If not single doctype: Drops column from table
    * If single, deletes record from `tabSingles`
    args_dict = { dt: [field names] }
    """
    import frappe.utils
    for dt in args_dict.keys():
        fields = args_dict[dt]
        if not fields: continue
        # NOTE(review): fieldnames are spliced into the SQL with string
        # formatting -- safe only while callers never pass untrusted names;
        # consider parameterizing.
        frappe.db.sql("""\
DELETE FROM `tabDocField`
WHERE parent=%s AND fieldname IN (%s)
""" % ('%s', ", ".join(['"' + f + '"' for f in fields])), dt)
        # Delete the data / column only if delete is specified
        if not delete: continue
        if frappe.db.get_value("DocType", dt, "issingle"):
            # Single doctypes store values as rows in tabSingles, not columns.
            frappe.db.sql("""\
DELETE FROM `tabSingles`
WHERE doctype=%s AND field IN (%s)
""" % ('%s', ", ".join(['"' + f + '"' for f in fields])), dt)
        else:
            # Drop only columns that actually exist on the table.
            existing_fields = frappe.db.sql("desc `tab%s`" % dt)
            existing_fields = existing_fields and [e[0] for e in existing_fields] or []
            query = "ALTER TABLE `tab%s` " % dt + \
                ", ".join(["DROP COLUMN `%s`" % f for f in fields if f in existing_fields])
            # Commit before DDL: ALTER TABLE implicitly ends the transaction.
            # NOTE(review): if no listed field exists, `query` has no DROP
            # clause and this executes invalid SQL -- confirm callers always
            # pass existing columns.
            frappe.db.commit()
            frappe.db.sql(query)
|
mit
|
Azulinho/ansible
|
test/units/modules/cloud/amazon/test_data_pipeline.py
|
23
|
10635
|
# (c) 2017 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import json
import collections
import pytest
from . placebo_fixtures import placeboify, maybe_sleep
from ansible.modules.cloud.amazon import data_pipeline
from ansible.module_utils._text import to_text
# These tests require the `boto3` and `botocore` modules; skip the whole file
# when they are unavailable.
# NOTE(review): the original comment referenced test_api_gateway.py -- likely
# a copy/paste from another test module.
boto3 = pytest.importorskip('boto3')
@pytest.fixture(scope='module')
def dp_setup():
    """
    Yield a FakeModule object, data pipeline id of a vanilla data pipeline, and data pipeline objects

    This fixture is module-scoped, since this can be reused for multiple tests.
    """
    Dependencies = collections.namedtuple("Dependencies", ["module", "data_pipeline_id", "objects"])

    # get objects to use to test populating and activating the data pipeline
    if not os.getenv('PLACEBO_RECORD'):
        # Playback mode: use a canned pair of pipeline objects.
        objects = [{"name": "Every 1 day",
                    "id": "DefaultSchedule",
                    "fields": []},
                   {"name": "Default",
                    "id": "Default",
                    "fields": []}]
    else:
        # Record mode: fetch the real pipeline objects from S3.
        s3 = boto3.client('s3')
        data = s3.get_object(Bucket="ansible-test-datapipeline", Key="pipeline-object/new.json")
        objects = json.loads(to_text(data['Body'].read()))

    # create a module with vanilla data pipeline parameters
    params = {'name': 'ansible-test-create-pipeline',
              'description': 'ansible-datapipeline-unit-test',
              'state': 'present',
              'timeout': 300,
              'objects': [],
              'tags': {},
              'parameters': [],
              'values': []}
    module = FakeModule(**params)

    # yield a module, the data pipeline id, and the data pipeline objects (that are not yet defining the vanilla data pipeline)
    if not os.getenv('PLACEBO_RECORD'):
        # Playback mode: the pipeline id is baked into the recorded responses.
        yield Dependencies(module=module, data_pipeline_id='df-0590406117G8DPQZY2HA', objects=objects)
    else:
        # Record mode: actually create the pipeline and yield its real id.
        connection = boto3.client('datapipeline')
        changed, result = data_pipeline.create_pipeline(connection, module)
        data_pipeline_id = result['data_pipeline']['pipeline_id']
        yield Dependencies(module=module, data_pipeline_id=data_pipeline_id, objects=objects)

    # remove data pipeline
    if os.getenv('PLACEBO_RECORD'):
        module.params.update(state='absent')
        data_pipeline.delete_pipeline(connection, module)
class FakeModule(object):
    """Minimal stand-in for AnsibleModule: records exit/fail call arguments."""

    def __init__(self, **kwargs):
        self.params = kwargs

    def _record(self, args, kwargs):
        # Keep the last exit/fail call so tests can inspect it.
        self.exit_args = args
        self.exit_kwargs = kwargs

    def fail_json(self, *args, **kwargs):
        self._record(args, kwargs)
        raise Exception('FAIL')

    def exit_json(self, *args, **kwargs):
        self._record(args, kwargs)
def test_create_pipeline_already_exists(placeboify, maybe_sleep, dp_setup):
    # Creating a pipeline that already exists (made by dp_setup) is a no-op.
    connection = placeboify.client('datapipeline')
    changed, result = data_pipeline.create_pipeline(connection, dp_setup.module)
    assert changed is False
    assert "Data Pipeline ansible-test-create-pipeline is present" in result['msg']
def test_pipeline_field(placeboify, maybe_sleep, dp_setup):
    # A freshly created, unpopulated pipeline reports state PENDING.
    connection = placeboify.client('datapipeline')
    pipeline_field_info = data_pipeline.pipeline_field(connection, dp_setup.data_pipeline_id, "@pipelineState")
    assert pipeline_field_info == "PENDING"
def test_define_pipeline(placeboify, maybe_sleep, dp_setup):
    # Populating the vanilla pipeline with objects updates its definition.
    connection = placeboify.client('datapipeline')
    changed, result = data_pipeline.define_pipeline(connection, dp_setup.module, dp_setup.objects, dp_setup.data_pipeline_id)
    assert 'has been updated' in result
def test_deactivate_pipeline(placeboify, maybe_sleep, dp_setup):
    # Deactivating the shared test pipeline reports success.
    connection = placeboify.client('datapipeline')
    changed, result = data_pipeline.deactivate_pipeline(connection, dp_setup.module)
    assert "Data Pipeline ansible-test-create-pipeline deactivated" in result['msg']
def test_activate_without_population(placeboify, maybe_sleep, dp_setup):
    """Activating an undefined pipeline must fail with a helpful message.

    Bug fix: the original compared the pytest ExceptionInfo object itself
    to a string, which is always False. The raised exception lives in
    error_message.value; compare against its text instead.
    """
    connection = placeboify.client('datapipeline')
    with pytest.raises(Exception) as error_message:
        changed, result = data_pipeline.activate_pipeline(connection, dp_setup.module)
    assert "You need to populate your pipeline before activation." in str(error_message.value)
def test_create_pipeline(placeboify, maybe_sleep):
    # Create a brand-new pipeline, check the result message, then clean up.
    connection = placeboify.client('datapipeline')
    params = {'name': 'ansible-unittest-create-pipeline',
              'description': 'ansible-datapipeline-unit-test',
              'state': 'present',
              'timeout': 300,
              'tags': {}}
    m = FakeModule(**params)
    changed, result = data_pipeline.create_pipeline(connection, m)
    assert changed is True
    assert result['msg'] == "Data Pipeline ansible-unittest-create-pipeline created."
    data_pipeline.delete_pipeline(connection, m)
def test_create_pipeline_with_tags(placeboify, maybe_sleep):
    # Same as test_create_pipeline, but with a tag attached at creation.
    connection = placeboify.client('datapipeline')
    params = {'name': 'ansible-unittest-create-pipeline_tags',
              'description': 'ansible-datapipeline-unit-test',
              'state': 'present',
              'tags': {'ansible': 'test'},
              'timeout': 300}
    m = FakeModule(**params)
    changed, result = data_pipeline.create_pipeline(connection, m)
    assert changed is True
    assert result['msg'] == "Data Pipeline ansible-unittest-create-pipeline_tags created."
    data_pipeline.delete_pipeline(connection, m)
def test_delete_nonexistent_pipeline(placeboify, maybe_sleep):
    # Deleting a pipeline that does not exist must report changed=False.
    connection = placeboify.client('datapipeline')
    params = {'name': 'ansible-test-nonexistent',
              'description': 'ansible-test-nonexistent',
              'state': 'absent',
              'objects': [],
              'tags': {'ansible': 'test'},
              'timeout': 300}
    m = FakeModule(**params)
    changed, result = data_pipeline.delete_pipeline(connection, m)
    assert changed is False
def test_delete_pipeline(placeboify, maybe_sleep):
    # Create a pipeline, then delete it; the delete must report changed=True.
    connection = placeboify.client('datapipeline')
    params = {'name': 'ansible-test-nonexistent',
              'description': 'ansible-test-nonexistent',
              'state': 'absent',
              'objects': [],
              'tags': {'ansible': 'test'},
              'timeout': 300}
    m = FakeModule(**params)
    data_pipeline.create_pipeline(connection, m)
    changed, result = data_pipeline.delete_pipeline(connection, m)
    assert changed is True
def test_build_unique_id_different(placeboify, maybe_sleep):
    """Modules that differ only in description must hash to different ids."""
    first = FakeModule(name='ansible-unittest-1', description='test-unique-id')
    second = FakeModule(name='ansible-unittest-1', description='test-unique-id-different')
    assert data_pipeline.build_unique_id(first) != data_pipeline.build_unique_id(second)
def test_build_unique_id_same(placeboify, maybe_sleep):
    """Identical module parameters must produce identical unique ids."""
    first = FakeModule(name='ansible-unittest-1', description='test-unique-id', tags={'ansible': 'test'})
    second = FakeModule(name='ansible-unittest-1', description='test-unique-id', tags={'ansible': 'test'})
    assert data_pipeline.build_unique_id(first) == data_pipeline.build_unique_id(second)
def test_build_unique_id_obj(placeboify, maybe_sleep):
    """The unique id must ignore pipeline objects.

    Objects may be modified without changing the pipeline's identity, so two
    modules differing only in their objects hash the same.
    """
    first = FakeModule(name='ansible-unittest-1', objects=[{'first': 'object'}])
    second = FakeModule(name='ansible-unittest-1', objects=[{'second': 'object'}])
    assert data_pipeline.build_unique_id(first) == data_pipeline.build_unique_id(second)
def test_format_tags(placeboify, maybe_sleep):
    """format_tags turns a plain dict into the AWS key/value list form."""
    raw = {'key1': 'val1', 'key2': 'val2', 'key3': 'val3'}
    for pair in data_pipeline.format_tags(raw):
        assert raw[pair['key']] == pair['value']
def test_format_empty_tags(placeboify, maybe_sleep):
    """An empty tag dict formats to an empty list."""
    assert data_pipeline.format_tags({}) == []
def test_pipeline_description(placeboify, maybe_sleep, dp_setup):
    """pipeline_description returns the description of the fixture pipeline."""
    client = placeboify.client('datapipeline')
    described = data_pipeline.pipeline_description(client, dp_setup.data_pipeline_id)
    assert dp_setup.data_pipeline_id == described['pipelineDescriptionList'][0]['pipelineId']
def test_pipeline_description_nonexistent(placeboify, maybe_sleep):
    """Describing a pipeline id that does not exist raises DataPipelineNotFound."""
    hypothetical_pipeline_id = "df-015440025PF7YGLDK47C"
    connection = placeboify.client('datapipeline')
    # BUG FIX: the original asserted `error == data_pipeline.DataPipelineNotFound`,
    # which compares a pytest ExceptionInfo object to an exception class and can
    # never hold.  Assert the expected exception type via pytest.raises instead.
    with pytest.raises(data_pipeline.DataPipelineNotFound):
        data_pipeline.pipeline_description(connection, hypothetical_pipeline_id)
def test_check_dp_exists_true(placeboify, maybe_sleep, dp_setup):
    """check_dp_exists is True for the fixture pipeline's id."""
    client = placeboify.client('datapipeline')
    assert data_pipeline.check_dp_exists(client, dp_setup.data_pipeline_id) is True
def test_check_dp_exists_false(placeboify, maybe_sleep):
    """check_dp_exists is False for a made-up pipeline id."""
    hypothetical_pipeline_id = "df-015440025PF7YGLDK47C"
    client = placeboify.client('datapipeline')
    assert data_pipeline.check_dp_exists(client, hypothetical_pipeline_id) is False
def test_check_dp_status(placeboify, maybe_sleep, dp_setup):
    """A freshly created (unactivated) pipeline is in one of the inactive states."""
    inactive_states = ['INACTIVE', 'PENDING', 'FINISHED', 'DELETING']
    client = placeboify.client('datapipeline')
    assert data_pipeline.check_dp_status(client, dp_setup.data_pipeline_id, inactive_states) is True
def test_activate_pipeline(placeboify, maybe_sleep, dp_setup):
    """A pipeline that has been populated with objects can be activated."""
    client = placeboify.client('datapipeline')
    # Activation requires a populated pipeline, so define its objects first.
    data_pipeline.define_pipeline(client,
                                  module=dp_setup.module,
                                  objects=dp_setup.objects,
                                  dp_id=dp_setup.data_pipeline_id)
    changed, _ = data_pipeline.activate_pipeline(client, dp_setup.module)
    assert changed is True
|
gpl-3.0
|
chornsby/hayes
|
hayes/indexing.py
|
1
|
4216
|
# -- encoding: UTF-8 --
from hayes.analysis import AnalysisBase, builtin_simple_analyzer
from hayes.utils import object_to_dict
class DocumentIndex(object):
    """Description of an Elasticsearch index: the field mapping plus the
    analysis (analyzer/tokenizer/filter) settings those fields require.
    """
    name = None
    fields = {}                # field name -> SearchField (or a raw dict for "_all")
    enable_source = True       # keep the original document in "_source"
    enable_size = False        # track document size in the "_size" meta field
    enable_timestamp = False   # track indexing time in the "_timestamp" meta field

    def get_model(self):  # For Django compat.
        return None

    def get_objects(self):
        """Return the iterable of objects to index (override in subclasses)."""
        return ()

    def get_mapping(self):
        """Build and return the Elasticsearch mapping dict for this index."""
        mapping_json = {}
        mapping_json["_source"] = {"enabled": self.enable_source}
        if self.enable_size:
            mapping_json["_size"] = {"enabled": True, "store": True, "type": "int"}
        # BUG FIX: this branch was gated on `self.enable_size`, so "_timestamp"
        # could never be enabled independently; it must follow `enable_timestamp`.
        if self.enable_timestamp:
            mapping_json["_timestamp"] = {"enabled": True, "store": True, "type": "date"}
        properties = mapping_json["properties"] = {}
        # .items() (rather than the Py2-only .iteritems()) works on Python 2 and 3.
        for field_name, field in self.fields.items():
            if field_name == "_all":
                mapping_json["_all"] = object_to_dict(field)
            else:
                assert isinstance(field, SearchField)
                properties[field_name] = object_to_dict(field)
        return mapping_json

    def get_analysis_settings_fragment(self):
        """Collect the analyzers, tokenizers and filters referenced by the
        fields and return them as the "analysis" settings fragment.
        """
        analyzers_by_name = {}
        tokenizers_by_name = {}
        filters_by_name = {}
        for field in self.fields.values():
            if hasattr(field, "get_analyzers"):  # It could be a dict too
                for analyzer in field.get_analyzers():
                    if analyzer and isinstance(analyzer, AnalysisBase):
                        analyzers_by_name[analyzer.name] = analyzer
                        filters_by_name.update((f.name, f) for f in getattr(analyzer, "filters", ()) if hasattr(f, "name"))
                        tokenizer = getattr(analyzer, "tokenizer", None)
                        if getattr(tokenizer, "name", None):
                            tokenizers_by_name[tokenizer.name] = tokenizer

        def to_dict_m(m):
            # Serialize each named object, dropping those whose dict is empty.
            out = {}
            for k in m.values():
                d = k.to_dict()
                if d:
                    out[k.name] = d
            return out
        return {
            "analyzer": to_dict_m(analyzers_by_name),
            "tokenizer": to_dict_m(tokenizers_by_name),
            "filter": to_dict_m(filters_by_name),
        }

    def get_settings_fragment(self):
        """Return the index-level settings fragment wrapping the analysis part."""
        return {
            "index": {
                "analysis": self.get_analysis_settings_fragment()
            }
        }
class SearchField(object):
    """Base class for a single field in a `DocumentIndex` mapping."""
    stored = False   # store the field value separately from _source
    indexed = True   # make the field searchable

    def as_dict(self):
        """Return the Elasticsearch mapping fragment for this field."""
        return {}

    def get_analyzers(self):
        """Return the analyzers attached to this field as a list.

        FIX: the original returned `filter(None, [...])`, which on Python 3
        is a lazy single-use iterator; returning a real list behaves the same
        on Python 2 and 3 and can safely be iterated more than once.
        """
        candidates = (
            getattr(self, "analyzer", None),
            getattr(self, "index_analyzer", None),
            getattr(self, "search_analyzer", None),
        )
        return [analyzer for analyzer in candidates if analyzer]
class StringField(SearchField):
    """A plain (non-analyzed) string field with an optional boost."""

    def __init__(self, boost=1.0):
        self.boost = boost

    def as_dict(self):
        """Return the mapping fragment for a not-analyzed string."""
        index_mode = "not_analyzed" if self.indexed else "none"
        return {
            "type": "string",
            "index": index_mode,
            "store": self.stored,
            "boost": self.boost,
        }
class TextField(StringField):
    """An analyzed string field with configurable term vectors."""

    # Valid values for the Elasticsearch "term_vector" mapping option.
    _TERM_VECTOR_CHOICES = ("no", "yes", "with_offsets", "with_positions", "with_positions_offsets")

    def __init__(self, analyzer=None, boost=1.0, term_vector="no"):
        super(TextField, self).__init__(boost=boost)
        self.analyzer = analyzer
        self.term_vector = term_vector
        if self.term_vector not in self._TERM_VECTOR_CHOICES:
            raise ValueError("What a strange 'term_vector' value")

    def as_dict(self):
        """Return the mapping fragment, naming the analyzer when one is set."""
        mapping = {
            "type": "string",
            "index": "analyzed" if self.indexed else "none",
            "store": self.stored,
            "boost": self.boost,
            "term_vector": self.term_vector,
        }
        if self.analyzer:
            mapping["analyzer"] = self.analyzer.name
        return mapping
class BaseNumberField(SearchField):
    """Common marker base class for the numeric field types below."""
    pass
class IntegerField(BaseNumberField):
    """An integer-typed field."""

    def as_dict(self):
        return dict(type="integer", store=self.stored)
class DecimalField(BaseNumberField):
    """A double-precision floating point field."""

    def as_dict(self):
        return dict(type="double", store=self.stored)
class DateField(SearchField):
    """A date-typed field."""

    def as_dict(self):
        return dict(type="date", store=self.stored)
class BooleanField(SearchField):
    """A boolean-typed field."""

    def as_dict(self):
        return dict(type="boolean", store=self.stored)
class CompletionSuggestField(SearchField):
    """A field backing the Elasticsearch "completion" suggester."""
    stored = False

    def __init__(self, index_analyzer=builtin_simple_analyzer, search_analyzer=builtin_simple_analyzer, payloads=False, preserve_separators=True):
        self.index_analyzer = index_analyzer
        self.search_analyzer = search_analyzer
        # Coerce to real booleans so as_dict() serializes cleanly.
        self.payloads = bool(payloads)
        self.preserve_separators = bool(preserve_separators)

    def as_dict(self):
        """Return the "completion" mapping fragment with both analyzer names."""
        fragment = {"type": "completion"}
        fragment["payloads"] = self.payloads
        fragment["preserve_separators"] = self.preserve_separators
        fragment["index_analyzer"] = self.index_analyzer.name
        fragment["search_analyzer"] = self.search_analyzer.name
        return fragment
|
mit
|
jjalling/domoticz
|
plugins/examples/DenonMarantz.py
|
15
|
23003
|
#
# Denon AVR 4306 Plugin
#
# Author: Dnpwwo, 2016 - 2017
#
# Mode4 ("Sources") needs to have '|' delimited names of sources that the Denon knows about. The Selector can be changed afterwards to any text and the plugin will still map to the actual Denon name.
#
"""
<plugin key="Denon4306" version="3.2.0" name="Denon/Marantz Amplifier" author="dnpwwo" wikilink="" externallink="http://www.denon.co.uk/uk">
<description>
Denon (& Marantz) AVR Plugin.<br/><br/>
"Sources" need to have '|' delimited names of sources that the Denon knows about from the technical manual.<br/>
The Sources Selector(s) can be changed after initial creation to any text and the plugin will still map to the actual Denon name.<br/><br/>
Devices will be created in the Devices Tab only and will need to be manually made active.<br/><br/>
Auto-discovery is known to work on Linux but may not on Windows.
</description>
<params>
<param field="Port" label="Port" width="30px" required="true" default="23"/>
<param field="Mode1" label="Auto-Detect" width="75px">
<options>
<option label="True" value="Discover" default="true"/>
<option label="False" value="Fixed" />
</options>
</param>
<param field="Address" label="IP Address" width="200px"/>
<param field="Mode2" label="Discovery Match" width="250px" default="SDKClass=Receiver"/>
<param field="Mode3" label="Startup Delay" width="50px" required="true">
<options>
<option label="2" value="2"/>
<option label="3" value="3"/>
<option label="4" value="4" default="true" />
<option label="5" value="5"/>
<option label="6" value="6"/>
<option label="7" value="7"/>
<option label="10" value="10"/>
</options>
</param>
<param field="Mode4" label="Sources" width="550px" required="true" default="Off|DVD|VDP|TV|CD|DBS|Tuner|Phono|VCR-1|VCR-2|V.Aux|CDR/Tape|AuxNet|AuxIPod"/>
<param field="Mode6" label="Debug" width="75px">
<options>
<option label="True" value="Debug"/>
<option label="False" value="Normal" default="true" />
</options>
</param>
</params>
</plugin>
"""
import Domoticz
import base64
import datetime
class BasePlugin:
    """State machine for a Denon/Marantz AVR controlled over telnet.

    Optionally discovers the amplifier via AMX beacon (UDP) messages, polls it
    round-robin using the commands in `pollingDict`, and mirrors power,
    source-selection and volume state into Domoticz devices (units 1-7).
    Relies on the Domoticz-provided globals `Parameters` and `Devices`.
    """
    DenonConn = None          # active Domoticz.Connection (UDP beacon listener or telnet)
    oustandingPings = 0       # heartbeats since the amp last answered (original spelling kept)
    powerOn = False           # overall amp power state (PW)
    mainOn = False            # main zone on/off (ZM)
    mainSource = 0            # selector level of the main-zone input
    mainVolume1 = 0           # main volume; a negative value encodes "muted"
    zone2On = False
    zone2Source = 0
    zone2Volume = 0           # negative value encodes "muted" (same convention as main)
    zone3On = False
    zone3Source = 0
    zone3Volume = 0
    # Status prefixes from the amp that are deliberately ignored in onMessage.
    ignoreMessages = "|SS|SV|SD|MS|PS|CV|SY|TP|"
    selectorMap = {}          # selector level (0, 10, 20, ...) -> Denon source name
    # Poll cycle: the reply prefix we last saw keys the next query to send.
    pollingDict = {"PW":"ZM?\r", "ZM":"SI?\r", "SI":"MV?\r", "MV":"MU?\r", "MU":"PW?\r" }
    lastMessage = "PW"        # prefix of the last answered poll
    lastHeartbeat = datetime.datetime.now()
    SourceOptions = {}        # Domoticz selector-switch options built from Mode4
    def onStart(self):
        """Create/restore Domoticz devices, build the source map and connect."""
        if Parameters["Mode6"] == "Debug":
            Domoticz.Debugging(1)
        self.SourceOptions = {'LevelActions': '|'*Parameters["Mode4"].count('|'),
                              'LevelNames': Parameters["Mode4"],
                              'LevelOffHidden': 'false',
                              'SelectorStyle': '1'}
        if (len(Devices) == 0):
            # First run: create the base devices (zones 2/3 are added on demand).
            Domoticz.Device(Name="Power", Unit=1, TypeName="Switch", Image=5).Create()
            Domoticz.Device(Name="Main Zone", Unit=2, TypeName="Selector Switch", Switchtype=18, Image=5, Options=self.SourceOptions).Create()
            Domoticz.Device(Name="Main Volume", Unit=3, Type=244, Subtype=73, Switchtype=7, Image=8).Create()
        else:
            # Restore in-memory state from the persisted device values.
            if (2 in Devices and (len(Devices[2].sValue) > 0)):
                self.mainSource = int(Devices[2].sValue)
                self.mainOn = (Devices[2].nValue != 0)
            if (3 in Devices and (len(Devices[3].sValue) > 0)):
                self.mainVolume1 = int(Devices[3].sValue) if (Devices[3].nValue != 0) else int(Devices[3].sValue)*-1
            if (4 in Devices and (len(Devices[4].sValue) > 0)):
                self.zone2Source = int(Devices[4].sValue)
                self.zone2On = (Devices[4].nValue != 0)
            if (5 in Devices and (len(Devices[5].sValue) > 0)):
                self.zone2Volume = int(Devices[5].sValue) if (Devices[5].nValue != 0) else int(Devices[5].sValue)*-1
            if (6 in Devices and (len(Devices[6].sValue) > 0)):
                self.zone3Source = int(Devices[6].sValue)
                self.zone3On = (Devices[6].nValue != 0)
            if (7 in Devices and (len(Devices[7].sValue) > 0)):
                self.zone3Volume = int(Devices[7].sValue) if (Devices[7].nValue != 0) else int(Devices[7].sValue)*-1
            if (1 in Devices):
                self.powerOn = (self.mainOn or self.zone2On or self.zone3On)
        DumpConfigToLog()
        # Map selector levels (steps of 10) to the '|'-separated source names.
        dictValue=0
        for item in Parameters["Mode4"].split('|'):
            self.selectorMap[dictValue] = item
            dictValue = dictValue + 10
        self.handleConnect()
        return
    def onConnect(self, Connection, Status, Description):
        """Telnet connect callback: kick off the status queries or log failure."""
        if (Connection == self.DenonConn):
            if (Status == 0):
                Domoticz.Log("Connected successfully to: "+Connection.Address+":"+Connection.Port)
                # Query power, main zone and both extra zones, staggered by 1s.
                self.DenonConn.Send('PW?\r')
                self.DenonConn.Send('ZM?\r', Delay=1)
                self.DenonConn.Send('Z2?\r', Delay=2)
                self.DenonConn.Send('Z3?\r', Delay=3)
            else:
                if (Description.find("Only one usage of each socket address") > 0):
                    Domoticz.Log(Connection.Address+":"+Connection.Port+" is busy, waiting.")
                else:
                    Domoticz.Log("Failed to connect ("+str(Status)+") to: "+Connection.Address+":"+Connection.Port+" with error: "+Description)
                self.DenonConn = None
                self.powerOn = False
        self.SyncDevices(1)
    def onMessage(self, Connection, Data):
        """Handle beacon discovery datagrams and amplifier status lines."""
        strData = Data.decode("utf-8", "ignore")
        Domoticz.Debug("onMessage called with Data: '"+str(strData)+"'")
        self.oustandingPings = 0
        try:
            # Beacon messages to find the amplifier
            if (Connection.Name == "Beacon"):
                dictAMXB = DecodeDDDMessage(strData)
                if (strData.find(Parameters["Mode2"]) >= 0):
                    # Matching beacon: switch from UDP discovery to telnet control.
                    self.DenonConn = None
                    self.DenonConn = Domoticz.Connection(Name="Telnet", Transport="TCP/IP", Protocol="Line", Address=Connection.Address, Port=Parameters["Port"])
                    self.DenonConn.Connect()
                    try:
                        Domoticz.Log(dictAMXB['Make']+", "+dictAMXB['Model']+" Receiver discovered successfully at address: "+Connection.Address)
                    except KeyError:
                        Domoticz.Log("'Unknown' Receiver discovered successfully at address: "+Connection.Address)
                else:
                    try:
                        Domoticz.Log("Discovery message for Class: '"+dictAMXB['SDKClass']+"', Make '"+dictAMXB['Make']+"', Model '"+dictAMXB['Model']+"' seen at address: "+Connection.Address)
                    except KeyError:
                        Domoticz.Log("Discovery message '"+str(strData)+"' seen at address: "+Connection.Address)
            # Otherwise handle amplifier
            else:
                # Protocol lines are a 2-char prefix (action) plus a payload.
                strData = strData.strip()
                action = strData[0:2]
                detail = strData[2:]
                if (action in self.pollingDict): self.lastMessage = action
                if (action == "PW"): # Power Status
                    if (detail == "STANDBY"):
                        self.powerOn = False
                    elif (detail == "ON"):
                        self.powerOn = True
                    else: Domoticz.Debug("Unknown: Action "+action+", Detail '"+detail+"' ignored.")
                elif (action == "ZM"): # Main Zone on/off
                    if (detail == "ON"):
                        self.mainOn = True
                    elif (detail == "OFF"):
                        self.mainOn = False
                    else: Domoticz.Debug("Unknown: Action "+action+", Detail '"+detail+"' ignored.")
                elif (action == "SI"): # Main Zone Source Input
                    for key, value in self.selectorMap.items():
                        if (detail == value): self.mainSource = key
                elif (action == "MV"): # Master Volume
                    if (detail.isdigit()):
                        if (abs(self.mainVolume1) != int(detail[0:2])): self.mainVolume1 = int(detail[0:2])
                    elif (detail[0:3] == "MAX"): Domoticz.Debug("Unknown: Action "+action+", Detail '"+detail+"' ignored.")
                    else: Domoticz.Log("Unknown: Action "+action+", Detail '"+detail+"' ignored.")
                elif (action == "MU"): # Overall Mute
                    # Mute is encoded in the sign of the stored volume.
                    if (detail == "ON"): self.mainVolume1 = abs(self.mainVolume1)*-1
                    elif (detail == "OFF"): self.mainVolume1 = abs(self.mainVolume1)
                    else: Domoticz.Debug("Unknown: Action "+action+", Detail '"+detail+"' ignored.")
                elif (action == "Z2"): # Zone 2
                    # Zone 2 response, make sure we have Zone 2 devices in Domoticz and they are polled
                    if (4 not in Devices):
                        LevelActions = '|'*Parameters["Mode4"].count('|')
                        Domoticz.Device(Name="Zone 2", Unit=4, TypeName="Selector Switch", Switchtype=18, Image=5, Options=self.SourceOptions).Create()
                        Domoticz.Log("Zone 2 responded, devices added.")
                    if (5 not in Devices):
                        Domoticz.Device(Name="Volume 2", Unit=5, Type=244, Subtype=73, Switchtype=7, Image=8).Create()
                    if ("Z2" not in self.pollingDict):
                        # Extend the poll cycle to include Zone 2.
                        self.pollingDict = {"PW":"ZM?\r", "ZM":"SI?\r", "SI":"MV?\r", "MV":"MU?\r", "MU":"Z2?\r", "Z2":"PW?\r" }
                    if (detail == "ON"):
                        self.zone2On = True
                    elif (detail == "OFF"):
                        self.zone2On = False
                    elif (detail == "MUON"):
                        self.zone2Volume = abs(self.zone2Volume)*-1
                    elif (detail == "MUOFF"):
                        self.zone2Volume = abs(self.zone2Volume)
                    elif (detail.isdigit()):
                        if (abs(self.zone2Volume) != int(detail[0:2])): self.zone2Volume = int(detail[0:2])
                    else:
                        # Any other payload is assumed to be a source name.
                        for key, value in self.selectorMap.items():
                            if (detail == value): self.zone2Source = key
                elif (action == "Z3"): # Zone 3
                    # Zone 3 response, make sure we have Zone 3 devices in Domoticz and they are polled
                    if (6 not in Devices):
                        LevelActions = '|'*Parameters["Mode4"].count('|')
                        Domoticz.Device(Name="Zone 3", Unit=6, TypeName="Selector Switch", Switchtype=18, Image=5, Options=self.SourceOptions).Create()
                        Domoticz.Log("Zone 3 responded, devices added.")
                    if (7 not in Devices):
                        Domoticz.Device(Name="Volume 3", Unit=7, Type=244, Subtype=73, Switchtype=7, Image=8).Create()
                    if ("Z3" not in self.pollingDict):
                        # Extend the poll cycle to include both extra zones.
                        self.pollingDict = {"PW":"ZM?\r", "ZM":"SI?\r", "SI":"MV?\r", "MV":"MU?\r", "MU":"Z2?\r", "Z2":"Z3?\r", "Z3":"PW?\r" }
                    if (detail == "ON"):
                        self.zone3On = True
                    elif (detail == "OFF"):
                        self.zone3On = False
                    elif (detail == "MUON"):
                        self.zone3Volume = abs(self.zone3Volume)*-1
                    elif (detail == "MUOFF"):
                        self.zone3Volume = abs(self.zone3Volume)
                    elif (detail.isdigit()):
                        if (abs(self.zone3Volume) != int(detail[0:2])): self.zone3Volume = int(detail[0:2])
                    else:
                        for key, value in self.selectorMap.items():
                            if (detail == value): self.zone3Source = key
                else:
                    if (self.ignoreMessages.find(action) < 0):
                        Domoticz.Debug("Unknown message '"+action+"' ignored.")
            self.SyncDevices(0)
        except Exception as inst:
            Domoticz.Error("Exception in onMessage, called with Data: '"+str(strData)+"'")
            Domoticz.Error("Exception detail: '"+str(inst)+"'")
            raise
    def onCommand(self, Unit, Command, Level, Hue):
        """Translate a Domoticz device command into the amp's telnet protocol."""
        Domoticz.Log("onCommand called for Unit " + str(Unit) + ": Parameter '" + str(Command) + "', Level: " + str(Level))
        Command = Command.strip()
        action, sep, params = Command.partition(' ')
        action = action.capitalize()
        params = params.capitalize()
        delay = 0
        if (self.powerOn == False):
            # Give the amp time to power up before the real command (Mode3 seconds).
            delay = int(Parameters["Mode3"])
        else:
            # Amp will ignore commands if it is responding to a heartbeat so delay send
            lastHeartbeatDelta = (datetime.datetime.now()-self.lastHeartbeat).total_seconds()
            if (lastHeartbeatDelta < 0.5):
                delay = 1
                Domoticz.Log("Last heartbeat was "+str(lastHeartbeatDelta)+" seconds ago, delaying command send.")
        if (Unit == 1): # Main power switch
            if (action == "On"):
                self.DenonConn.Send(Message='PWON\r')
            elif (action == "Off"):
                self.DenonConn.Send(Message='PWSTANDBY\r', Delay=delay)
        # Main Zone devices
        elif (Unit == 2): # Main selector
            if (action == "On"):
                self.DenonConn.Send(Message='ZMON\r')
            elif (action == "Set"):
                if (self.powerOn == False): self.DenonConn.Send(Message='PWON\r')
                self.DenonConn.Send(Message='SI'+self.selectorMap[Level]+'\r', Delay=delay)
            elif (action == "Off"):
                self.DenonConn.Send(Message='ZMOFF\r', Delay=delay)
        elif (Unit == 3): # Main Volume control
            if (self.powerOn == False): self.DenonConn.Send(Message='PWON\r')
            if (action == "On"):
                self.DenonConn.Send(Message='MUOFF\r', Delay=delay)
            elif (action == "Set"):
                self.DenonConn.Send(Message='MV'+str(Level)+'\r', Delay=delay)
            elif (action == "Off"):
                self.DenonConn.Send(Message='MUON\r', Delay=delay)
        # Zone 2 devices
        elif (Unit == 4): # Zone 2 selector
            if (action == "On"):
                if (self.powerOn == False): self.DenonConn.Send(Message='PWON\r')
                self.DenonConn.Send(Message='Z2ON\r', Delay=delay)
            elif (action == "Set"):
                if (self.powerOn == False): self.DenonConn.Send(Message='PWON\r')
                if (self.zone2On == False):
                    self.DenonConn.Send(Message='Z2ON\r', Delay=delay)
                    delay += 1
                self.DenonConn.Send(Message='Z2'+self.selectorMap[Level]+'\r', Delay=delay)
                delay += 1
                # Re-query so Domoticz reflects what the amp actually selected.
                self.DenonConn.Send(Message='Z2?\r', Delay=delay)
            elif (action == "Off"):
                self.DenonConn.Send(Message='Z2OFF\r', Delay=delay)
        elif (Unit == 5): # Zone 2 Volume control
            if (self.powerOn == False): self.DenonConn.Send(Message='PWON\r')
            if (self.zone2On == False):
                self.DenonConn.Send(Message='Z2ON\r', Delay=delay)
                delay += 1
            if (action == "On"):
                self.DenonConn.Send(Message='Z2MUOFF\r', Delay=delay)
            elif (action == "Set"):
                self.DenonConn.Send(Message='Z2'+str(Level)+'\r', Delay=delay)
            elif (action == "Off"):
                self.DenonConn.Send(Message='Z2MUON\r', Delay=delay)
        # Zone 3 devices
        elif (Unit == 6): # Zone 3 selector
            if (action == "On"):
                if (self.powerOn == False): self.DenonConn.Send(Message='PWON\r')
                self.DenonConn.Send(Message='Z3ON\r', Delay=delay)
            elif (action == "Set"):
                if (self.powerOn == False): self.DenonConn.Send(Message='PWON\r')
                if (self.zone3On == False):
                    self.DenonConn.Send(Message='Z3ON\r', Delay=delay)
                    delay += 1
                self.DenonConn.Send(Message='Z3'+self.selectorMap[Level]+'\r', Delay=delay)
                delay += 1
                self.DenonConn.Send(Message='Z3?\r', Delay=delay)
            elif (action == "Off"):
                self.DenonConn.Send(Message='Z3OFF\r', Delay=delay)
        elif (Unit == 7): # Zone 3 Volume control
            if (self.powerOn == False): self.DenonConn.Send(Message='PWON\r')
            if (self.zone3On == False):
                self.DenonConn.Send(Message='Z3ON\r', Delay=delay)
                delay += 1
            if (action == "On"):
                self.DenonConn.Send(Message='Z3MUOFF\r', Delay=delay)
            elif (action == "Set"):
                self.DenonConn.Send(Message='Z3'+str(Level)+'\r', Delay=delay)
            elif (action == "Off"):
                self.DenonConn.Send(Message='Z3MUON\r', Delay=delay)
        return
    def onDisconnect(self, Connection):
        """Mark all devices timed-out when the telnet session drops."""
        Domoticz.Error("Disconnected from: "+Connection.Address+":"+Connection.Port)
        self.SyncDevices(1)
        return
    def onHeartbeat(self):
        """Send the next poll command; reconnect after 5 unanswered polls."""
        Domoticz.Debug("onHeartbeat called, last response seen "+str(self.oustandingPings)+" heartbeats ago.")
        if (self.DenonConn == None):
            self.handleConnect()
        else:
            if (self.DenonConn.Name == "Telnet") and (self.DenonConn.Connected()):
                self.DenonConn.Send(self.pollingDict[self.lastMessage])
                Domoticz.Debug("onHeartbeat: self.lastMessage "+self.lastMessage+", Sending '"+self.pollingDict[self.lastMessage][0:2]+"'.")
            if (self.oustandingPings > 5):
                Domoticz.Error(self.DenonConn.Name+" has not responded to 5 pings, terminating connection.")
                self.DenonConn = None
                self.powerOn = False
                self.oustandingPings = -1
            self.oustandingPings = self.oustandingPings + 1
        self.lastHeartbeat = datetime.datetime.now()
    def handleConnect(self):
        """Start either UDP beacon discovery or a direct telnet connection."""
        self.SyncDevices(1)
        self.DenonConn = None
        if Parameters["Mode1"] == "Discover":
            Domoticz.Log("Using auto-discovery mode to detect receiver as specified in parameters.")
            # 239.255.250.250:9131 is the AMX Device-Discovery multicast group.
            self.DenonConn = Domoticz.Connection(Name="Beacon", Transport="UDP/IP", Address="239.255.250.250", Port=str(9131))
            self.DenonConn.Listen()
        else:
            self.DenonConn = Domoticz.Connection(Name="Telnet", Transport="TCP/IP", Protocol="Line", Address=Parameters["Address"], Port=Parameters["Port"])
            self.DenonConn.Connect()
    def SyncDevices(self, TimedOut):
        """Push the in-memory zone state out to the Domoticz devices."""
        if (self.powerOn == False):
            UpdateDevice(1, 0, "Off", TimedOut)
            UpdateDevice(2, 0, "0", TimedOut)
            UpdateDevice(3, 0, str(abs(self.mainVolume1)), TimedOut)
            UpdateDevice(4, 0, "0", TimedOut)
            UpdateDevice(5, 0, str(abs(self.zone2Volume)), TimedOut)
            UpdateDevice(6, 0, "0", TimedOut)
            UpdateDevice(7, 0, str(abs(self.zone3Volume)), TimedOut)
        else:
            UpdateDevice(1, 1, "On", TimedOut)
            UpdateDevice(2, self.mainSource if self.mainOn else 0, str(self.mainSource if self.mainOn else 0), TimedOut)
            # nValue 0 = muted/off, 2 = level set; negative volume means muted.
            if (self.mainVolume1 <= 0 or self.mainOn == False): UpdateDevice(3, 0, str(abs(self.mainVolume1)), TimedOut)
            else: UpdateDevice(3, 2, str(self.mainVolume1), TimedOut)
            UpdateDevice(4, self.zone2Source if self.zone2On else 0, str(self.zone2Source if self.zone2On else 0), TimedOut)
            if (self.zone2Volume <= 0 or self.zone2On == False): UpdateDevice(5, 0, str(abs(self.zone2Volume)), TimedOut)
            else: UpdateDevice(5, 2, str(self.zone2Volume), TimedOut)
            UpdateDevice(6, self.zone3Source if self.zone3On else 0, str(self.zone3Source if self.zone3On else 0), TimedOut)
            if (self.zone3Volume <= 0 or self.zone3On == False): UpdateDevice(7, 0, str(abs(self.zone3Volume)), TimedOut)
            else: UpdateDevice(7, 2, str(self.zone3Volume), TimedOut)
        return
# Single plugin instance used by the module-level Domoticz callback shims below.
# NOTE(review): `global` at module scope is a no-op; kept as-is (standard
# Domoticz example boilerplate).
global _plugin
_plugin = BasePlugin()
def onStart():
    # Domoticz module-level callback; delegates to the plugin object.
    # (No `global` needed: _plugin is only read, never rebound.)
    _plugin.onStart()
def onConnect(Connection, Status, Description):
    # Domoticz module-level callback; delegates to the plugin object.
    _plugin.onConnect(Connection, Status, Description)
def onMessage(Connection, Data):
    # Domoticz module-level callback; delegates to the plugin object.
    _plugin.onMessage(Connection, Data)
def onCommand(Unit, Command, Level, Hue):
    # Domoticz module-level callback; delegates to the plugin object.
    _plugin.onCommand(Unit, Command, Level, Hue)
def onDisconnect(Connection):
    # Domoticz module-level callback; delegates to the plugin object.
    _plugin.onDisconnect(Connection)
def onHeartbeat():
    # Domoticz module-level callback; delegates to the plugin object.
    _plugin.onHeartbeat()
def UpdateDevice(Unit, nValue, sValue, TimedOut):
    """Push new values to a Domoticz device, but only when something changed."""
    # Make sure that the Domoticz device still exists (they can be deleted) before updating it
    if Unit not in Devices:
        return
    device = Devices[Unit]
    unchanged = (device.nValue == nValue) and (device.sValue == sValue) and (device.TimedOut == TimedOut)
    if not unchanged:
        device.Update(nValue=nValue, sValue=str(sValue), TimedOut=TimedOut)
        Domoticz.Log("Update "+str(nValue)+":'"+str(sValue)+"' ("+device.Name+")")
    return
def DumpConfigToLog():
    """Write all non-empty plugin parameters and every device to the debug log."""
    for key in Parameters:
        if Parameters[key] != "":
            Domoticz.Debug( "'" + key + "':'" + str(Parameters[key]) + "'")
    Domoticz.Debug("Device count: " + str(len(Devices)))
    for unit in Devices:
        device = Devices[unit]
        Domoticz.Debug("Device: " + str(unit) + " - " + str(device))
        Domoticz.Debug("Internal ID: '" + str(device.ID) + "'")
        Domoticz.Debug("External ID: '" + str(device.DeviceID) + "'")
        Domoticz.Debug("Device Name: '" + device.Name + "'")
        Domoticz.Debug("Device nValue: " + str(device.nValue))
        Domoticz.Debug("Device sValue: '" + device.sValue + "'")
        Domoticz.Debug("Device LastLevel: " + str(device.LastLevel))
    return
def DecodeDDDMessage(Message):
    """Parse an AMX Device-Discovery beacon datagram into a dict.

    Sample discovery message:
    AMXB<-SDKClass=Receiver><-Make=DENON><-Model=AVR-4306>
    """
    payload = Message.strip()
    # Drop the leading "AMXB" tag and the trailing '>', then the "<-" markers,
    # leaving "key=value" pairs separated by '>'.
    payload = payload[4:-1].replace("<-", "")
    return dict(pair.split("=") for pair in payload.split(">"))
|
gpl-3.0
|
Hamza5/Basic-Regular-Expressions-Tester
|
GUI/bret_gui.py
|
1
|
30151
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'GUI/bret.ui'
#
# Created: Thu Sep 18 22:16:21 2014
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
    # Qt builds exposing QString provide fromUtf8; use it when available.
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # PyQt4 API v2 (and Python 3) has no QString; strings pass through unchanged.
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        # Older PyQt4: translate() takes an explicit encoding argument.
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        # Newer PyQt4: the encoding parameter was removed.
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_CentralWidget(object):
    """Auto-generated (pyuic4) UI definition for the BRET central widget.

    NOTE(review): this module is generated from 'GUI/bret.ui' — do not edit
    the widget-construction code by hand; regenerate with pyuic4 instead.
    """
    def setupUi(self, CentralWidget):
        """Create and lay out all child widgets on ``CentralWidget``."""
        CentralWidget.setObjectName(_fromUtf8("CentralWidget"))
        CentralWidget.resize(563, 561)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(_fromUtf8("BRET-512.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
        CentralWidget.setWindowIcon(icon)
        self.verticalLayout_3 = QtGui.QVBoxLayout(CentralWidget)
        self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
        # --- Regular expression input group (pattern line edit + flag buttons) ---
        self.RegExpGroupBox = QtGui.QGroupBox(CentralWidget)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.RegExpGroupBox.sizePolicy().hasHeightForWidth())
        self.RegExpGroupBox.setSizePolicy(sizePolicy)
        self.RegExpGroupBox.setObjectName(_fromUtf8("RegExpGroupBox"))
        self.formLayout = QtGui.QFormLayout(self.RegExpGroupBox)
        self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.ExpandingFieldsGrow)
        self.formLayout.setObjectName(_fromUtf8("formLayout"))
        self.RegExpLabel = QtGui.QLabel(self.RegExpGroupBox)
        self.RegExpLabel.setObjectName(_fromUtf8("RegExpLabel"))
        self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.RegExpLabel)
        self.RegExpLayout = QtGui.QHBoxLayout()
        self.RegExpLayout.setObjectName(_fromUtf8("RegExpLayout"))
        self.RegExpLineEdit = QtGui.QLineEdit(self.RegExpGroupBox)
        self.RegExpLineEdit.setObjectName(_fromUtf8("RegExpLineEdit"))
        self.RegExpLayout.addWidget(self.RegExpLineEdit)
        self.formLayout.setLayout(0, QtGui.QFormLayout.FieldRole, self.RegExpLayout)
        self.RegExpOptionsLabel = QtGui.QLabel(self.RegExpGroupBox)
        self.RegExpOptionsLabel.setObjectName(_fromUtf8("RegExpOptionsLabel"))
        self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.RegExpOptionsLabel)
        self.RegExpOptionsLayout = QtGui.QHBoxLayout()
        self.RegExpOptionsLayout.setObjectName(_fromUtf8("RegExpOptionsLayout"))
        # Checkable (toggle) buttons for the regex flags: IGNORECASE,
        # MULTILINE, DOTALL and ASCII.
        self.IgnoreCasePushButton = QtGui.QPushButton(self.RegExpGroupBox)
        self.IgnoreCasePushButton.setCheckable(True)
        self.IgnoreCasePushButton.setObjectName(_fromUtf8("IgnoreCasePushButton"))
        self.RegExpOptionsLayout.addWidget(self.IgnoreCasePushButton)
        self.MultiLinePushButton = QtGui.QPushButton(self.RegExpGroupBox)
        self.MultiLinePushButton.setCheckable(True)
        self.MultiLinePushButton.setObjectName(_fromUtf8("MultiLinePushButton"))
        self.RegExpOptionsLayout.addWidget(self.MultiLinePushButton)
        self.DotAllPushButton = QtGui.QPushButton(self.RegExpGroupBox)
        self.DotAllPushButton.setCheckable(True)
        self.DotAllPushButton.setObjectName(_fromUtf8("DotAllPushButton"))
        self.RegExpOptionsLayout.addWidget(self.DotAllPushButton)
        self.ASCIIOnlyPushButton = QtGui.QPushButton(self.RegExpGroupBox)
        self.ASCIIOnlyPushButton.setCheckable(True)
        self.ASCIIOnlyPushButton.setObjectName(_fromUtf8("ASCIIOnlyPushButton"))
        self.RegExpOptionsLayout.addWidget(self.ASCIIOnlyPushButton)
        self.formLayout.setLayout(1, QtGui.QFormLayout.FieldRole, self.RegExpOptionsLayout)
        self.verticalLayout_3.addWidget(self.RegExpGroupBox)
        # --- Input-method tabs: direct text, file, or URL download ---
        self.MethodTabWidget = QtGui.QTabWidget(CentralWidget)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.MethodTabWidget.sizePolicy().hasHeightForWidth())
        self.MethodTabWidget.setSizePolicy(sizePolicy)
        self.MethodTabWidget.setTabPosition(QtGui.QTabWidget.South)
        self.MethodTabWidget.setObjectName(_fromUtf8("MethodTabWidget"))
        self.TextTab = QtGui.QWidget()
        self.TextTab.setObjectName(_fromUtf8("TextTab"))
        self.horizontalLayout_2 = QtGui.QHBoxLayout(self.TextTab)
        self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
        self.TextEdit = QtGui.QPlainTextEdit(self.TextTab)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.TextEdit.sizePolicy().hasHeightForWidth())
        self.TextEdit.setSizePolicy(sizePolicy)
        self.TextEdit.viewport().setProperty("cursor", QtGui.QCursor(QtCore.Qt.IBeamCursor))
        self.TextEdit.setObjectName(_fromUtf8("TextEdit"))
        self.horizontalLayout_2.addWidget(self.TextEdit)
        self.TextTabButtonsLayout = QtGui.QVBoxLayout()
        self.TextTabButtonsLayout.setObjectName(_fromUtf8("TextTabButtonsLayout"))
        self.PasteTextPushButton = QtGui.QPushButton(self.TextTab)
        self.PasteTextPushButton.setEnabled(False)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.PasteTextPushButton.sizePolicy().hasHeightForWidth())
        self.PasteTextPushButton.setSizePolicy(sizePolicy)
        icon = QtGui.QIcon.fromTheme(_fromUtf8("edit-paste"))
        self.PasteTextPushButton.setIcon(icon)
        self.PasteTextPushButton.setObjectName(_fromUtf8("PasteTextPushButton"))
        self.TextTabButtonsLayout.addWidget(self.PasteTextPushButton)
        self.ResetTextPushButton = QtGui.QPushButton(self.TextTab)
        icon = QtGui.QIcon.fromTheme(_fromUtf8("edit-clear"))
        self.ResetTextPushButton.setIcon(icon)
        self.ResetTextPushButton.setObjectName(_fromUtf8("ResetTextPushButton"))
        self.TextTabButtonsLayout.addWidget(self.ResetTextPushButton)
        self.horizontalLayout_2.addLayout(self.TextTabButtonsLayout)
        icon = QtGui.QIcon.fromTheme(_fromUtf8("text-field"))
        self.MethodTabWidget.addTab(self.TextTab, icon, _fromUtf8(""))
        self.FileTab = QtGui.QWidget()
        self.FileTab.setObjectName(_fromUtf8("FileTab"))
        self.horizontalLayout_3 = QtGui.QHBoxLayout(self.FileTab)
        self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
        self.FilePathLineEdit = QtGui.QLineEdit(self.FileTab)
        self.FilePathLineEdit.setObjectName(_fromUtf8("FilePathLineEdit"))
        self.horizontalLayout_3.addWidget(self.FilePathLineEdit)
        self.FilePathPushButton = QtGui.QPushButton(self.FileTab)
        icon = QtGui.QIcon.fromTheme(_fromUtf8("document-open"))
        self.FilePathPushButton.setIcon(icon)
        self.FilePathPushButton.setObjectName(_fromUtf8("FilePathPushButton"))
        self.horizontalLayout_3.addWidget(self.FilePathPushButton)
        self.ResetFilePathPushButton = QtGui.QPushButton(self.FileTab)
        icon = QtGui.QIcon.fromTheme(_fromUtf8("edit-clear"))
        self.ResetFilePathPushButton.setIcon(icon)
        self.ResetFilePathPushButton.setObjectName(_fromUtf8("ResetFilePathPushButton"))
        self.horizontalLayout_3.addWidget(self.ResetFilePathPushButton)
        icon = QtGui.QIcon.fromTheme(_fromUtf8("document-import"))
        self.MethodTabWidget.addTab(self.FileTab, icon, _fromUtf8(""))
        self.URLTab = QtGui.QWidget()
        self.URLTab.setObjectName(_fromUtf8("URLTab"))
        self.verticalLayout_4 = QtGui.QVBoxLayout(self.URLTab)
        self.verticalLayout_4.setObjectName(_fromUtf8("verticalLayout_4"))
        self.URLLayout = QtGui.QHBoxLayout()
        self.URLLayout.setObjectName(_fromUtf8("URLLayout"))
        self.URLLineEdit = QtGui.QLineEdit(self.URLTab)
        self.URLLineEdit.setObjectName(_fromUtf8("URLLineEdit"))
        self.URLLayout.addWidget(self.URLLineEdit)
        self.PasteURLPushButton = QtGui.QPushButton(self.URLTab)
        self.PasteURLPushButton.setEnabled(False)
        icon = QtGui.QIcon.fromTheme(_fromUtf8("edit-paste"))
        self.PasteURLPushButton.setIcon(icon)
        self.PasteURLPushButton.setObjectName(_fromUtf8("PasteURLPushButton"))
        self.URLLayout.addWidget(self.PasteURLPushButton)
        self.ResetURLPushButton = QtGui.QPushButton(self.URLTab)
        icon = QtGui.QIcon.fromTheme(_fromUtf8("edit-clear"))
        self.ResetURLPushButton.setIcon(icon)
        self.ResetURLPushButton.setObjectName(_fromUtf8("ResetURLPushButton"))
        self.URLLayout.addWidget(self.ResetURLPushButton)
        self.verticalLayout_4.addLayout(self.URLLayout)
        self.DownloadProgressBar = QtGui.QProgressBar(self.URLTab)
        self.DownloadProgressBar.setProperty("value", 0)
        self.DownloadProgressBar.setObjectName(_fromUtf8("DownloadProgressBar"))
        self.verticalLayout_4.addWidget(self.DownloadProgressBar)
        icon = QtGui.QIcon.fromTheme(_fromUtf8("download"))
        self.MethodTabWidget.addTab(self.URLTab, icon, _fromUtf8(""))
        self.verticalLayout_3.addWidget(self.MethodTabWidget)
        # --- Results tabs: find matches, search-and-replace, split ---
        self.ResultsTabWidget = QtGui.QTabWidget(CentralWidget)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.ResultsTabWidget.sizePolicy().hasHeightForWidth())
        self.ResultsTabWidget.setSizePolicy(sizePolicy)
        self.ResultsTabWidget.setObjectName(_fromUtf8("ResultsTabWidget"))
        self.MatchesTab = QtGui.QWidget()
        self.MatchesTab.setObjectName(_fromUtf8("MatchesTab"))
        self.verticalLayout = QtGui.QVBoxLayout(self.MatchesTab)
        self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
        self.MatchesLayout = QtGui.QHBoxLayout()
        self.MatchesLayout.setObjectName(_fromUtf8("MatchesLayout"))
        self.MatchesLimitLabel = QtGui.QLabel(self.MatchesTab)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.MatchesLimitLabel.sizePolicy().hasHeightForWidth())
        self.MatchesLimitLabel.setSizePolicy(sizePolicy)
        self.MatchesLimitLabel.setObjectName(_fromUtf8("MatchesLimitLabel"))
        self.MatchesLayout.addWidget(self.MatchesLimitLabel)
        self.MatchesLimitSpinBox = QtGui.QSpinBox(self.MatchesTab)
        self.MatchesLimitSpinBox.setEnabled(False)
        self.MatchesLimitSpinBox.setMinimum(1)
        self.MatchesLimitSpinBox.setObjectName(_fromUtf8("MatchesLimitSpinBox"))
        self.MatchesLayout.addWidget(self.MatchesLimitSpinBox)
        self.NoMatchesLimitCheckBox = QtGui.QCheckBox(self.MatchesTab)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.NoMatchesLimitCheckBox.sizePolicy().hasHeightForWidth())
        self.NoMatchesLimitCheckBox.setSizePolicy(sizePolicy)
        self.NoMatchesLimitCheckBox.setChecked(True)
        self.NoMatchesLimitCheckBox.setObjectName(_fromUtf8("NoMatchesLimitCheckBox"))
        self.MatchesLayout.addWidget(self.NoMatchesLimitCheckBox)
        self.GroupsCheckBox = QtGui.QCheckBox(self.MatchesTab)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.GroupsCheckBox.sizePolicy().hasHeightForWidth())
        self.GroupsCheckBox.setSizePolicy(sizePolicy)
        self.GroupsCheckBox.setObjectName(_fromUtf8("GroupsCheckBox"))
        self.MatchesLayout.addWidget(self.GroupsCheckBox)
        self.PositionsCheckBox = QtGui.QCheckBox(self.MatchesTab)
        self.PositionsCheckBox.setObjectName(_fromUtf8("PositionsCheckBox"))
        self.MatchesLayout.addWidget(self.PositionsCheckBox)
        self.verticalLayout.addLayout(self.MatchesLayout)
        self.MatchesTreeView = QtGui.QTreeView(self.MatchesTab)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.MatchesTreeView.sizePolicy().hasHeightForWidth())
        self.MatchesTreeView.setSizePolicy(sizePolicy)
        self.MatchesTreeView.setObjectName(_fromUtf8("MatchesTreeView"))
        self.verticalLayout.addWidget(self.MatchesTreeView)
        self.MatchesButtonsLayout = QtGui.QHBoxLayout()
        self.MatchesButtonsLayout.setObjectName(_fromUtf8("MatchesButtonsLayout"))
        self.NumberOfResultsLabel = QtGui.QLabel(self.MatchesTab)
        self.NumberOfResultsLabel.setText(_fromUtf8(""))
        self.NumberOfResultsLabel.setObjectName(_fromUtf8("NumberOfResultsLabel"))
        self.MatchesButtonsLayout.addWidget(self.NumberOfResultsLabel)
        spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.MatchesButtonsLayout.addItem(spacerItem)
        self.FindMatchesPushButton = QtGui.QPushButton(self.MatchesTab)
        self.FindMatchesPushButton.setEnabled(False)
        icon = QtGui.QIcon.fromTheme(_fromUtf8("edit-find"))
        self.FindMatchesPushButton.setIcon(icon)
        self.FindMatchesPushButton.setDefault(True)
        self.FindMatchesPushButton.setObjectName(_fromUtf8("FindMatchesPushButton"))
        self.MatchesButtonsLayout.addWidget(self.FindMatchesPushButton)
        self.ResetMatchesPushButton = QtGui.QPushButton(self.MatchesTab)
        icon = QtGui.QIcon.fromTheme(_fromUtf8("edit-clear"))
        self.ResetMatchesPushButton.setIcon(icon)
        self.ResetMatchesPushButton.setObjectName(_fromUtf8("ResetMatchesPushButton"))
        self.MatchesButtonsLayout.addWidget(self.ResetMatchesPushButton)
        self.verticalLayout.addLayout(self.MatchesButtonsLayout)
        icon = QtGui.QIcon.fromTheme(_fromUtf8("edit-find"))
        self.ResultsTabWidget.addTab(self.MatchesTab, icon, _fromUtf8(""))
        self.ReplaceTab = QtGui.QWidget()
        self.ReplaceTab.setObjectName(_fromUtf8("ReplaceTab"))
        self.verticalLayout_2 = QtGui.QVBoxLayout(self.ReplaceTab)
        self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
        self.ReplacementTextLayout = QtGui.QHBoxLayout()
        self.ReplacementTextLayout.setObjectName(_fromUtf8("ReplacementTextLayout"))
        self.ReplacementTextLabel = QtGui.QLabel(self.ReplaceTab)
        self.ReplacementTextLabel.setObjectName(_fromUtf8("ReplacementTextLabel"))
        self.ReplacementTextLayout.addWidget(self.ReplacementTextLabel)
        self.ReplacementTextLineEdit = QtGui.QLineEdit(self.ReplaceTab)
        self.ReplacementTextLineEdit.setObjectName(_fromUtf8("ReplacementTextLineEdit"))
        self.ReplacementTextLayout.addWidget(self.ReplacementTextLineEdit)
        self.verticalLayout_2.addLayout(self.ReplacementTextLayout)
        self.ReplacementsLayout = QtGui.QHBoxLayout()
        self.ReplacementsLayout.setObjectName(_fromUtf8("ReplacementsLayout"))
        self.ReplacementsLimitLabel = QtGui.QLabel(self.ReplaceTab)
        self.ReplacementsLimitLabel.setObjectName(_fromUtf8("ReplacementsLimitLabel"))
        self.ReplacementsLayout.addWidget(self.ReplacementsLimitLabel)
        self.ReplacementsLimitSpinBox = QtGui.QSpinBox(self.ReplaceTab)
        self.ReplacementsLimitSpinBox.setEnabled(False)
        self.ReplacementsLimitSpinBox.setMinimum(1)
        self.ReplacementsLimitSpinBox.setObjectName(_fromUtf8("ReplacementsLimitSpinBox"))
        self.ReplacementsLayout.addWidget(self.ReplacementsLimitSpinBox)
        self.NoReplacementsLimitCheckBox = QtGui.QCheckBox(self.ReplaceTab)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.NoReplacementsLimitCheckBox.sizePolicy().hasHeightForWidth())
        self.NoReplacementsLimitCheckBox.setSizePolicy(sizePolicy)
        self.NoReplacementsLimitCheckBox.setChecked(True)
        self.NoReplacementsLimitCheckBox.setObjectName(_fromUtf8("NoReplacementsLimitCheckBox"))
        self.ReplacementsLayout.addWidget(self.NoReplacementsLimitCheckBox)
        self.NumberOfReplacementsLabel = QtGui.QLabel(self.ReplaceTab)
        self.NumberOfReplacementsLabel.setText(_fromUtf8(""))
        self.NumberOfReplacementsLabel.setObjectName(_fromUtf8("NumberOfReplacementsLabel"))
        self.ReplacementsLayout.addWidget(self.NumberOfReplacementsLabel)
        self.verticalLayout_2.addLayout(self.ReplacementsLayout)
        self.ReplacementsPlainTextEdit = QtGui.QPlainTextEdit(self.ReplaceTab)
        self.ReplacementsPlainTextEdit.setReadOnly(True)
        self.ReplacementsPlainTextEdit.setObjectName(_fromUtf8("ReplacementsPlainTextEdit"))
        self.verticalLayout_2.addWidget(self.ReplacementsPlainTextEdit)
        self.ReplaceButtonsLayout = QtGui.QHBoxLayout()
        self.ReplaceButtonsLayout.setObjectName(_fromUtf8("ReplaceButtonsLayout"))
        spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.ReplaceButtonsLayout.addItem(spacerItem1)
        self.ReplacePushButton = QtGui.QPushButton(self.ReplaceTab)
        self.ReplacePushButton.setEnabled(False)
        icon = QtGui.QIcon.fromTheme(_fromUtf8("edit-find-replace"))
        self.ReplacePushButton.setIcon(icon)
        self.ReplacePushButton.setDefault(True)
        self.ReplacePushButton.setObjectName(_fromUtf8("ReplacePushButton"))
        self.ReplaceButtonsLayout.addWidget(self.ReplacePushButton)
        self.ResetReplacementsPushButton = QtGui.QPushButton(self.ReplaceTab)
        icon = QtGui.QIcon.fromTheme(_fromUtf8("edit-clear"))
        self.ResetReplacementsPushButton.setIcon(icon)
        self.ResetReplacementsPushButton.setObjectName(_fromUtf8("ResetReplacementsPushButton"))
        self.ReplaceButtonsLayout.addWidget(self.ResetReplacementsPushButton)
        self.verticalLayout_2.addLayout(self.ReplaceButtonsLayout)
        icon = QtGui.QIcon.fromTheme(_fromUtf8("edit-find-replace"))
        self.ResultsTabWidget.addTab(self.ReplaceTab, icon, _fromUtf8(""))
        self.SplitTab = QtGui.QWidget()
        self.SplitTab.setObjectName(_fromUtf8("SplitTab"))
        self.verticalLayout_5 = QtGui.QVBoxLayout(self.SplitTab)
        self.verticalLayout_5.setObjectName(_fromUtf8("verticalLayout_5"))
        self.horizontalLayout_5 = QtGui.QHBoxLayout()
        self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
        self.SplitsLimitLabel = QtGui.QLabel(self.SplitTab)
        self.SplitsLimitLabel.setObjectName(_fromUtf8("SplitsLimitLabel"))
        self.horizontalLayout_5.addWidget(self.SplitsLimitLabel)
        self.SplitsLimitSpinBox = QtGui.QSpinBox(self.SplitTab)
        self.SplitsLimitSpinBox.setEnabled(False)
        self.SplitsLimitSpinBox.setMinimum(1)
        self.SplitsLimitSpinBox.setObjectName(_fromUtf8("SplitsLimitSpinBox"))
        self.horizontalLayout_5.addWidget(self.SplitsLimitSpinBox)
        self.NoSplitsLimitCheckBox = QtGui.QCheckBox(self.SplitTab)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.NoSplitsLimitCheckBox.sizePolicy().hasHeightForWidth())
        self.NoSplitsLimitCheckBox.setSizePolicy(sizePolicy)
        self.NoSplitsLimitCheckBox.setChecked(True)
        self.NoSplitsLimitCheckBox.setObjectName(_fromUtf8("NoSplitsLimitCheckBox"))
        self.horizontalLayout_5.addWidget(self.NoSplitsLimitCheckBox)
        self.NumberOfSplitsLabel = QtGui.QLabel(self.SplitTab)
        self.NumberOfSplitsLabel.setText(_fromUtf8(""))
        self.NumberOfSplitsLabel.setObjectName(_fromUtf8("NumberOfSplitsLabel"))
        self.horizontalLayout_5.addWidget(self.NumberOfSplitsLabel)
        self.verticalLayout_5.addLayout(self.horizontalLayout_5)
        self.SplitResultsListView = QtGui.QListView(self.SplitTab)
        self.SplitResultsListView.setObjectName(_fromUtf8("SplitResultsListView"))
        self.verticalLayout_5.addWidget(self.SplitResultsListView)
        self.SplitButtonsLayout = QtGui.QHBoxLayout()
        self.SplitButtonsLayout.setObjectName(_fromUtf8("SplitButtonsLayout"))
        spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
        self.SplitButtonsLayout.addItem(spacerItem2)
        self.SplitPushButton = QtGui.QPushButton(self.SplitTab)
        self.SplitPushButton.setEnabled(False)
        icon = QtGui.QIcon.fromTheme(_fromUtf8("edit-cut"))
        self.SplitPushButton.setIcon(icon)
        self.SplitPushButton.setDefault(True)
        self.SplitPushButton.setObjectName(_fromUtf8("SplitPushButton"))
        self.SplitButtonsLayout.addWidget(self.SplitPushButton)
        self.ResetSplitPushButton = QtGui.QPushButton(self.SplitTab)
        icon = QtGui.QIcon.fromTheme(_fromUtf8("edit-clear"))
        self.ResetSplitPushButton.setIcon(icon)
        self.ResetSplitPushButton.setObjectName(_fromUtf8("ResetSplitPushButton"))
        self.SplitButtonsLayout.addWidget(self.ResetSplitPushButton)
        self.verticalLayout_5.addLayout(self.SplitButtonsLayout)
        icon = QtGui.QIcon.fromTheme(_fromUtf8("edit-cut"))
        self.ResultsTabWidget.addTab(self.SplitTab, icon, _fromUtf8(""))
        self.verticalLayout_3.addWidget(self.ResultsTabWidget)
        # Keyboard mnemonics: labels focus their associated input widgets.
        self.RegExpLabel.setBuddy(self.RegExpLineEdit)
        self.MatchesLimitLabel.setBuddy(self.MatchesLimitSpinBox)
        self.ReplacementTextLabel.setBuddy(self.ReplacementTextLineEdit)
        self.ReplacementsLimitLabel.setBuddy(self.ReplacementsLimitSpinBox)
        self.SplitsLimitLabel.setBuddy(self.SplitsLimitSpinBox)
        self.retranslateUi(CentralWidget)
        self.MethodTabWidget.setCurrentIndex(0)
        self.ResultsTabWidget.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(CentralWidget)
    def retranslateUi(self, CentralWidget):
        """Set all user-visible strings (titles, labels, tooltips) via
        the translation machinery so they can be localized."""
        CentralWidget.setWindowTitle(_translate("CentralWidget", "Basic Regular Expression Tester", None))
        self.RegExpGroupBox.setTitle(_translate("CentralWidget", "Regular expresion and options", None))
        self.RegExpLabel.setText(_translate("CentralWidget", "Re&gular expression", None))
        self.RegExpLineEdit.setWhatsThis(_translate("CentralWidget", "Type here the regular expression in Python syntax", None))
        self.RegExpOptionsLabel.setText(_translate("CentralWidget", "Additional options", None))
        self.IgnoreCasePushButton.setToolTip(_translate("CentralWidget", "Do not distinguish between lowercase and uppercase letters.\n"
"For example : the pattern [A-Z] will match lowercase letters too.", None))
        self.IgnoreCasePushButton.setStatusTip(_translate("CentralWidget", "Click to select \'Ignore case\' option", None))
        self.IgnoreCasePushButton.setText(_translate("CentralWidget", "&Ignore case", None))
        self.MultiLinePushButton.setToolTip(_translate("CentralWidget", "Make the pattern character \'^\' matches at the beginning of the string and\n"
"at the beginning of each line, and the pattern character \'$\' matches at the\n"
"end of the string and at the end of each line.", None))
        self.MultiLinePushButton.setStatusTip(_translate("CentralWidget", "Click to select \'Multiline mode\' option", None))
        self.MultiLinePushButton.setText(_translate("CentralWidget", "Multiline m&ode", None))
        self.DotAllPushButton.setToolTip(_translate("CentralWidget", "Make the \'.\' special character match any character at all, including a newline.", None))
        self.DotAllPushButton.setStatusTip(_translate("CentralWidget", "Click to select \'Dot matches all\' option", None))
        self.DotAllPushButton.setText(_translate("CentralWidget", "&Dot matches all", None))
        self.ASCIIOnlyPushButton.setToolTip(_translate("CentralWidget", "Make \\w, \\W, \\b, \\B, \\d, \\D, \\s and \\S perform ASCII-only\n"
"matching instead of full Unicode matching.", None))
        self.ASCIIOnlyPushButton.setStatusTip(_translate("CentralWidget", "Click to select \'ASCII only\' option", None))
        self.ASCIIOnlyPushButton.setText(_translate("CentralWidget", "&ASCII only", None))
        self.PasteTextPushButton.setStatusTip(_translate("CentralWidget", "Paste text from the clipboard", None))
        self.PasteTextPushButton.setText(_translate("CentralWidget", "Paste from\n"
"&clipboard", None))
        self.ResetTextPushButton.setStatusTip(_translate("CentralWidget", "Clear the text", None))
        self.ResetTextPushButton.setText(_translate("CentralWidget", "R&eset", None))
        self.MethodTabWidget.setTabText(self.MethodTabWidget.indexOf(self.TextTab), _translate("CentralWidget", "Input te&xt", None))
        self.FilePathLineEdit.setPlaceholderText(_translate("CentralWidget", "File path", None))
        self.FilePathPushButton.setStatusTip(_translate("CentralWidget", "Select a file", None))
        self.FilePathPushButton.setText(_translate("CentralWidget", "&Choose a file", None))
        self.ResetFilePathPushButton.setStatusTip(_translate("CentralWidget", "Clear the file path", None))
        self.ResetFilePathPushButton.setText(_translate("CentralWidget", "R&eset", None))
        self.MethodTabWidget.setTabText(self.MethodTabWidget.indexOf(self.FileTab), _translate("CentralWidget", "Load &from file", None))
        self.URLLineEdit.setPlaceholderText(_translate("CentralWidget", "URL", None))
        self.PasteURLPushButton.setStatusTip(_translate("CentralWidget", "Past URL from the clipboard", None))
        self.PasteURLPushButton.setText(_translate("CentralWidget", "Paste from &clipboard", None))
        self.ResetURLPushButton.setStatusTip(_translate("CentralWidget", "Clear the URL", None))
        self.ResetURLPushButton.setText(_translate("CentralWidget", "R&eset", None))
        self.DownloadProgressBar.setFormat(_translate("CentralWidget", "Downloading %v/%m bytes (%p%)", None))
        self.MethodTabWidget.setTabText(self.MethodTabWidget.indexOf(self.URLTab), _translate("CentralWidget", "Load from &URL", None))
        self.MatchesLimitLabel.setText(_translate("CentralWidget", "Results limit", None))
        self.MatchesLimitSpinBox.setStatusTip(_translate("CentralWidget", "Set the maximum number of results", None))
        self.NoMatchesLimitCheckBox.setStatusTip(_translate("CentralWidget", "Ignore the limit of the results", None))
        self.NoMatchesLimitCheckBox.setText(_translate("CentralWidget", "No &limit", None))
        self.GroupsCheckBox.setStatusTip(_translate("CentralWidget", "Display also what was matched by parentheses", None))
        self.GroupsCheckBox.setText(_translate("CentralWidget", "Sho&w matched groups", None))
        self.PositionsCheckBox.setStatusTip(_translate("CentralWidget", "Display the beginning and ending positions where each match was found", None))
        self.PositionsCheckBox.setText(_translate("CentralWidget", "Show po&sitions", None))
        self.FindMatchesPushButton.setText(_translate("CentralWidget", "Find &matches", None))
        self.ResetMatchesPushButton.setText(_translate("CentralWidget", "Rese&t results", None))
        self.ResultsTabWidget.setTabText(self.ResultsTabWidget.indexOf(self.MatchesTab), _translate("CentralWidget", "Fi&nd matches", None))
        self.ReplacementTextLabel.setText(_translate("CentralWidget", "Replace&ment text", None))
        self.ReplacementTextLineEdit.setStatusTip(_translate("CentralWidget", "Text to be inserted where a match was found", None))
        self.ReplacementsLimitLabel.setText(_translate("CentralWidget", "Replacements limit", None))
        self.ReplacementsLimitSpinBox.setStatusTip(_translate("CentralWidget", "Set the maximum number of replacements", None))
        self.NoReplacementsLimitCheckBox.setStatusTip(_translate("CentralWidget", "Ignore the limit of the replacements", None))
        self.NoReplacementsLimitCheckBox.setText(_translate("CentralWidget", "No &limit", None))
        self.ReplacePushButton.setText(_translate("CentralWidget", "&Search and replace", None))
        self.ResetReplacementsPushButton.setText(_translate("CentralWidget", "Rese&t results", None))
        self.ResultsTabWidget.setTabText(self.ResultsTabWidget.indexOf(self.ReplaceTab), _translate("CentralWidget", "Search and &replace", None))
        self.SplitsLimitLabel.setText(_translate("CentralWidget", "Splits limit", None))
        self.SplitsLimitSpinBox.setStatusTip(_translate("CentralWidget", "Set the maximum number of splits", None))
        self.NoSplitsLimitCheckBox.setStatusTip(_translate("CentralWidget", "Ignore the limit of the splits", None))
        self.NoSplitsLimitCheckBox.setText(_translate("CentralWidget", "No &limit", None))
        self.SplitPushButton.setText(_translate("CentralWidget", "&Split text", None))
        self.ResetSplitPushButton.setText(_translate("CentralWidget", "Rese&t results", None))
        self.ResultsTabWidget.setTabText(self.ResultsTabWidget.indexOf(self.SplitTab), _translate("CentralWidget", "S&plit text", None))
|
gpl-3.0
|
jessstrap/servotk
|
tests/wpt/run.py
|
40
|
2301
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import multiprocessing
import os
import sys
import mozlog
import grouping_formatter
here = os.path.split(__file__)[0]
servo_root = os.path.abspath(os.path.join(here, "..", ".."))
def wpt_path(*components):
    """Join *components* onto the directory containing this script."""
    return os.path.join(here, *components)
def servo_path(*components):
    """Join *components* onto the Servo repository root."""
    return os.path.join(servo_root, *components)
# Imports
sys.path.append(wpt_path("harness"))
from wptrunner import wptrunner, wptcommandline
def run_tests(paths=None, **kwargs):
    """Configure logging and defaults, then run web-platform-tests.

    Returns a shell-style exit code: 0 when all tests passed, 1 otherwise.
    """
    if paths is None:
        paths = {}
    set_defaults(paths, kwargs)

    # Register Servo's grouping formatter with mozlog so "--log-servo" works.
    mozlog.commandline.log_formatters["servo"] = \
        (grouping_formatter.GroupingFormatter, "A grouping output formatter")

    # When exactly one test file is named on the command line, stream output
    # mach-style; otherwise use the grouped Servo formatter.
    test_list = kwargs["test_list"]
    mach_style = False
    if len(test_list) == 1:
        extension = os.path.splitext(test_list[0])[1].lower()
        mach_style = extension in (".htm", ".html", ".js", ".xhtml")

    formatter_key = "mach" if mach_style else "servo"
    wptrunner.setup_logging(kwargs, {formatter_key: sys.stdout})

    return 0 if wptrunner.run_tests(**kwargs) else 1
def set_defaults(paths, kwargs):
    """Fill unset wptrunner arguments with Servo-specific defaults.

    ``paths`` may supply fallbacks for ``config`` and ``include_manifest``;
    ``kwargs`` is mutated in place and then validated by wptcommandline.
    """
    if kwargs["product"] is None:
        kwargs["product"] = "servo"

    for key in ("config", "include_manifest"):
        if kwargs[key] is None and key in paths:
            kwargs[key] = paths[key]

    if kwargs["binary"] is None:
        # Default to the servo binary built under target/{release,debug}.
        build_profile = "release" if kwargs["release"] else "debug"
        executable = "servo"
        if sys.platform == "win32":
            executable += ".exe"
        kwargs["binary"] = servo_path("target", build_profile, executable)

    if kwargs["processes"] is None:
        kwargs["processes"] = multiprocessing.cpu_count()

    # Always include the Ahem stylesheet shipped with Servo's resources.
    kwargs["user_stylesheets"].append(servo_path("resources", "ahem.css"))

    wptcommandline.check_args(kwargs)
def main(paths=None):
    """Entry point: parse wptrunner CLI arguments and run the tests."""
    arguments = vars(wptcommandline.create_parser().parse_args())
    return run_tests(paths, **arguments)
|
mpl-2.0
|
sinkpoint/dipy
|
dipy/tracking/markov.py
|
10
|
17454
|
# -*- coding: utf-8 -*-
"""Implementation of various Tractography methods
these tools are meant to be paired with diffusion reconstruction methods from
dipy.reconst
This module uses the trackvis coordinate system, for more information about
this coordinate system please see dipy.tracking.utils
The following modules also use this coordinate system:
dipy.tracking.utils
dipy.tracking.integration
dipy.reconst.interpolate
"""
from __future__ import division, print_function, absolute_import
from ..utils.six.moves import xrange
import numpy as np
from ..reconst.interpolate import OutsideImage, NearestNeighborInterpolator
from ..reconst.peaks import default_sphere, peak_directions
from . import utils
class DirectionFinder(object):
    """Find tracking directions as the peaks of a fit's discrete ODF."""

    # Defaults shared by all instances; override per-instance as needed.
    sphere = default_sphere
    relative_peak_threshold = .5
    min_seperation_angle = 45  # NOTE: attribute name kept (historical spelling)

    def __call__(self, fit):
        """Return the peak directions of ``fit``'s ODF on ``self.sphere``."""
        odf_samples = fit.odf(self.sphere)
        peaks, _, _ = peak_directions(odf_samples, self.sphere,
                                      self.relative_peak_threshold,
                                      self.min_seperation_angle)
        return peaks
class BoundaryStepper(object):
    """Steps along a direction just past the closest voxel boundary.

    Parameters
    ----------
    voxel_size : array-like
        Size of voxels in the data volume.
    overstep : float
        A small number used to prevent the track from getting stuck at the
        edge of a voxel.
    """

    def __init__(self, voxel_size=(1, 1, 1), overstep=.1):
        self.overstep = overstep
        self.voxel_size = np.array(voxel_size, 'float')

    def __call__(self, location, step):
        """Return ``location`` advanced along ``step`` just past the
        nearest voxel boundary.

        Parameters
        ----------
        location : ndarray, (3,)
            Point to integrate from.
        step : ndarray, (3,)
            Direction in 3-space to integrate along.
        """
        # Position within the current voxel along each axis.
        offset = location % self.voxel_size
        # Boundary to aim for per axis: the far face (voxel_size) for
        # non-negative step components, the near face (0) for negative ones.
        boundaries = np.where(np.signbit(step), 0., self.voxel_size)
        distances = (boundaries - offset) / step
        smallest = min(distances) + self.overstep
        return location + smallest * step
class FixedSizeStepper(object):
    """A stepper that advances by a constant step length."""

    def __init__(self, step_size=.5):
        # Length of every step taken by __call__.
        self.step_size = step_size

    def __call__(self, location, step):
        """Return ``location`` advanced by ``step_size`` along ``step``."""
        return location + self.step_size * step
def markov_streamline(get_direction, take_step, seed, first_step, maxlen):
    """Create a streamline starting at ``seed``.

    Parameters
    ----------
    get_direction : callable
        Returns the next direction given a location and the previous
        direction, or None to terminate the streamline.
    take_step : callable
        Advances a location along a direction.
    seed : array (3,)
        The seed point of the streamline.
    first_step : array (3,)
        A unit vector giving the direction of the first step.
    maxlen : int
        Maximum number of segments allowed; guards against infinite loops.

    Returns
    -------
    streamline : array (N, 3)
        A streamline.
    """
    points = []
    location, direction = seed, first_step
    try:
        for _ in xrange(maxlen):
            points.append(location)
            location = take_step(location, direction)
            direction = get_direction(location, direction)
            if direction is None:
                # Tracking terminated: keep the final location as well.
                points.append(location)
                break
    except OutsideImage:
        # The track left the image volume; return what was collected so far.
        pass
    return np.array(points)
class MarkovIntegrator(object):
    """An abstract class for fiber-tracking"""

    # Shared helper that extracts candidate tracking directions from a model
    # fit. NOTE(review): defined at class level, so one instance is shared by
    # all subclasses/instances — confirm this sharing is intended.
    _get_directions = DirectionFinder()

    def __init__(self, model, interpolator, mask, take_step, angle_limit,
                 seeds, max_cross=None, maxlen=500, mask_voxel_size=None,
                 affine=None):
        """Creates streamlines by using a Markov approach.

        Parameters
        ----------
        model : model
            The model used to fit diffusion data.
        interpolator : interpolator
            Diffusion weighted data wrapped in an interpolator. Data should be
            normalized.
        mask : array, 3D
            Used to confine tracking, streamlines are terminated if the
            tracking leaves the mask.
        take_step : callable
            Determines the length of each step.
        angle_limit : float [0, 90]
            Maximum angle allowed between successive steps of the streamline.
        seeds : array (N, 3)
            Points to seed the tracking. Seed points should be given in point
            space of the track (see ``affine``).
        max_cross : int or None
            The maximum number of direction to track from each seed in crossing
            voxels. By default track all peaks of the odf, otherwise track the
            largest `max_cross` peaks.
        maxlen : int
            Maximum number of steps to track from seed. Used to prevent
            infinite loops.
        mask_voxel_size : array (3,)
            Voxel size for the mask. `mask` should cover the same FOV as data,
            but it can have a different voxel size. Same as the data by
            default.
        affine : array (4, 4)
            Coordinate space for the streamline point with respect to voxel
            indices of input data.
        """
        self.model = model
        self.interpolator = interpolator
        self.seeds = seeds
        self.max_cross = max_cross
        self.maxlen = maxlen

        # Tracking runs in a scaled voxel space: the diagonal carries the
        # voxel size and the translation shifts coordinates by half a voxel
        # (i.e. onto voxel centers).
        voxel_size = np.asarray(interpolator.voxel_size)
        self._tracking_space = tracking_space = np.eye(4)
        tracking_space[[0, 1, 2], [0, 1, 2]] = voxel_size
        tracking_space[:3, 3] = voxel_size / 2.

        # Output space defaults to the tracking space itself; copy so later
        # mutation of one does not affect the other.
        if affine is None:
            self.affine = tracking_space.copy()
        else:
            self.affine = affine

        self._take_step = take_step
        # Pre-compute the cosine threshold used by the angle constraint.
        self._cos_similarity = np.cos(np.deg2rad(angle_limit))

        if mask_voxel_size is None:
            # Mask is assumed to share the data's voxel grid.
            if mask.shape != interpolator.data.shape[:-1]:
                raise ValueError("The shape of the mask and the shape of the "
                                 "data do not match")
            mask_voxel_size = interpolator.voxel_size
        else:
            # Mask may use a different voxel size, but must cover the same
            # field of view as the data.
            mask_voxel_size = np.asarray(mask_voxel_size)
            mask_FOV = mask_voxel_size * mask.shape
            data_FOV = interpolator.voxel_size * interpolator.data.shape[:-1]
            if not np.allclose(mask_FOV, data_FOV):
                raise ValueError("The FOV of the data and the FOV of the mask "
                                 "do not match")
        self._mask = NearestNeighborInterpolator(mask.copy(), mask_voxel_size)

    def __iter__(self):
        """Yield streamlines (in the ``affine`` output space) for all seeds."""
        # Check that seeds are reasonable
        seeds = np.asarray(self.seeds)
        if seeds.ndim != 2 or seeds.shape[1] != 3:
            raise ValueError("Seeds should be an (N, 3) array of points")

        # Compute affine from point space to tracking space, apply to seeds
        inv_A = np.dot(self._tracking_space, np.linalg.inv(self.affine))
        tracking_space_seeds = np.dot(seeds, inv_A[:3, :3].T) + inv_A[:3, 3]

        # Make tracks, move them to point space and return
        track = self._generate_streamlines(tracking_space_seeds)
        return utils.move_streamlines(track, output_space=self.affine,
                                      input_space=self._tracking_space)

    def _generate_streamlines(self, seeds):
        """A streamline generator"""
        for s in seeds:
            # Initial candidate directions at the seed (prev_step=None asks
            # the subclass for all peaks); limited to max_cross directions.
            directions = self._next_step(s, prev_step=None)
            directions = directions[:self.max_cross]
            for first_step in directions:
                # Track forward, then backward from the same seed, and join
                # the two halves (dropping the duplicated seed point).
                F = markov_streamline(self._next_step, self._take_step, s,
                                      first_step, self.maxlen)
                first_step = -first_step
                B = markov_streamline(self._next_step, self._take_step, s,
                                      first_step, self.maxlen)
                yield np.concatenate([B[:0:-1], F], axis=0)
def _closest_peak(peak_directions, prev_step, cos_similarity):
    """Return the closest direction to prev_step from peak_directions.

    All directions should be unit vectors. Antipodal symmetry is assumed, ie
    direction x is the same as -x.

    Parameters
    ----------
    peak_directions : array (N, 3)
        N unit vectors.
    prev_step : array (3,) or None
        Previous direction.
    cos_similarity : float
        `cos(max_angle)` where `max_angle` is the maximum allowed angle between
        prev_step and the returned direction.

    Returns
    -------
    direction : array or None
        If prev_step is None, returns peak_directions. Otherwise returns the
        closest direction to prev_step. If no directions are close enough to
        prev_step, returns None
    """
    # No previous step: every peak is a valid continuation.
    if prev_step is None:
        return peak_directions
    if len(peak_directions) == 0:
        return None

    # Pick the peak whose axis (sign-independent) best aligns with prev_step.
    dots = np.dot(peak_directions, prev_step)
    best = abs(dots).argmax()
    best_dot = dots[best]

    if best_dot >= cos_similarity:
        return peak_directions[best]
    if best_dot <= -cos_similarity:
        # Antipodal match: flip so the direction continues along prev_step.
        return -peak_directions[best]
    return None
class ClosestDirectionTracker(MarkovIntegrator):
    # Deterministic tracker: at each step, follows the model peak closest to
    # the previous direction (within the angle limit).

    def _next_step(self, location, prev_step):
        """Returns the direction closest to prev_step at location

        Fits the data from location using model and returns the tracking
        direction closest to prev_step. If prev_step is None, all the
        directions are returned.

        Parameters
        ----------
        location : point in space
            location is passed to the interpolator in order to get data
        prev_step : array_like (3,)
            the direction of the previous tracking step
        """
        # Returning None terminates the streamline once it leaves the mask.
        if not self._mask[location]:
            return None
        vox_data = self.interpolator[location]
        fit = self.model.fit(vox_data)
        directions = self._get_directions(fit)
        # Select the peak within angle_limit of prev_step (antipodal
        # symmetry handled by _closest_peak); all peaks if prev_step is None.
        return _closest_peak(directions, prev_step, self._cos_similarity)
class ProbabilisticOdfWeightedTracker(MarkovIntegrator):
    """A stochastic (probabilistic) fiber tracking method

    Stochastically tracks streamlines by randomly choosing directions from
    sphere. The likelihood of a direction being chosen is taken from
    `model.fit(data).odf(sphere)`. Negative values are set to 0. If no
    directions less than `angle_limit` degrees are from the incoming direction
    have a positive likelihood, the streamline is terminated.

    Parameters
    ----------
    model : model
        The model used to fit diffusion data.
    interpolator : interpolator
        Diffusion weighted data wrapped in an interpolator. Data should be
        normalized.
    mask : array, 3D
        Used to confine tracking, streamlines end when they leave the mask.
    take_step : callable
        Determines the length of each step.
    angle_limit : float [0, 90]
        The angle between successive steps in the streamlines cannot be more
        than `angle_limit` degrees.
    seeds : array (N, 3)
        Points to seed the tracking.
    sphere : Sphere
        sphere used to evaluate the likelihood. A Sphere or a HemiSphere can be
        used here. A HemiSphere is more efficient.
    max_cross : int or None
        Max number of directions to follow at each seed. By default follow all
        peaks of the odf.
    maxlen : int
        Maximum number of segments to follow from seed. Used to prevent
        infinite loops.
    mask_voxel_size : array (3,)
        Voxel size for the mask. `mask` should cover the same FOV as data, but
        it can have a different voxel size. Same as the data by default.

    Notes
    -----
    The tracker is based on a method described in [1]_ and [2]_ as fiber
    orientation distribution (FOD) sampling.

    References
    ----------
    .. [1] Jeurissen, B., Leemans, A., Jones, D. K., Tournier, J.-D., & Sijbers,
           J. (2011). Probabilistic fiber tracking using the residual bootstrap
           with constrained spherical deconvolution. Human Brain Mapping, 32(3),
           461-479. doi:10.1002/hbm.21032
    .. [2] J-D. Tournier, F. Calamante, D. G. Gadian, A. Connelly (2005).
           Probabilistic fibre tracking through regions containing crossing
           fibres. http://cds.ismrm.org/ismrm-2005/Files/01343.pdf
    """

    def __init__(self, model, interpolator, mask, take_step, angle_limit,
                 seeds, sphere, max_cross=None, maxlen=500,
                 mask_voxel_size=None, affine=None):
        MarkovIntegrator.__init__(self, model, interpolator, mask, take_step,
                                  angle_limit, seeds, max_cross, maxlen,
                                  mask_voxel_size, affine)
        self.sphere = sphere
        # Pre-compute, per vertex, which sphere vertices lie within the angle
        # limit; used below to restrict sampling to admissible directions.
        self._set_adjacency_matrix(sphere, self._cos_similarity)
        # The shared direction finder evaluates odf peaks on this sphere.
        self._get_directions.sphere = sphere

    def _set_adjacency_matrix(self, sphere, cos_similarity):
        """A boolean array of where the angle between vertices i and j of
        sphere is less than `angle_limit` apart."""
        matrix = np.dot(sphere.vertices, sphere.vertices.T)
        # abs() gives antipodal symmetry: v and -v count as the same axis.
        matrix = abs(matrix) >= cos_similarity
        keys = [tuple(v) for v in sphere.vertices]
        adj_matrix = dict(zip(keys, matrix))
        # Also index by -v so a prev_step from either hemisphere resolves.
        keys = [tuple(-v) for v in sphere.vertices]
        adj_matrix.update(zip(keys, matrix))
        self._adj_matrix = adj_matrix

    def _next_step(self, location, prev_step):
        """Returns the direction closest to prev_step at location

        Fits the data from location using model and returns the tracking
        direction closest to prev_step. If prev_step is None, all the
        directions are returned.

        Parameters
        ----------
        location : point in space
            location is passed to the interpolator in order to get data
        prev_step : array_like (3,)
            the direction of the previous tracking step
        """
        # Leaving the mask terminates the streamline.
        if not self._mask[location]:
            return None
        vox_data = self.interpolator[location]
        fit = self.model.fit(vox_data)
        if prev_step is None:
            # Seed voxel: return all candidate directions.
            return self._get_directions(fit)
        # Sample a vertex with probability proportional to the (clipped)
        # odf, restricted to vertices within angle_limit of prev_step.
        odf = fit.odf(self.sphere)
        odf.clip(0, out=odf)
        cdf = (self._adj_matrix[tuple(prev_step)] * odf).cumsum()
        if cdf[-1] == 0:
            # No admissible direction has positive likelihood: terminate.
            return None
        random_sample = np.random.random() * cdf[-1]
        idx = cdf.searchsorted(random_sample, 'right')
        direction = self.sphere.vertices[idx]
        # Flip if needed so the step continues along prev_step.
        if np.dot(direction, prev_step) > 0:
            return direction
        else:
            return -direction
class CDT_NNO(ClosestDirectionTracker):
    """ClosestDirectionTracker optimized for NearestNeighbor interpolator

    For use with Nearest Neighbor interpolation, directions at each voxel are
    remembered to avoid recalculating.

    Parameters
    ----------
    model : model
        A model used to fit data. Should return a some fit object with
        directions.
    interpolator : interpolator
        A NearestNeighbor interpolator, for other interpolators do not use this
        class.
    angle_limit : float [0, 90]
        Maximum angle allowed between prev_step and next_step.
    """

    def __init__(self, model, interpolator, mask, take_step, angle_limit,
                 seeds, max_cross=None, maxlen=500, mask_voxel_size=None,
                 affine=None):
        if not isinstance(interpolator, NearestNeighborInterpolator):
            msg = ("CDT_NNO is an optimized version of "
                   "ClosestDirectionTracker that requires a "
                   "NearestNeighborInterpolator")
            raise ValueError(msg)
        # BUG FIX: the caller's `affine` was previously discarded (the parent
        # was always invoked with a hard-coded ``affine=None``); forward it.
        ClosestDirectionTracker.__init__(self, model, interpolator, mask,
                                         take_step, angle_limit, seeds,
                                         max_cross=max_cross, maxlen=maxlen,
                                         mask_voxel_size=mask_voxel_size,
                                         affine=affine)
        self._data = self.interpolator.data
        self._voxel_size = self.interpolator.voxel_size
        self.reset_cache()

    def reset_cache(self):
        """Clear saved directions"""
        # -1 marks voxels whose peak directions have not been computed yet;
        # non-negative entries index into self._peaks.
        lookup = np.empty(self._data.shape[:-1], 'int')
        lookup.fill(-1)
        self._lookup = lookup
        self._peaks = []

    def _next_step(self, location, prev_step):
        """Returns the direction closest to prev_step at location"""
        if not self._mask[location]:
            return None
        vox_loc = tuple(location // self._voxel_size)
        # Renamed from `hash`, which shadowed the builtin of the same name.
        cache_idx = self._lookup[vox_loc]
        if cache_idx >= 0:
            # Cache hit: reuse directions computed earlier for this voxel.
            directions = self._peaks[cache_idx]
        else:
            vox_data = self._data[vox_loc]
            fit = self.model.fit(vox_data)
            directions = self._get_directions(fit)
            self._lookup[vox_loc] = len(self._peaks)
            self._peaks.append(directions)
        return _closest_peak(directions, prev_step, self._cos_similarity)
|
bsd-3-clause
|
rmboggs/django
|
django/contrib/admin/templatetags/admin_urls.py
|
553
|
1812
|
from django import template
from django.contrib.admin.utils import quote
from django.core.urlresolvers import Resolver404, get_script_prefix, resolve
from django.utils.http import urlencode
from django.utils.six.moves.urllib.parse import parse_qsl, urlparse, urlunparse
register = template.Library()
@register.filter
def admin_urlname(value, arg):
    """Return the namespaced admin URL pattern name for *value*'s model.

    ``value`` must expose ``app_label`` and ``model_name`` (e.g. a model's
    ``_meta``); ``arg`` is the admin view suffix such as ``'changelist'``,
    ``'add'`` or ``'change'``.
    """
    return 'admin:{0}_{1}_{2}'.format(value.app_label, value.model_name, arg)
@register.filter
def admin_urlquote(value):
    """Template filter: quote *value* using the admin's URL quoting rules."""
    return quote(value)
@register.simple_tag(takes_context=True)
def add_preserved_filters(context, url, popup=False, to_field=None):
    """Merge the preserved changelist filters (plus popup/to_field markers)
    into ``url``'s query string, letting ``url``'s own parameters win."""
    opts = context.get('opts')
    preserved_filters = context.get('preserved_filters')

    parsed_url = list(urlparse(url))
    parsed_qs = dict(parse_qsl(parsed_url[4]))  # index 4 is the query string
    merged_qs = dict()

    if opts and preserved_filters:
        preserved_filters = dict(parse_qsl(preserved_filters))

        # Resolve the target URL to see whether it is this model's changelist;
        # if so, expand the stored `_changelist_filters` into real parameters.
        match_url = '/%s' % url.partition(get_script_prefix())[2]
        try:
            match = resolve(match_url)
        except Resolver404:
            pass
        else:
            current_url = '%s:%s' % (match.app_name, match.url_name)
            changelist_url = 'admin:%s_%s_changelist' % (opts.app_label, opts.model_name)
            if changelist_url == current_url and '_changelist_filters' in preserved_filters:
                preserved_filters = dict(parse_qsl(preserved_filters['_changelist_filters']))

        merged_qs.update(preserved_filters)

    # Imported lazily to avoid a circular import with admin.options.
    if popup:
        from django.contrib.admin.options import IS_POPUP_VAR
        merged_qs[IS_POPUP_VAR] = 1
    if to_field:
        from django.contrib.admin.options import TO_FIELD_VAR
        merged_qs[TO_FIELD_VAR] = to_field

    # The url's own query parameters take precedence over preserved ones.
    merged_qs.update(parsed_qs)

    parsed_url[4] = urlencode(merged_qs)
    return urlunparse(parsed_url)
|
bsd-3-clause
|
drammock/mne-python
|
tutorials/machine-learning/30_strf.py
|
10
|
14437
|
# -*- coding: utf-8 -*-
"""
=====================================================================
Spectro-temporal receptive field (STRF) estimation on continuous data
=====================================================================
This demonstrates how an encoding model can be fit with multiple continuous
inputs. In this case, we simulate the model behind a spectro-temporal receptive
field (or STRF). First, we create a linear filter that maps patterns in
spectro-temporal space onto an output, representing neural activity. We fit
a receptive field model that attempts to recover the original linear filter
that was used to create this data.
"""
# Authors: Chris Holdgraf <choldgraf@gmail.com>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
# sphinx_gallery_thumbnail_number = 7
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.decoding import ReceptiveField, TimeDelayingRidge
from scipy.stats import multivariate_normal
from scipy.io import loadmat
from sklearn.preprocessing import scale
rng = np.random.RandomState(1337)  # To make this example reproducible

###############################################################################
# Load audio data
# ---------------
#
# We'll read in the audio data from :footcite:`CrosseEtAl2016` in order to
# simulate a response.
#
# In addition, we'll downsample the data along the time dimension in order to
# speed up computation. Note that depending on the input values, this may
# not be desired. For example if your input stimulus varies more quickly than
# 1/2 the sampling rate to which we are downsampling.

# Read in audio that's been recorded in epochs.
path_audio = mne.datasets.mtrf.data_path()
data = loadmat(path_audio + '/speech_data.mat')
# Transposed so frequency is the first axis (matches the reshape below).
audio = data['spectrogram'].T
sfreq = float(data['Fs'][0, 0])
n_decim = 2  # downsampling factor along the time axis
audio = mne.filter.resample(audio, down=n_decim, npad='auto')
sfreq /= n_decim  # keep the sampling rate consistent with the resampled data
###############################################################################
# Create a receptive field
# ------------------------
#
# We'll simulate a linear receptive field for a theoretical neural signal. This
# defines how the signal will respond to power in this receptive field space.
n_freqs = 20
tmin, tmax = -0.1, 0.4

# To simulate the data we'll create explicit delays here
delays_samp = np.arange(np.round(tmin * sfreq),
                        np.round(tmax * sfreq) + 1).astype(int)
delays_sec = delays_samp / sfreq
freqs = np.linspace(50, 5000, n_freqs)
grid = np.array(np.meshgrid(delays_sec, freqs))

# We need data to be shaped as n_epochs, n_features, n_times, so swap axes here
grid = grid.swapaxes(0, -1).swapaxes(0, 1)

# Simulate a temporal receptive field with a Gabor filter
means_high = [.1, 500]   # (time lag s, frequency Hz) of the positive lobe
means_low = [.2, 2500]   # (time lag s, frequency Hz) of the negative lobe
cov = [[.001, 0], [0, 500000]]  # diagonal: no time/frequency interaction
gauss_high = multivariate_normal.pdf(grid, means_high, cov)
gauss_low = -1 * multivariate_normal.pdf(grid, means_low, cov)
weights = gauss_high + gauss_low  # Combine to create the "true" STRF
# Symmetric color limits so zero maps to the center of the diverging colormap.
kwargs = dict(vmax=np.abs(weights).max(), vmin=-np.abs(weights).max(),
              cmap='RdBu_r', shading='gouraud')

fig, ax = plt.subplots()
ax.pcolormesh(delays_sec, freqs, weights, **kwargs)
ax.set(title='Simulated STRF', xlabel='Time Lags (s)', ylabel='Frequency (Hz)')
plt.setp(ax.get_xticklabels(), rotation=45)
plt.autoscale(tight=True)
mne.viz.tight_layout()
###############################################################################
# Simulate a neural response
# --------------------------
#
# Using this receptive field, we'll create an artificial neural response to
# a stimulus.
#
# To do this, we'll create a time-delayed version of the receptive field, and
# then calculate the dot product between this and the stimulus. Note that this
# is effectively doing a convolution between the stimulus and the receptive
# field. See `here <https://en.wikipedia.org/wiki/Convolution>`_ for more
# information.

# Reshape audio to split into epochs, then make epochs the first dimension.
n_epochs, n_seconds = 16, 5
# Trim so the signal divides evenly into n_epochs epochs.
audio = audio[:, :int(n_seconds * sfreq * n_epochs)]
X = audio.reshape([n_freqs, n_epochs, -1]).swapaxes(0, 1)
n_times = X.shape[-1]

# Delay the spectrogram according to delays so it can be combined w/ the STRF
# Lags will now be in axis 1, then we reshape to vectorize
delays = np.arange(np.round(tmin * sfreq),
                   np.round(tmax * sfreq) + 1).astype(int)

# Iterate through indices and append
X_del = np.zeros((len(delays),) + X.shape)
for ii, ix_delay in enumerate(delays):
    # These arrays will take/put particular indices in the data
    take = [slice(None)] * X.ndim
    put = [slice(None)] * X.ndim
    # Shift along the last (time) axis: positive delays move data later,
    # negative delays move it earlier; zero copies it unshifted.
    if ix_delay > 0:
        take[-1] = slice(None, -ix_delay)
        put[-1] = slice(ix_delay, None)
    elif ix_delay < 0:
        take[-1] = slice(-ix_delay, None)
        put[-1] = slice(None, ix_delay)
    X_del[ii][tuple(put)] = X[tuple(take)]

# Now set the delayed axis to the 2nd dimension
X_del = np.rollaxis(X_del, 0, 3)
X_del = X_del.reshape([n_epochs, -1, n_times])
n_features = X_del.shape[1]
weights_sim = weights.ravel()

# Simulate a neural response to the sound, given this STRF
y = np.zeros((n_epochs, n_times))
for ii, iep in enumerate(X_del):
    # Simulate this epoch and add random noise
    noise_amp = .002
    y[ii] = np.dot(weights_sim, iep) + noise_amp * rng.randn(n_times)

# Plot the first 2 trials of audio and the simulated electrode activity
X_plt = scale(np.hstack(X[:2]).T).T
y_plt = scale(np.hstack(y[:2]))
time = np.arange(X_plt.shape[-1]) / sfreq
_, (ax1, ax2) = plt.subplots(2, 1, figsize=(6, 6), sharex=True)
ax1.pcolormesh(time, freqs, X_plt, vmin=0, vmax=4, cmap='Reds',
               shading='gouraud')
ax1.set_title('Input auditory features')
ax1.set(ylim=[freqs.min(), freqs.max()], ylabel='Frequency (Hz)')
ax2.plot(time, y_plt)
ax2.set(xlim=[time.min(), time.max()], title='Simulated response',
        xlabel='Time (s)', ylabel='Activity (a.u.)')
mne.viz.tight_layout()
###############################################################################
# Fit a model to recover this receptive field
# -------------------------------------------
#
# Finally, we'll use the :class:`mne.decoding.ReceptiveField` class to recover
# the linear receptive field of this signal. Note that properties of the
# receptive field (e.g. smoothness) will depend on the autocorrelation in the
# inputs and outputs.

# Create training and testing data: hold out the last epoch for testing.
train, test = np.arange(n_epochs - 1), n_epochs - 1
X_train, X_test, y_train, y_test = X[train], X[test], y[train], y[test]
# Move the time axis (last) to the front for model fitting.
X_train, X_test, y_train, y_test = [np.rollaxis(ii, -1, 0) for ii in
                                    (X_train, X_test, y_train, y_test)]

# Model the simulated data as a function of the spectrogram input
alphas = np.logspace(-3, 3, 7)  # ridge regularization strengths to try
scores = np.zeros_like(alphas)
models = []
for ii, alpha in enumerate(alphas):
    rf = ReceptiveField(tmin, tmax, sfreq, freqs, estimator=alpha)
    rf.fit(X_train, y_train)

    # Now make predictions about the model output, given input stimuli.
    scores[ii] = rf.score(X_test, y_test)
    models.append(rf)

times = rf.delays_ / float(rf.sfreq)

# Choose the model that performed best on the held out data
ix_best_alpha = np.argmax(scores)
best_mod = models[ix_best_alpha]
coefs = best_mod.coef_[0]
best_pred = best_mod.predict(X_test)[:, 0]

# Plot the original STRF, and the one that we recovered with modeling.
_, (ax1, ax2) = plt.subplots(1, 2, figsize=(6, 3), sharey=True, sharex=True)
ax1.pcolormesh(delays_sec, freqs, weights, **kwargs)
ax2.pcolormesh(times, rf.feature_names, coefs, **kwargs)
ax1.set_title('Original STRF')
ax2.set_title('Best Reconstructed STRF')
plt.setp([iax.get_xticklabels() for iax in [ax1, ax2]], rotation=45)
plt.autoscale(tight=True)
mne.viz.tight_layout()

# Plot the actual response and the predicted response on a held out stimulus
time_pred = np.arange(best_pred.shape[0]) / sfreq
fig, ax = plt.subplots()
ax.plot(time_pred, y_test, color='k', alpha=.2, lw=4)
ax.plot(time_pred, best_pred, color='r', lw=1)
ax.set(title='Original and predicted activity', xlabel='Time (s)')
ax.legend(['Original', 'Predicted'])
plt.autoscale(tight=True)
mne.viz.tight_layout()
###############################################################################
# Visualize the effects of regularization
# ---------------------------------------
#
# Above we fit a :class:`mne.decoding.ReceptiveField` model for one of many
# values for the ridge regularization parameter. Here we will plot the model
# score as well as the model coefficients for each value, in order to
# visualize how coefficients change with different levels of regularization.
# These issues as well as the STRF pipeline are described in detail
# in :footcite:`TheunissenEtAl2001,WillmoreSmyth2003,HoldgrafEtAl2016`.

# Plot model score for each ridge parameter (bottom row of the figure)
fig = plt.figure(figsize=(10, 4))
ax = plt.subplot2grid([2, len(alphas)], [1, 0], 1, len(alphas))
ax.plot(np.arange(len(alphas)), scores, marker='o', color='r')
ax.annotate('Best parameter', (ix_best_alpha, scores[ix_best_alpha]),
            (ix_best_alpha, scores[ix_best_alpha] - .1),
            arrowprops={'arrowstyle': '->'})
plt.xticks(np.arange(len(alphas)), ["%.0e" % ii for ii in alphas])
ax.set(xlabel="Ridge regularization value", ylabel="Score ($R^2$)",
       xlim=[-.4, len(alphas) - .6])
mne.viz.tight_layout()

# Plot the STRF of each ridge parameter (top row, one panel per alpha)
for ii, (rf, i_alpha) in enumerate(zip(models, alphas)):
    ax = plt.subplot2grid([2, len(alphas)], [0, ii], 1, 1)
    ax.pcolormesh(times, rf.feature_names, rf.coef_[0], **kwargs)
    plt.xticks([], [])
    plt.yticks([], [])
    plt.autoscale(tight=True)
fig.suptitle('Model coefficients / scores for many ridge parameters', y=1)
mne.viz.tight_layout()
###############################################################################
# Using different regularization types
# ------------------------------------
# In addition to the standard ridge regularization, the
# :class:`mne.decoding.TimeDelayingRidge` class also exposes
# `Laplacian <https://en.wikipedia.org/wiki/Laplacian_matrix>`_ regularization
# term as:
#
# .. math::
#    \left[\begin{matrix}
#        1 & -1 &        &        &        & \\
#       -1 &  2 & -1     &        &        & \\
#          & -1 &  2     & -1     &        & \\
#          &    & \ddots & \ddots & \ddots & \\
#          &    &        & -1     &  2     & -1 \\
#          &    &        &        & -1     &  1\end{matrix}\right]
#
# This imposes a smoothness constraint of nearby time samples and/or features.
# Quoting :footcite:`CrosseEtAl2016` :
#
#     Tikhonov [identity] regularization (Equation 5) reduces overfitting by
#     smoothing the TRF estimate in a way that is insensitive to
#     the amplitude of the signal of interest. However, the Laplacian
#     approach (Equation 6) reduces off-sample error whilst preserving
#     signal amplitude (Lalor et al., 2006). As a result, this approach
#     usually leads to an improved estimate of the system’s response (as
#     indexed by MSE) compared to Tikhonov regularization.
#
scores_lap = np.zeros_like(alphas)
models_lap = []
for ii, alpha in enumerate(alphas):
    # Same fitting loop as above, but with a Laplacian-regularized estimator.
    estimator = TimeDelayingRidge(tmin, tmax, sfreq, reg_type='laplacian',
                                  alpha=alpha)
    rf = ReceptiveField(tmin, tmax, sfreq, freqs, estimator=estimator)
    rf.fit(X_train, y_train)

    # Now make predictions about the model output, given input stimuli.
    scores_lap[ii] = rf.score(X_test, y_test)
    models_lap.append(rf)

ix_best_alpha_lap = np.argmax(scores_lap)
###############################################################################
# Compare model performance
# -------------------------
# Below we visualize the model performance of each regularization method
# (ridge vs. Laplacian) for different levels of alpha. As you can see, the
# Laplacian method performs better in general, because it imposes a smoothness
# constraint along the time and feature dimensions of the coefficients.
# This matches the "true" receptive field structure and results in a better
# model fit.

fig = plt.figure(figsize=(10, 6))
ax = plt.subplot2grid([3, len(alphas)], [2, 0], 1, len(alphas))
# Laplacian scores in red; ridge scores in dotted grey for comparison.
ax.plot(np.arange(len(alphas)), scores_lap, marker='o', color='r')
ax.plot(np.arange(len(alphas)), scores, marker='o', color='0.5', ls=':')
ax.annotate('Best Laplacian', (ix_best_alpha_lap,
                               scores_lap[ix_best_alpha_lap]),
            (ix_best_alpha_lap, scores_lap[ix_best_alpha_lap] - .1),
            arrowprops={'arrowstyle': '->'})
ax.annotate('Best Ridge', (ix_best_alpha, scores[ix_best_alpha]),
            (ix_best_alpha, scores[ix_best_alpha] - .1),
            arrowprops={'arrowstyle': '->'})
plt.xticks(np.arange(len(alphas)), ["%.0e" % ii for ii in alphas])
ax.set(xlabel="Laplacian regularization value", ylabel="Score ($R^2$)",
       xlim=[-.4, len(alphas) - .6])
mne.viz.tight_layout()

# Plot the STRF of each ridge parameter
xlim = times[[0, -1]]
for ii, (rf_lap, rf, i_alpha) in enumerate(zip(models_lap, models, alphas)):
    # Row 0: Laplacian STRFs; row 1: ridge STRFs; one column per alpha.
    ax = plt.subplot2grid([3, len(alphas)], [0, ii], 1, 1)
    ax.pcolormesh(times, rf_lap.feature_names, rf_lap.coef_[0], **kwargs)
    ax.set(xticks=[], yticks=[], xlim=xlim)
    if ii == 0:
        ax.set(ylabel='Laplacian')
    ax = plt.subplot2grid([3, len(alphas)], [1, ii], 1, 1)
    ax.pcolormesh(times, rf.feature_names, rf.coef_[0], **kwargs)
    ax.set(xticks=[], yticks=[], xlim=xlim)
    if ii == 0:
        ax.set(ylabel='Ridge')
fig.suptitle('Model coefficients / scores for laplacian regularization', y=1)
mne.viz.tight_layout()

###############################################################################
# Plot the original STRF, and the one that we recovered with modeling.
rf = models[ix_best_alpha]
rf_lap = models_lap[ix_best_alpha_lap]
_, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(9, 3),
                                  sharey=True, sharex=True)
ax1.pcolormesh(delays_sec, freqs, weights, **kwargs)
ax2.pcolormesh(times, rf.feature_names, rf.coef_[0], **kwargs)
ax3.pcolormesh(times, rf_lap.feature_names, rf_lap.coef_[0], **kwargs)
ax1.set_title('Original STRF')
ax2.set_title('Best Ridge STRF')
ax3.set_title('Best Laplacian STRF')
plt.setp([iax.get_xticklabels() for iax in [ax1, ax2, ax3]], rotation=45)
plt.autoscale(tight=True)
mne.viz.tight_layout()

###############################################################################
# References
# ==========
# .. footbibliography::
|
bsd-3-clause
|
bakhtout/odoo-educ
|
addons/account/report/account_partner_balance.py
|
286
|
11049
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.tools.translate import _
from openerp.report import report_sxw
from common_report_header import common_report_header
class partner_balance(report_sxw.rml_parse, common_report_header):
def __init__(self, cr, uid, name, context=None):
    # Report parser for the partner balance report. The helpers registered
    # in localcontext are the callables available to the RML template;
    # most come from common_report_header.
    super(partner_balance, self).__init__(cr, uid, name, context=context)
    self.account_ids = []
    self.localcontext.update( {
        'time': time,
        'get_fiscalyear': self._get_fiscalyear,
        'get_journal': self._get_journal,
        'get_filter': self._get_filter,
        'get_account': self._get_account,
        'get_start_date':self._get_start_date,
        'get_end_date':self._get_end_date,
        'get_start_period': self.get_start_period,
        'get_end_period': self.get_end_period,
        'get_partners':self._get_partners,
        'get_target_move': self._get_target_move,
    })
def set_context(self, objects, data, ids, report_type=None):
    """Resolve wizard options, collect the receivable/payable account ids
    and pre-compute the report grand totals."""
    self.display_partner = data['form'].get('display_partner', 'non-zero_balance')
    obj_move = self.pool.get('account.move.line')
    # SQL fragment built by the ORM from the wizard's filters; it is
    # interpolated into the queries issued by lines().
    self.query = obj_move._query_get(self.cr, self.uid, obj='l', context=data['form'].get('used_context', {}))
    self.result_selection = data['form'].get('result_selection')
    self.target_move = data['form'].get('target_move', 'all')
    # Restrict the report to customer (receivable), supplier (payable)
    # or both account types, per the wizard selection.
    if (self.result_selection == 'customer' ):
        self.ACCOUNT_TYPE = ('receivable',)
    elif (self.result_selection == 'supplier'):
        self.ACCOUNT_TYPE = ('payable',)
    else:
        self.ACCOUNT_TYPE = ('payable', 'receivable')
    self.cr.execute("SELECT a.id " \
            "FROM account_account a " \
            "LEFT JOIN account_account_type t " \
            "ON (a.type = t.code) " \
            "WHERE a.type IN %s " \
            "AND a.active", (self.ACCOUNT_TYPE,))
    self.account_ids = [a for (a,) in self.cr.fetchall()]
    res = super(partner_balance, self).set_context(objects, data, ids, report_type=report_type)
    lines = self.lines()
    sum_debit = sum_credit = sum_litige = 0
    # type == 3 rows are the per-account header/subtotal rows produced by
    # _add_subtotal(); summing them yields the report grand totals.
    for line in filter(lambda x: x['type'] == 3, lines):
        sum_debit += line['debit'] or 0
        sum_credit += line['credit'] or 0
        sum_litige += line['enlitige'] or 0
    self.localcontext.update({
        'lines': lambda: lines,
        'sum_debit': lambda: sum_debit,
        'sum_credit': lambda: sum_credit,
        'sum_litige': lambda: sum_litige,
    })
    return res
def lines(self):
    """Return the report rows: per-partner balances grouped by account,
    with the per-account subtotal headers added by _add_subtotal()."""
    # Only posted entries when the wizard asks for them; otherwise both.
    move_state = ['draft','posted']
    if self.target_move == 'posted':
        move_state = ['posted']

    full_account = []
    # NOTE(review): self.query is string-interpolated into the SQL below.
    # It is produced by the ORM's _query_get (set_context), not from raw
    # user input — confirm before reusing this pattern elsewhere.
    self.cr.execute(
        "SELECT p.ref,l.account_id,ac.name AS account_name,ac.code AS code,p.name, sum(debit) AS debit, sum(credit) AS credit, " \
                "CASE WHEN sum(debit) > sum(credit) " \
                    "THEN sum(debit) - sum(credit) " \
                    "ELSE 0 " \
                "END AS sdebit, " \
                "CASE WHEN sum(debit) < sum(credit) " \
                    "THEN sum(credit) - sum(debit) " \
                    "ELSE 0 " \
                "END AS scredit, " \
                "(SELECT sum(debit-credit) " \
                    "FROM account_move_line l " \
                    "WHERE partner_id = p.id " \
                        "AND " + self.query + " " \
                        "AND blocked = TRUE " \
                ") AS enlitige " \
        "FROM account_move_line l LEFT JOIN res_partner p ON (l.partner_id=p.id) " \
        "JOIN account_account ac ON (l.account_id = ac.id)" \
        "JOIN account_move am ON (am.id = l.move_id)" \
        "WHERE ac.type IN %s " \
        "AND am.state IN %s " \
        "AND " + self.query + "" \
        "GROUP BY p.id, p.ref, p.name,l.account_id,ac.name,ac.code " \
        "ORDER BY l.account_id,p.name",
        (self.ACCOUNT_TYPE, tuple(move_state)))
    res = self.cr.dictfetchall()

    # Optionally drop partners whose debit and credit balances are both zero.
    if self.display_partner == 'non-zero_balance':
        full_account = [r for r in res if r['sdebit'] > 0 or r['scredit'] > 0]
    else:
        full_account = [r for r in res]

    for rec in full_account:
        if not rec.get('name', False):
            rec.update({'name': _('Unknown Partner')})

    ## We will now compute Total
    subtotal_row = self._add_subtotal(full_account)
    return subtotal_row
def _add_subtotal(self, cleanarray):
i = 0
completearray = []
tot_debit = 0.0
tot_credit = 0.0
tot_scredit = 0.0
tot_sdebit = 0.0
tot_enlitige = 0.0
for r in cleanarray:
# For the first element we always add the line
# type = 1 is the line is the first of the account
# type = 2 is an other line of the account
if i==0:
# We add the first as the header
#
##
new_header = {}
new_header['ref'] = ''
new_header['name'] = r['account_name']
new_header['code'] = r['code']
new_header['debit'] = r['debit']
new_header['credit'] = r['credit']
new_header['scredit'] = tot_scredit
new_header['sdebit'] = tot_sdebit
new_header['enlitige'] = tot_enlitige
new_header['balance'] = r['debit'] - r['credit']
new_header['type'] = 3
##
completearray.append(new_header)
#
r['type'] = 1
r['balance'] = float(r['sdebit']) - float(r['scredit'])
completearray.append(r)
#
tot_debit = r['debit']
tot_credit = r['credit']
tot_scredit = r['scredit']
tot_sdebit = r['sdebit']
tot_enlitige = (r['enlitige'] or 0.0)
#
else:
if cleanarray[i]['account_id'] <> cleanarray[i-1]['account_id']:
new_header['debit'] = tot_debit
new_header['credit'] = tot_credit
new_header['scredit'] = tot_scredit
new_header['sdebit'] = tot_sdebit
new_header['enlitige'] = tot_enlitige
new_header['balance'] = float(tot_sdebit) - float(tot_scredit)
new_header['type'] = 3
# we reset the counter
tot_debit = r['debit']
tot_credit = r['credit']
tot_scredit = r['scredit']
tot_sdebit = r['sdebit']
tot_enlitige = (r['enlitige'] or 0.0)
#
##
new_header = {}
new_header['ref'] = ''
new_header['name'] = r['account_name']
new_header['code'] = r['code']
new_header['debit'] = tot_debit
new_header['credit'] = tot_credit
new_header['scredit'] = tot_scredit
new_header['sdebit'] = tot_sdebit
new_header['enlitige'] = tot_enlitige
new_header['balance'] = float(tot_sdebit) - float(tot_scredit)
new_header['type'] = 3
##get_fiscalyear
##
completearray.append(new_header)
##
#
r['type'] = 1
#
r['balance'] = float(r['sdebit']) - float(r['scredit'])
completearray.append(r)
if cleanarray[i]['account_id'] == cleanarray[i-1]['account_id']:
# we reset the counter
new_header['debit'] = tot_debit
new_header['credit'] = tot_credit
new_header['scredit'] = tot_scredit
new_header['sdebit'] = tot_sdebit
new_header['enlitige'] = tot_enlitige
new_header['balance'] = float(tot_sdebit) - float(tot_scredit)
new_header['type'] = 3
tot_debit = tot_debit + r['debit']
tot_credit = tot_credit + r['credit']
tot_scredit = tot_scredit + r['scredit']
tot_sdebit = tot_sdebit + r['sdebit']
tot_enlitige = tot_enlitige + (r['enlitige'] or 0.0)
new_header['debit'] = tot_debit
new_header['credit'] = tot_credit
new_header['scredit'] = tot_scredit
new_header['sdebit'] = tot_sdebit
new_header['enlitige'] = tot_enlitige
new_header['balance'] = float(tot_sdebit) - float(tot_scredit)
#
r['type'] = 2
#
r['balance'] = float(r['sdebit']) - float(r['scredit'])
#
completearray.append(r)
i = i + 1
return completearray
def _get_partners(self):
if self.result_selection == 'customer':
return _('Receivable Accounts')
elif self.result_selection == 'supplier':
return _('Payable Accounts')
elif self.result_selection == 'customer_supplier':
return _('Receivable and Payable Accounts')
return ''
class report_partnerbalance(osv.AbstractModel):
    """QWeb report model wrapping the legacy ``partner_balance`` rml_parse
    parser under the ``account.report_partnerbalance`` report name."""
    # Report service name used by ir.actions.report.xml.
    _name = 'report.account.report_partnerbalance'
    _inherit = 'report.abstract_report'
    # QWeb template rendered with the parser's localcontext.
    _template = 'account.report_partnerbalance'
    _wrapped_report_class = partner_balance
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
trondhindenes/ansible
|
test/integration/targets/async_fail/action_plugins/normal.py
|
152
|
2566
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
from ansible.utils.vars import merge_hash
class ActionModule(ActionBase):
    """Copy of the generic 'normal' action plugin that deliberately fails an
    unfinished ``async_status`` poll -- used by the ``async_fail``
    integration test to exercise retry handling."""

    def run(self, tmp=None, task_vars=None):
        """Execute the task's module and return its result dict.

        Identical to the stock plugin except that an ``async_status`` result
        with ``finished != 1`` raises :class:`AnsibleError` to simulate a
        transient network failure.
        """
        # individual modules might disagree but as the generic the action plugin, pass at this point.
        self._supports_check_mode = True
        self._supports_async = True
        result = super(ActionModule, self).run(tmp, task_vars)
        del tmp  # tmp no longer has any effect
        if not result.get('skipped'):
            if result.get('invocation', {}).get('module_args'):
                # avoid passing to modules in case of no_log
                # should not be set anymore but here for backwards compatibility
                del result['invocation']['module_args']
            # FUTURE: better to let _execute_module calculate this internally?
            wrap_async = self._task.async_val and not self._connection.has_native_async
            # do work!
            result = merge_hash(result, self._execute_module(task_vars=task_vars, wrap_async=wrap_async))
            # hack to keep --verbose from showing all the setup module result
            # moved from setup module as now we filter out all _ansible_ from result
            if self._task.action == 'setup':
                result['_ansible_verbose_override'] = True
        # Simulate a transient network failure
        if self._task.action == 'async_status' and 'finished' in result and result['finished'] != 1:
            # NOTE(review): message typo ("somewher ein") preserved verbatim;
            # the integration test may match on it.
            raise AnsibleError('Pretend to fail somewher ein executing async_status')
        # NOTE(review): wrap_async is only assigned in the not-skipped branch
        # above, so a skipped result would raise UnboundLocalError here --
        # latent issue inherited from the plugin this file was copied from.
        if not wrap_async:
            # remove a temporary path we created
            self._remove_tmp_path(self._connection._shell.tmpdir)
        return result
|
gpl-3.0
|
jaggu303619/asylum-v2.0
|
openerp/addons/point_of_sale/report/pos_lines.py
|
61
|
2251
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
class pos_lines(report_sxw.rml_parse):
    """RML parser for the POS order-lines report."""

    def __init__(self, cr, uid, name, context):
        super(pos_lines, self).__init__(cr, uid, name, context=context)
        # Running quantity total, refreshed by __total_quantity__.
        self.total = 0.0
        helpers = {
            'time': time,
            'total_quantity': self.__total_quantity__,
            'taxes': self.__taxes__,
        }
        self.localcontext.update(helpers)

    def __total_quantity__(self, obj):
        """Sum the quantities of all lines of order *obj*, cache and return it."""
        self.total = sum(line.qty for line in obj.lines)
        return self.total

    def __taxes__(self, obj):
        """Return the name of the first tax linked to the order's lines."""
        # SQL kept verbatim.  NOTE(review): the WHERE clause filters on
        # pol.id but receives obj.id (an order id) -- confirm against the
        # RML template before changing.
        self.cr.execute ( " Select acct.name from pos_order as po " \
                " LEFT JOIN pos_order_line as pol ON po.id = pol.order_id " \
                " LEFT JOIN product_taxes_rel as ptr ON pol.product_id = ptr.prod_id " \
                " LEFT JOIN account_tax as acct ON acct.id = ptr.tax_id " \
                " WHERE pol.id = %s", (obj.id,))
        tax_name = self.cr.fetchone()[0]
        return tax_name
# Register the parser as the 'report.pos.lines' report on pos.order,
# rendered from the RML template with the internal header.
report_sxw.report_sxw('report.pos.lines', 'pos.order', 'addons/point_of_sale/report/pos_lines.rml', parser=pos_lines,header='internal')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
sjsucohort6/openstack
|
python/venv/lib/python2.7/site-packages/tablib/packages/yaml/scanner.py
|
434
|
52630
|
# Scanner produces tokens of the following types:
# STREAM-START
# STREAM-END
# DIRECTIVE(name, value)
# DOCUMENT-START
# DOCUMENT-END
# BLOCK-SEQUENCE-START
# BLOCK-MAPPING-START
# BLOCK-END
# FLOW-SEQUENCE-START
# FLOW-MAPPING-START
# FLOW-SEQUENCE-END
# FLOW-MAPPING-END
# BLOCK-ENTRY
# FLOW-ENTRY
# KEY
# VALUE
# ALIAS(value)
# ANCHOR(value)
# TAG(value)
# SCALAR(value, plain, style)
#
# Read comments in the Scanner code for more details.
#
__all__ = ['Scanner', 'ScannerError']
from error import MarkedYAMLError
from tokens import *
class ScannerError(MarkedYAMLError):
    """Raised on tokenization errors; inherits context/problem marks from
    MarkedYAMLError."""
    pass
class SimpleKey(object):
    """Record of a position where a simple (single-line) mapping key may
    start.  See the simple-keys treatment in Scanner below."""
    def __init__(self, token_number, required, index, line, column, mark):
        # Absolute number of the token the KEY token would be inserted before.
        self.token_number = token_number
        # True when a ':' MUST follow (block context, key at the indent column).
        self.required = required
        # Stream position where the candidate key starts.
        self.index = index
        self.line = line
        self.column = column
        self.mark = mark
class Scanner(object):
    def __init__(self):
        """Initialize the scanner.

        Scanner is designed to share a descendant with a Reader that
        supplies ``self.peek(i)``, ``self.prefix(l)``, ``self.forward(l)``,
        ``self.index``/``line``/``column``, ``self.get_mark()`` and
        ``self.encoding``; the Reader decodes the input to Unicode and
        appends a terminating NUL.
        """
        # Had we reached the end of the stream?
        self.done = False
        # The number of unclosed '{' and '['. `flow_level == 0` means block
        # context.
        self.flow_level = 0
        # List of processed tokens that are not yet emitted.
        self.tokens = []
        # Add the STREAM-START token.
        self.fetch_stream_start()
        # Number of tokens that were emitted through the `get_token` method.
        self.tokens_taken = 0
        # The current indentation level.
        self.indent = -1
        # Past indentation levels.
        self.indents = []
        # Simple-keys treatment.  A simple key is a key that is not denoted
        # by the '?' indicator, e.g.
        #   block simple key: value
        #   ? not a simple key:
        #   : { flow simple key: value }
        # We emit the KEY token before all keys, so when we find a potential
        # simple key, we try to locate the corresponding ':' indicator.
        # Simple keys should be limited to a single line and 1024 characters.
        # Can a simple key start at the current position?  A simple key may
        # start:
        # - at the beginning of the line, not counting indentation spaces
        #   (in block context),
        # - after '{', '[', ',' (in the flow context),
        # - after '?', ':', '-' (in the block context).
        # In the block context, this flag also signifies if a block collection
        # may start at the current position.
        self.allow_simple_key = True
        # Keep track of possible simple keys. This is a dictionary. The key
        # is `flow_level`; there can be no more that one possible simple key
        # for each level. The value is a SimpleKey record:
        #   (token_number, required, index, line, column, mark)
        # A simple key may start with ALIAS, ANCHOR, TAG, SCALAR(flow),
        # '[', or '{' tokens.
        self.possible_simple_keys = {}
# Public methods.
def check_token(self, *choices):
# Check if the next token is one of the given types.
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
if not choices:
return True
for choice in choices:
if isinstance(self.tokens[0], choice):
return True
return False
def peek_token(self):
# Return the next token, but do not delete if from the queue.
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
return self.tokens[0]
def get_token(self):
# Return the next token.
while self.need_more_tokens():
self.fetch_more_tokens()
if self.tokens:
self.tokens_taken += 1
return self.tokens.pop(0)
# Private methods.
    def need_more_tokens(self):
        """Return a true value while ``fetch_more_tokens`` should run.

        True when the queue is empty or when the head token could still be
        turned into a KEY by a pending simple key; falls through (None,
        i.e. false) otherwise -- callers only test truthiness.
        """
        if self.done:
            return False
        if not self.tokens:
            return True
        # The current token may be a potential simple key, so we
        # need to look further.
        self.stale_possible_simple_keys()
        if self.next_possible_simple_key() == self.tokens_taken:
            return True
    def fetch_more_tokens(self):
        """Skip to the next token, then dispatch on its first character and
        append the corresponding token(s) to ``self.tokens``."""
        # Eat whitespaces and comments until we reach the next token.
        self.scan_to_next_token()
        # Remove obsolete possible simple keys.
        self.stale_possible_simple_keys()
        # Compare the current indentation and column. It may add some tokens
        # and decrease the current indentation level.
        self.unwind_indent(self.column)
        # Peek the next character.
        ch = self.peek()
        # Is it the end of stream?
        if ch == u'\0':
            return self.fetch_stream_end()
        # Is it a directive?
        if ch == u'%' and self.check_directive():
            return self.fetch_directive()
        # Is it the document start?
        if ch == u'-' and self.check_document_start():
            return self.fetch_document_start()
        # Is it the document end?
        if ch == u'.' and self.check_document_end():
            return self.fetch_document_end()
        # TODO: support for BOM within a stream.
        #if ch == u'\uFEFF':
        #    return self.fetch_bom()    <-- issue BOMToken
        # Note: the order of the following checks is NOT significant.
        # Is it the flow sequence start indicator?
        if ch == u'[':
            return self.fetch_flow_sequence_start()
        # Is it the flow mapping start indicator?
        if ch == u'{':
            return self.fetch_flow_mapping_start()
        # Is it the flow sequence end indicator?
        if ch == u']':
            return self.fetch_flow_sequence_end()
        # Is it the flow mapping end indicator?
        if ch == u'}':
            return self.fetch_flow_mapping_end()
        # Is it the flow entry indicator?
        if ch == u',':
            return self.fetch_flow_entry()
        # Is it the block entry indicator?
        if ch == u'-' and self.check_block_entry():
            return self.fetch_block_entry()
        # Is it the key indicator?
        if ch == u'?' and self.check_key():
            return self.fetch_key()
        # Is it the value indicator?
        if ch == u':' and self.check_value():
            return self.fetch_value()
        # Is it an alias?
        if ch == u'*':
            return self.fetch_alias()
        # Is it an anchor?
        if ch == u'&':
            return self.fetch_anchor()
        # Is it a tag?
        if ch == u'!':
            return self.fetch_tag()
        # Is it a literal scalar?
        if ch == u'|' and not self.flow_level:
            return self.fetch_literal()
        # Is it a folded scalar?
        if ch == u'>' and not self.flow_level:
            return self.fetch_folded()
        # Is it a single quoted scalar?
        if ch == u'\'':
            return self.fetch_single()
        # Is it a double quoted scalar?
        if ch == u'\"':
            return self.fetch_double()
        # It must be a plain scalar then.
        if self.check_plain():
            return self.fetch_plain()
        # No? It's an error. Let's produce a nice error message.
        raise ScannerError("while scanning for the next token", None,
                "found character %r that cannot start any token"
                % ch.encode('utf-8'), self.get_mark())
# Simple keys treatment.
def next_possible_simple_key(self):
# Return the number of the nearest possible simple key. Actually we
# don't need to loop through the whole dictionary. We may replace it
# with the following code:
# if not self.possible_simple_keys:
# return None
# return self.possible_simple_keys[
# min(self.possible_simple_keys.keys())].token_number
min_token_number = None
for level in self.possible_simple_keys:
key = self.possible_simple_keys[level]
if min_token_number is None or key.token_number < min_token_number:
min_token_number = key.token_number
return min_token_number
    def stale_possible_simple_keys(self):
        """Discard simple-key candidates that can no longer be completed;
        a stale *required* candidate is a syntax error."""
        # Remove entries that are no longer possible simple keys. According to
        # the YAML specification, simple keys
        # - should be limited to a single line,
        # - should be no longer than 1024 characters.
        # Disabling this procedure will allow simple keys of any length and
        # height (may cause problems if indentation is broken though).
        # NOTE: on Python 2, dict.keys() returns a list, so deleting inside
        # the loop is safe.
        for level in self.possible_simple_keys.keys():
            key = self.possible_simple_keys[level]
            if key.line != self.line  \
                    or self.index-key.index > 1024:
                if key.required:
                    raise ScannerError("while scanning a simple key", key.mark,
                            "could not found expected ':'", self.get_mark())
                del self.possible_simple_keys[level]
    def save_possible_simple_key(self):
        """Record the current position as a simple-key candidate for the
        current flow level."""
        # The next token may start a simple key. We check if it's possible
        # and save its position. This function is called for
        #   ALIAS, ANCHOR, TAG, SCALAR(flow), '[', and '{'.
        # Check if a simple key is required at the current position.
        required = not self.flow_level and self.indent == self.column
        # A simple key is required only if it is the first token in the current
        # line. Therefore it is always allowed.
        assert self.allow_simple_key or not required
        # The next token might be a simple key. Let's save it's number and
        # position.
        if self.allow_simple_key:
            self.remove_possible_simple_key()
            token_number = self.tokens_taken+len(self.tokens)
            key = SimpleKey(token_number, required,
                    self.index, self.line, self.column, self.get_mark())
            self.possible_simple_keys[self.flow_level] = key
def remove_possible_simple_key(self):
# Remove the saved possible key position at the current flow level.
if self.flow_level in self.possible_simple_keys:
key = self.possible_simple_keys[self.flow_level]
if key.required:
raise ScannerError("while scanning a simple key", key.mark,
"could not found expected ':'", self.get_mark())
del self.possible_simple_keys[self.flow_level]
# Indentation functions.
    def unwind_indent(self, column):
        """Pop indentation levels deeper than *column*, emitting a BLOCK-END
        token for each popped level (no-op in flow context)."""
        ## In flow context, tokens should respect indentation.
        ## Actually the condition should be `self.indent >= column` according to
        ## the spec. But this condition will prohibit intuitively correct
        ## constructions such as
        ## key : {
        ## }
        #if self.flow_level and self.indent > column:
        #    raise ScannerError(None, None,
        #            "invalid intendation or unclosed '[' or '{'",
        #            self.get_mark())
        # In the flow context, indentation is ignored. We make the scanner less
        # restrictive then specification requires.
        if self.flow_level:
            return
        # In block context, we may need to issue the BLOCK-END tokens.
        while self.indent > column:
            mark = self.get_mark()
            self.indent = self.indents.pop()
            self.tokens.append(BlockEndToken(mark, mark))
def add_indent(self, column):
# Check if we need to increase indentation.
if self.indent < column:
self.indents.append(self.indent)
self.indent = column
return True
return False
# Fetchers.
    def fetch_stream_start(self):
        """Emit STREAM-START (always the very first token)."""
        # We always add STREAM-START as the first token and STREAM-END as the
        # last token.
        # Read the token.
        mark = self.get_mark()
        # Add STREAM-START.
        self.tokens.append(StreamStartToken(mark, mark,
                encoding=self.encoding))

    def fetch_stream_end(self):
        """Close all open blocks, emit STREAM-END and mark the scanner done."""
        # Set the current intendation to -1.
        self.unwind_indent(-1)
        # Reset simple keys.
        self.remove_possible_simple_key()
        self.allow_simple_key = False
        self.possible_simple_keys = {}
        # Read the token.
        mark = self.get_mark()
        # Add STREAM-END.
        self.tokens.append(StreamEndToken(mark, mark))
        # The steam is finished.
        self.done = True

    def fetch_directive(self):
        """Handle a '%' directive at column 0: unwind indents and emit
        DIRECTIVE."""
        # Set the current intendation to -1.
        self.unwind_indent(-1)
        # Reset simple keys.
        self.remove_possible_simple_key()
        self.allow_simple_key = False
        # Scan and add DIRECTIVE.
        self.tokens.append(self.scan_directive())

    def fetch_document_start(self):
        # '---' at column 0.
        self.fetch_document_indicator(DocumentStartToken)

    def fetch_document_end(self):
        # '...' at column 0.
        self.fetch_document_indicator(DocumentEndToken)

    def fetch_document_indicator(self, TokenClass):
        """Emit DOCUMENT-START or DOCUMENT-END for a 3-character indicator."""
        # Set the current intendation to -1.
        self.unwind_indent(-1)
        # Reset simple keys. Note that there could not be a block collection
        # after '---'.
        self.remove_possible_simple_key()
        self.allow_simple_key = False
        # Add DOCUMENT-START or DOCUMENT-END.
        start_mark = self.get_mark()
        self.forward(3)
        end_mark = self.get_mark()
        self.tokens.append(TokenClass(start_mark, end_mark))
    def fetch_flow_sequence_start(self):
        # '[' opens a flow sequence.
        self.fetch_flow_collection_start(FlowSequenceStartToken)

    def fetch_flow_mapping_start(self):
        # '{' opens a flow mapping.
        self.fetch_flow_collection_start(FlowMappingStartToken)

    def fetch_flow_collection_start(self, TokenClass):
        """Emit FLOW-SEQUENCE-START or FLOW-MAPPING-START and enter a deeper
        flow level."""
        # '[' and '{' may start a simple key.
        self.save_possible_simple_key()
        # Increase the flow level.
        self.flow_level += 1
        # Simple keys are allowed after '[' and '{'.
        self.allow_simple_key = True
        # Add FLOW-SEQUENCE-START or FLOW-MAPPING-START.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(TokenClass(start_mark, end_mark))

    def fetch_flow_sequence_end(self):
        # ']' closes a flow sequence.
        self.fetch_flow_collection_end(FlowSequenceEndToken)

    def fetch_flow_mapping_end(self):
        # '}' closes a flow mapping.
        self.fetch_flow_collection_end(FlowMappingEndToken)

    def fetch_flow_collection_end(self, TokenClass):
        """Emit FLOW-SEQUENCE-END or FLOW-MAPPING-END and leave the current
        flow level."""
        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()
        # Decrease the flow level.
        self.flow_level -= 1
        # No simple keys after ']' or '}'.
        self.allow_simple_key = False
        # Add FLOW-SEQUENCE-END or FLOW-MAPPING-END.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(TokenClass(start_mark, end_mark))

    def fetch_flow_entry(self):
        """Emit FLOW-ENTRY for ','."""
        # Simple keys are allowed after ','.
        self.allow_simple_key = True
        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()
        # Add FLOW-ENTRY.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(FlowEntryToken(start_mark, end_mark))
    def fetch_block_entry(self):
        """Emit BLOCK-ENTRY for '-', opening a BLOCK-SEQUENCE when the entry
        starts a new block collection."""
        # Block context needs additional checks.
        if not self.flow_level:
            # Are we allowed to start a new entry?
            if not self.allow_simple_key:
                raise ScannerError(None, None,
                        "sequence entries are not allowed here",
                        self.get_mark())
            # We may need to add BLOCK-SEQUENCE-START.
            if self.add_indent(self.column):
                mark = self.get_mark()
                self.tokens.append(BlockSequenceStartToken(mark, mark))
        # It's an error for the block entry to occur in the flow context,
        # but we let the parser detect this.
        else:
            pass
        # Simple keys are allowed after '-'.
        self.allow_simple_key = True
        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()
        # Add BLOCK-ENTRY.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(BlockEntryToken(start_mark, end_mark))

    def fetch_key(self):
        """Emit KEY for an explicit '?' indicator, opening a BLOCK-MAPPING
        when needed."""
        # Block context needs additional checks.
        if not self.flow_level:
            # Are we allowed to start a key (not nessesary a simple)?
            if not self.allow_simple_key:
                raise ScannerError(None, None,
                        "mapping keys are not allowed here",
                        self.get_mark())
            # We may need to add BLOCK-MAPPING-START.
            if self.add_indent(self.column):
                mark = self.get_mark()
                self.tokens.append(BlockMappingStartToken(mark, mark))
        # Simple keys are allowed after '?' in the block context.
        self.allow_simple_key = not self.flow_level
        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()
        # Add KEY.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(KeyToken(start_mark, end_mark))
    def fetch_value(self):
        """Emit VALUE for ':'.

        If a simple-key candidate is pending on the current flow level, a
        KEY token (and, when it opens a new block mapping, a
        BLOCK-MAPPING-START) is retro-inserted at the candidate's recorded
        position before the VALUE is appended.
        """
        # Do we determine a simple key?
        if self.flow_level in self.possible_simple_keys:
            # Add KEY.
            key = self.possible_simple_keys[self.flow_level]
            del self.possible_simple_keys[self.flow_level]
            self.tokens.insert(key.token_number-self.tokens_taken,
                    KeyToken(key.mark, key.mark))
            # If this key starts a new block mapping, we need to add
            # BLOCK-MAPPING-START.
            if not self.flow_level:
                if self.add_indent(key.column):
                    self.tokens.insert(key.token_number-self.tokens_taken,
                            BlockMappingStartToken(key.mark, key.mark))
            # There cannot be two simple keys one after another.
            self.allow_simple_key = False
        # It must be a part of a complex key.
        else:
            # Block context needs additional checks.
            # (Do we really need them? They will be catched by the parser
            # anyway.)
            if not self.flow_level:
                # We are allowed to start a complex value if and only if
                # we can start a simple key.
                if not self.allow_simple_key:
                    raise ScannerError(None, None,
                            "mapping values are not allowed here",
                            self.get_mark())
            # If this value starts a new block mapping, we need to add
            # BLOCK-MAPPING-START.  It will be detected as an error later by
            # the parser.
            if not self.flow_level:
                if self.add_indent(self.column):
                    mark = self.get_mark()
                    self.tokens.append(BlockMappingStartToken(mark, mark))
            # Simple keys are allowed after ':' in the block context.
            self.allow_simple_key = not self.flow_level
            # Reset possible simple key on the current level.
            self.remove_possible_simple_key()
        # Add VALUE.
        start_mark = self.get_mark()
        self.forward()
        end_mark = self.get_mark()
        self.tokens.append(ValueToken(start_mark, end_mark))
    def fetch_alias(self):
        """Scan '*name' and emit ALIAS."""
        # ALIAS could be a simple key.
        self.save_possible_simple_key()
        # No simple keys after ALIAS.
        self.allow_simple_key = False
        # Scan and add ALIAS.
        self.tokens.append(self.scan_anchor(AliasToken))

    def fetch_anchor(self):
        """Scan '&name' and emit ANCHOR."""
        # ANCHOR could start a simple key.
        self.save_possible_simple_key()
        # No simple keys after ANCHOR.
        self.allow_simple_key = False
        # Scan and add ANCHOR.
        self.tokens.append(self.scan_anchor(AnchorToken))

    def fetch_tag(self):
        """Scan a '!' tag and emit TAG."""
        # TAG could start a simple key.
        self.save_possible_simple_key()
        # No simple keys after TAG.
        self.allow_simple_key = False
        # Scan and add TAG.
        self.tokens.append(self.scan_tag())

    def fetch_literal(self):
        # '|' introduces a literal block scalar.
        self.fetch_block_scalar(style='|')

    def fetch_folded(self):
        # '>' introduces a folded block scalar.
        self.fetch_block_scalar(style='>')

    def fetch_block_scalar(self, style):
        """Scan a literal/folded block scalar and emit SCALAR."""
        # A simple key may follow a block scalar.
        self.allow_simple_key = True
        # Reset possible simple key on the current level.
        self.remove_possible_simple_key()
        # Scan and add SCALAR.
        self.tokens.append(self.scan_block_scalar(style))

    def fetch_single(self):
        # Single-quoted flow scalar.
        self.fetch_flow_scalar(style='\'')

    def fetch_double(self):
        # Double-quoted flow scalar.
        self.fetch_flow_scalar(style='"')

    def fetch_flow_scalar(self, style):
        """Scan a quoted flow scalar and emit SCALAR."""
        # A flow scalar could be a simple key.
        self.save_possible_simple_key()
        # No simple keys after flow scalars.
        self.allow_simple_key = False
        # Scan and add SCALAR.
        self.tokens.append(self.scan_flow_scalar(style))

    def fetch_plain(self):
        """Scan a plain (unquoted) scalar and emit SCALAR."""
        # A plain scalar could be a simple key.
        self.save_possible_simple_key()
        # No simple keys after plain scalars. But note that `scan_plain` will
        # change this flag if the scan is finished at the beginning of the
        # line.
        self.allow_simple_key = False
        # Scan and add SCALAR. May change `allow_simple_key`.
        self.tokens.append(self.scan_plain())
# Checkers.
    def check_directive(self):
        """True if '%' at the current position begins a directive (returns
        None -- falsy -- otherwise)."""
        # DIRECTIVE: ^ '%' ...
        # The '%' indicator is already checked.
        if self.column == 0:
            return True

    def check_document_start(self):
        """True for '---' at column 0 followed by space/break/EOF."""
        # DOCUMENT-START: ^ '---' (' '|'\n')
        if self.column == 0:
            if self.prefix(3) == u'---'  \
                    and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
                return True

    def check_document_end(self):
        """True for '...' at column 0 followed by space/break/EOF."""
        # DOCUMENT-END: ^ '...' (' '|'\n')
        if self.column == 0:
            if self.prefix(3) == u'...'  \
                    and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
                return True

    def check_block_entry(self):
        """True if '-' is followed by space/break/EOF."""
        # BLOCK-ENTRY: '-' (' '|'\n')
        return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'

    def check_key(self):
        """True if '?' starts a key here (unconditionally in flow context)."""
        # KEY(flow context):    '?'
        if self.flow_level:
            return True
        # KEY(block context):   '?' (' '|'\n')
        else:
            return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'

    def check_value(self):
        """True if ':' starts a value here (unconditionally in flow context)."""
        # VALUE(flow context):  ':'
        if self.flow_level:
            return True
        # VALUE(block context): ':' (' '|'\n')
        else:
            return self.peek(1) in u'\0 \t\r\n\x85\u2028\u2029'

    def check_plain(self):
        """True if the current character may begin a plain scalar."""
        # A plain scalar may start with any non-space character except:
        #   '-', '?', ':', ',', '[', ']', '{', '}',
        #   '#', '&', '*', '!', '|', '>', '\'', '\"',
        #   '%', '@', '`'.
        #
        # It may also start with
        #   '-', '?', ':'
        # if it is followed by a non-space character.
        #
        # Note that we limit the last rule to the block context (except the
        # '-' character) because we want the flow context to be space
        # independent.
        ch = self.peek()
        return ch not in u'\0 \t\r\n\x85\u2028\u2029-?:,[]{}#&*!|>\'\"%@`'  \
                or (self.peek(1) not in u'\0 \t\r\n\x85\u2028\u2029'
                        and (ch == u'-' or (not self.flow_level and ch in u'?:')))
# Scanners.
    def scan_to_next_token(self):
        """Skip spaces, comments and line breaks up to the next token; a line
        break in block context re-enables simple keys."""
        # We ignore spaces, line breaks and comments.
        # If we find a line break in the block context, we set the flag
        # `allow_simple_key` on.
        # The byte order mark is stripped if it's the first character in the
        # stream. We do not yet support BOM inside the stream as the
        # specification requires. Any such mark will be considered as a part
        # of the document.
        #
        # TODO: We need to make tab handling rules more sane. A good rule is
        #   Tabs cannot precede tokens
        #   BLOCK-SEQUENCE-START, BLOCK-MAPPING-START, BLOCK-END,
        #   KEY(block), VALUE(block), BLOCK-ENTRY
        # So the checking code is
        #   if <TAB>:
        #       self.allow_simple_keys = False
        # We also need to add the check for `allow_simple_keys == True` to
        # `unwind_indent` before issuing BLOCK-END.
        # Scanners for block, flow, and plain scalars need to be modified.
        if self.index == 0 and self.peek() == u'\uFEFF':
            self.forward()
        found = False
        while not found:
            while self.peek() == u' ':
                self.forward()
            if self.peek() == u'#':
                while self.peek() not in u'\0\r\n\x85\u2028\u2029':
                    self.forward()
            if self.scan_line_break():
                if not self.flow_level:
                    self.allow_simple_key = True
            else:
                found = True
    def scan_directive(self):
        """Scan a '%YAML', '%TAG' or unknown directive line and return a
        DirectiveToken (unknown directives are skipped with value None)."""
        # See the specification for details.
        start_mark = self.get_mark()
        self.forward()
        name = self.scan_directive_name(start_mark)
        value = None
        if name == u'YAML':
            value = self.scan_yaml_directive_value(start_mark)
            end_mark = self.get_mark()
        elif name == u'TAG':
            value = self.scan_tag_directive_value(start_mark)
            end_mark = self.get_mark()
        else:
            end_mark = self.get_mark()
            # Unknown directive: skip the rest of the line.
            while self.peek() not in u'\0\r\n\x85\u2028\u2029':
                self.forward()
        self.scan_directive_ignored_line(start_mark)
        return DirectiveToken(name, value, start_mark, end_mark)

    def scan_directive_name(self, start_mark):
        """Scan the alphanumeric ('-'/'_' allowed) directive name after '%'."""
        # See the specification for details.
        length = 0
        ch = self.peek(length)
        while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z'  \
                or ch in u'-_':
            length += 1
            ch = self.peek(length)
        if not length:
            raise ScannerError("while scanning a directive", start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch.encode('utf-8'), self.get_mark())
        value = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch not in u'\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch.encode('utf-8'), self.get_mark())
        return value

    def scan_yaml_directive_value(self, start_mark):
        """Scan the '<major>.<minor>' version pair of a %YAML directive."""
        # See the specification for details.
        while self.peek() == u' ':
            self.forward()
        major = self.scan_yaml_directive_number(start_mark)
        if self.peek() != '.':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a digit or '.', but found %r"
                    % self.peek().encode('utf-8'),
                    self.get_mark())
        self.forward()
        minor = self.scan_yaml_directive_number(start_mark)
        if self.peek() not in u'\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a digit or ' ', but found %r"
                    % self.peek().encode('utf-8'),
                    self.get_mark())
        return (major, minor)

    def scan_yaml_directive_number(self, start_mark):
        """Scan one non-negative ASCII-decimal component of the version."""
        # See the specification for details.
        ch = self.peek()
        if not (u'0' <= ch <= u'9'):
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a digit, but found %r" % ch.encode('utf-8'),
                    self.get_mark())
        length = 0
        while u'0' <= self.peek(length) <= u'9':
            length += 1
        value = int(self.prefix(length))
        self.forward(length)
        return value
    def scan_tag_directive_value(self, start_mark):
        """Scan the '<handle> <prefix>' pair of a %TAG directive."""
        # See the specification for details.
        while self.peek() == u' ':
            self.forward()
        handle = self.scan_tag_directive_handle(start_mark)
        while self.peek() == u' ':
            self.forward()
        prefix = self.scan_tag_directive_prefix(start_mark)
        return (handle, prefix)

    def scan_tag_directive_handle(self, start_mark):
        """Scan the tag handle of a %TAG directive (must be followed by ' ')."""
        # See the specification for details.
        value = self.scan_tag_handle('directive', start_mark)
        ch = self.peek()
        if ch != u' ':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected ' ', but found %r" % ch.encode('utf-8'),
                    self.get_mark())
        return value

    def scan_tag_directive_prefix(self, start_mark):
        """Scan the tag prefix URI of a %TAG directive."""
        # See the specification for details.
        value = self.scan_tag_uri('directive', start_mark)
        ch = self.peek()
        if ch not in u'\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected ' ', but found %r" % ch.encode('utf-8'),
                    self.get_mark())
        return value

    def scan_directive_ignored_line(self, start_mark):
        """Skip trailing spaces and an optional comment, then consume the
        line break terminating a directive."""
        # See the specification for details.
        while self.peek() == u' ':
            self.forward()
        if self.peek() == u'#':
            while self.peek() not in u'\0\r\n\x85\u2028\u2029':
                self.forward()
        ch = self.peek()
        if ch not in u'\0\r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a directive", start_mark,
                    "expected a comment or a line break, but found %r"
                    % ch.encode('utf-8'), self.get_mark())
        self.scan_line_break()
    def scan_anchor(self, TokenClass):
        """Scan an anchor (&name) or alias (*name) and return a token.

        TokenClass is the token type to construct (AnchorToken or
        AliasToken, chosen by the caller).
        """
        # The specification does not restrict characters for anchors and
        # aliases. This may lead to problems, for instance, the document:
        #   [ *alias, value ]
        # can be interpreted in two ways, as
        #   [ "value" ]
        # and
        #   [ *alias , "value" ]
        # Therefore we restrict aliases to numbers and ASCII letters.
        start_mark = self.get_mark()
        indicator = self.peek()
        if indicator == u'*':
            name = 'alias'
        else:
            name = 'anchor'
        self.forward()
        # Accumulate the run of [0-9A-Za-z_-] characters after the indicator.
        length = 0
        ch = self.peek(length)
        while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
                or ch in u'-_':
            length += 1
            ch = self.peek(length)
        if not length:
            raise ScannerError("while scanning an %s" % name, start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch.encode('utf-8'), self.get_mark())
        value = self.prefix(length)
        self.forward(length)
        # The name must be terminated by whitespace or a flow indicator.
        ch = self.peek()
        if ch not in u'\0 \t\r\n\x85\u2028\u2029?:,]}%@`':
            raise ScannerError("while scanning an %s" % name, start_mark,
                    "expected alphabetic or numeric character, but found %r"
                    % ch.encode('utf-8'), self.get_mark())
        end_mark = self.get_mark()
        return TokenClass(value, start_mark, end_mark)
    def scan_tag(self):
        """Scan a tag token.

        Handles the three surface forms: verbatim ``!<uri>``, a handled
        tag ``!handle!suffix`` / ``!suffix``, and the lone non-specific
        ``!``. Returns a TagToken whose value is ``(handle, suffix)``;
        ``handle`` is None for verbatim and lone-'!' tags.
        """
        start_mark = self.get_mark()
        ch = self.peek(1)
        if ch == u'<':
            # Verbatim tag: the URI between '!<' and '>' is taken literally.
            handle = None
            self.forward(2)
            suffix = self.scan_tag_uri('tag', start_mark)
            if self.peek() != u'>':
                raise ScannerError("while parsing a tag", start_mark,
                        "expected '>', but found %r" % self.peek().encode('utf-8'),
                        self.get_mark())
            self.forward()
        elif ch in u'\0 \t\r\n\x85\u2028\u2029':
            # A lone '!' followed by whitespace is the non-specific tag.
            handle = None
            suffix = u'!'
            self.forward()
        else:
            # Look ahead for a second '!' to decide whether the text is a
            # named handle ('!handle!suffix') or a suffix of the primary
            # '!' handle ('!suffix').
            length = 1
            use_handle = False
            while ch not in u'\0 \r\n\x85\u2028\u2029':
                if ch == u'!':
                    use_handle = True
                    break
                length += 1
                ch = self.peek(length)
            handle = u'!'
            if use_handle:
                handle = self.scan_tag_handle('tag', start_mark)
            else:
                handle = u'!'
                self.forward()
            suffix = self.scan_tag_uri('tag', start_mark)
        # The tag must be terminated by end-of-stream, space or a break.
        ch = self.peek()
        if ch not in u'\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a tag", start_mark,
                    "expected ' ', but found %r" % ch.encode('utf-8'),
                    self.get_mark())
        value = (handle, suffix)
        end_mark = self.get_mark()
        return TagToken(value, start_mark, end_mark)
    def scan_block_scalar(self, style):
        """Scan a block scalar introduced by '|' (literal) or '>' (folded).

        Chomping behaviour (from scan_block_scalar_indicators): None
        clips (keep final break, drop trailing empties), True keeps the
        trailing breaks, False strips them — see the tail handling below.
        """
        if style == '>':
            folded = True
        else:
            folded = False
        chunks = []
        start_mark = self.get_mark()
        # Scan the header.
        self.forward()
        chomping, increment = self.scan_block_scalar_indicators(start_mark)
        self.scan_block_scalar_ignored_line(start_mark)
        # Determine the indentation level and go to the first non-empty line.
        min_indent = self.indent+1
        if min_indent < 1:
            min_indent = 1
        if increment is None:
            # No explicit indentation indicator: auto-detect from content.
            breaks, max_indent, end_mark = self.scan_block_scalar_indentation()
            indent = max(min_indent, max_indent)
        else:
            indent = min_indent+increment-1
            breaks, end_mark = self.scan_block_scalar_breaks(indent)
        line_break = u''
        # Scan the inner part of the block scalar.
        while self.column == indent and self.peek() != u'\0':
            chunks.extend(breaks)
            leading_non_space = self.peek() not in u' \t'
            length = 0
            while self.peek(length) not in u'\0\r\n\x85\u2028\u2029':
                length += 1
            chunks.append(self.prefix(length))
            self.forward(length)
            line_break = self.scan_line_break()
            breaks, end_mark = self.scan_block_scalar_breaks(indent)
            if self.column == indent and self.peek() != u'\0':
                # Unfortunately, folding rules are ambiguous.
                #
                # This is the folding according to the specification:
                if folded and line_break == u'\n' \
                        and leading_non_space and self.peek() not in u' \t':
                    if not breaks:
                        chunks.append(u' ')
                else:
                    chunks.append(line_break)
                # This is Clark Evans's interpretation (also in the spec
                # examples):
                #
                #if folded and line_break == u'\n':
                #    if not breaks:
                #        if self.peek() not in ' \t':
                #            chunks.append(u' ')
                #        else:
                #            chunks.append(line_break)
                #else:
                #    chunks.append(line_break)
            else:
                break
        # Chomp the tail: clip keeps only the final break, keep retains
        # the trailing empty lines as well, strip drops everything.
        if chomping is not False:
            chunks.append(line_break)
        if chomping is True:
            chunks.extend(breaks)
        # We are done.
        return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
                style)
    def scan_block_scalar_indicators(self, start_mark):
        """Scan the optional block scalar header indicators.

        Returns (chomping, increment): chomping is True for '+' (keep),
        False for '-' (strip), None if absent (clip); increment is the
        explicit indentation indicator 1-9 or None. The two indicators
        may appear in either order, hence the mirrored branches below.
        """
        chomping = None
        increment = None
        ch = self.peek()
        if ch in u'+-':
            # Chomping indicator first, optionally followed by indentation.
            if ch == '+':
                chomping = True
            else:
                chomping = False
            self.forward()
            ch = self.peek()
            if ch in u'0123456789':
                increment = int(ch)
                if increment == 0:
                    raise ScannerError("while scanning a block scalar", start_mark,
                            "expected indentation indicator in the range 1-9, but found 0",
                            self.get_mark())
                self.forward()
        elif ch in u'0123456789':
            # Indentation indicator first, optionally followed by chomping.
            increment = int(ch)
            if increment == 0:
                raise ScannerError("while scanning a block scalar", start_mark,
                        "expected indentation indicator in the range 1-9, but found 0",
                        self.get_mark())
            self.forward()
            ch = self.peek()
            if ch in u'+-':
                if ch == '+':
                    chomping = True
                else:
                    chomping = False
                self.forward()
        # After the indicators only whitespace / a break / a comment may follow.
        ch = self.peek()
        if ch not in u'\0 \r\n\x85\u2028\u2029':
            raise ScannerError("while scanning a block scalar", start_mark,
                    "expected chomping or indentation indicators, but found %r"
                    % ch.encode('utf-8'), self.get_mark())
        return chomping, increment
def scan_block_scalar_ignored_line(self, start_mark):
# See the specification for details.
while self.peek() == u' ':
self.forward()
if self.peek() == u'#':
while self.peek() not in u'\0\r\n\x85\u2028\u2029':
self.forward()
ch = self.peek()
if ch not in u'\0\r\n\x85\u2028\u2029':
raise ScannerError("while scanning a block scalar", start_mark,
"expected a comment or a line break, but found %r"
% ch.encode('utf-8'), self.get_mark())
self.scan_line_break()
    def scan_block_scalar_indentation(self):
        """Auto-detect a block scalar's indentation.

        Skips leading blank lines and indentation, returning the collected
        line breaks, the deepest indentation column observed, and the mark
        just past the last break.
        """
        chunks = []
        max_indent = 0
        end_mark = self.get_mark()
        while self.peek() in u' \r\n\x85\u2028\u2029':
            if self.peek() != u' ':
                chunks.append(self.scan_line_break())
                end_mark = self.get_mark()
            else:
                self.forward()
                # Track the deepest column reached by a run of spaces.
                if self.column > max_indent:
                    max_indent = self.column
        return chunks, max_indent, end_mark
def scan_block_scalar_breaks(self, indent):
# See the specification for details.
chunks = []
end_mark = self.get_mark()
while self.column < indent and self.peek() == u' ':
self.forward()
while self.peek() in u'\r\n\x85\u2028\u2029':
chunks.append(self.scan_line_break())
end_mark = self.get_mark()
while self.column < indent and self.peek() == u' ':
self.forward()
return chunks, end_mark
    def scan_flow_scalar(self, style):
        """Scan a quoted scalar; *style* is '"' (double) or "'" (single)."""
        # Note that we lose indentation rules for quoted scalars. Quoted
        # scalars don't need to adhere indentation because " and ' clearly
        # mark the beginning and the end of them. Therefore we are less
        # restrictive than the specification requires. We only need to check
        # that document separators are not included in scalars.
        if style == '"':
            double = True
        else:
            double = False
        chunks = []
        start_mark = self.get_mark()
        quote = self.peek()
        self.forward()
        # Alternate non-space and space runs until the closing quote.
        chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
        while self.peek() != quote:
            chunks.extend(self.scan_flow_scalar_spaces(double, start_mark))
            chunks.extend(self.scan_flow_scalar_non_spaces(double, start_mark))
        self.forward()
        end_mark = self.get_mark()
        return ScalarToken(u''.join(chunks), False, start_mark, end_mark,
                style)
ESCAPE_REPLACEMENTS = {
u'0': u'\0',
u'a': u'\x07',
u'b': u'\x08',
u't': u'\x09',
u'\t': u'\x09',
u'n': u'\x0A',
u'v': u'\x0B',
u'f': u'\x0C',
u'r': u'\x0D',
u'e': u'\x1B',
u' ': u'\x20',
u'\"': u'\"',
u'\\': u'\\',
u'N': u'\x85',
u'_': u'\xA0',
u'L': u'\u2028',
u'P': u'\u2029',
}
ESCAPE_CODES = {
u'x': 2,
u'u': 4,
u'U': 8,
}
    def scan_flow_scalar_non_spaces(self, double, start_mark):
        """Scan a run of non-space content inside a quoted scalar.

        Handles '' doubling in single-quoted scalars and backslash
        escapes (including \\xXX/\\uXXXX/\\UXXXXXXXX and escaped line
        breaks) in double-quoted scalars. Returns the decoded chunks.
        """
        chunks = []
        while True:
            # Grab the longest run of plain (unescaped, non-space) chars.
            length = 0
            while self.peek(length) not in u'\'\"\\\0 \t\r\n\x85\u2028\u2029':
                length += 1
            if length:
                chunks.append(self.prefix(length))
                self.forward(length)
            ch = self.peek()
            if not double and ch == u'\'' and self.peek(1) == u'\'':
                # '' inside a single-quoted scalar is an escaped quote.
                chunks.append(u'\'')
                self.forward(2)
            elif (double and ch == u'\'') or (not double and ch in u'\"\\'):
                # These characters are literal in the current quote style.
                chunks.append(ch)
                self.forward()
            elif double and ch == u'\\':
                self.forward()
                ch = self.peek()
                if ch in self.ESCAPE_REPLACEMENTS:
                    chunks.append(self.ESCAPE_REPLACEMENTS[ch])
                    self.forward()
                elif ch in self.ESCAPE_CODES:
                    # Fixed-width hex escape; length is 2, 4 or 8 digits.
                    length = self.ESCAPE_CODES[ch]
                    self.forward()
                    for k in range(length):
                        if self.peek(k) not in u'0123456789ABCDEFabcdef':
                            raise ScannerError("while scanning a double-quoted scalar", start_mark,
                                    "expected escape sequence of %d hexdecimal numbers, but found %r" %
                                        (length, self.peek(k).encode('utf-8')), self.get_mark())
                    code = int(self.prefix(length), 16)
                    # NOTE: unichr/py2 — this module predates Python 3.
                    chunks.append(unichr(code))
                    self.forward(length)
                elif ch in u'\r\n\x85\u2028\u2029':
                    # Escaped line break: the break itself is discarded.
                    self.scan_line_break()
                    chunks.extend(self.scan_flow_scalar_breaks(double, start_mark))
                else:
                    raise ScannerError("while scanning a double-quoted scalar", start_mark,
                            "found unknown escape character %r" % ch.encode('utf-8'), self.get_mark())
            else:
                return chunks
    def scan_flow_scalar_spaces(self, double, start_mark):
        """Scan spaces/tabs (and folded line breaks) inside a quoted scalar.

        A single '\\n' break folds into one space; other breaks are kept.
        """
        chunks = []
        length = 0
        while self.peek(length) in u' \t':
            length += 1
        whitespaces = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch == u'\0':
            # The closing quote was never found.
            raise ScannerError("while scanning a quoted scalar", start_mark,
                    "found unexpected end of stream", self.get_mark())
        elif ch in u'\r\n\x85\u2028\u2029':
            # Whitespace before a break is dropped; apply folding rules.
            line_break = self.scan_line_break()
            breaks = self.scan_flow_scalar_breaks(double, start_mark)
            if line_break != u'\n':
                chunks.append(line_break)
            elif not breaks:
                chunks.append(u' ')
            chunks.extend(breaks)
        else:
            chunks.append(whitespaces)
        return chunks
    def scan_flow_scalar_breaks(self, double, start_mark):
        """Collect consecutive line breaks inside a quoted scalar.

        Rejects document separators ('---'/'...') at the start of a line,
        since a quoted scalar must not swallow them.
        """
        chunks = []
        while True:
            # Instead of checking indentation, we check for document
            # separators.
            prefix = self.prefix(3)
            if (prefix == u'---' or prefix == u'...')   \
                    and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
                raise ScannerError("while scanning a quoted scalar", start_mark,
                        "found unexpected document separator", self.get_mark())
            while self.peek() in u' \t':
                self.forward()
            if self.peek() in u'\r\n\x85\u2028\u2029':
                chunks.append(self.scan_line_break())
            else:
                return chunks
    def scan_plain(self):
        """Scan a plain (unquoted) scalar and return a ScalarToken."""
        # We add an additional restriction for the flow context:
        #   plain scalars in the flow context cannot contain ',', ':' and '?'.
        # We also keep track of the `allow_simple_key` flag here.
        # Indentation rules are loosened for the flow context.
        chunks = []
        start_mark = self.get_mark()
        end_mark = start_mark
        indent = self.indent+1
        # We allow zero indentation for scalars, but then we need to check for
        # document separators at the beginning of the line.
        #if indent == 0:
        #    indent = 1
        spaces = []
        while True:
            length = 0
            if self.peek() == u'#':
                # A comment terminates the scalar.
                break
            while True:
                ch = self.peek(length)
                if ch in u'\0 \t\r\n\x85\u2028\u2029'   \
                        or (not self.flow_level and ch == u':' and
                            self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \
                        or (self.flow_level and ch in u',:?[]{}'):
                    break
                length += 1
            # It's not clear what we should do with ':' in the flow context.
            if (self.flow_level and ch == u':'
                    and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'):
                self.forward(length)
                raise ScannerError("while scanning a plain scalar", start_mark,
                    "found unexpected ':'", self.get_mark(),
                    "Please check http://pyyaml.org/wiki/YAMLColonInFlowContext for details.")
            if length == 0:
                break
            # A plain scalar cannot start a simple key on this line anymore.
            self.allow_simple_key = False
            chunks.extend(spaces)
            chunks.append(self.prefix(length))
            self.forward(length)
            end_mark = self.get_mark()
            spaces = self.scan_plain_spaces(indent, start_mark)
            if not spaces or self.peek() == u'#' \
                    or (not self.flow_level and self.column < indent):
                break
        return ScalarToken(u''.join(chunks), True, start_mark, end_mark)
    def scan_plain_spaces(self, indent, start_mark):
        """Scan the spaces/breaks between chunks of a plain scalar.

        Returns the folded whitespace chunks, or None when a document
        separator ('---'/'...') ends the scalar (the caller treats a
        falsy result as end-of-scalar).
        """
        # The specification is really confusing about tabs in plain scalars.
        # We just forbid them completely. Do not use tabs in YAML!
        chunks = []
        length = 0
        while self.peek(length) in u' ':
            length += 1
        whitespaces = self.prefix(length)
        self.forward(length)
        ch = self.peek()
        if ch in u'\r\n\x85\u2028\u2029':
            line_break = self.scan_line_break()
            # A line break after a plain scalar re-enables simple keys.
            self.allow_simple_key = True
            prefix = self.prefix(3)
            if (prefix == u'---' or prefix == u'...')   \
                    and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
                return
            breaks = []
            while self.peek() in u' \r\n\x85\u2028\u2029':
                if self.peek() == ' ':
                    self.forward()
                else:
                    breaks.append(self.scan_line_break())
                    prefix = self.prefix(3)
                    if (prefix == u'---' or prefix == u'...')   \
                            and self.peek(3) in u'\0 \t\r\n\x85\u2028\u2029':
                        return
            # Fold a single '\n' into a space; keep other breaks as-is.
            if line_break != u'\n':
                chunks.append(line_break)
            elif not breaks:
                chunks.append(u' ')
            chunks.extend(breaks)
        elif whitespaces:
            chunks.append(whitespaces)
        return chunks
    def scan_tag_handle(self, name, start_mark):
        """Scan a tag handle: '!', '!!', or '!word!'.

        *name* ('tag' or 'directive') is only used in error messages.
        """
        # For some strange reasons, the specification does not allow '_' in
        # tag handles. I have allowed it anyway.
        ch = self.peek()
        if ch != u'!':
            raise ScannerError("while scanning a %s" % name, start_mark,
                    "expected '!', but found %r" % ch.encode('utf-8'),
                    self.get_mark())
        length = 1
        ch = self.peek(length)
        if ch != u' ':
            # A named handle must close with a second '!'.
            while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z'    \
                    or ch in u'-_':
                length += 1
                ch = self.peek(length)
            if ch != u'!':
                self.forward(length)
                raise ScannerError("while scanning a %s" % name, start_mark,
                        "expected '!', but found %r" % ch.encode('utf-8'),
                        self.get_mark())
            length += 1
        value = self.prefix(length)
        self.forward(length)
        return value
    def scan_tag_uri(self, name, start_mark):
        """Scan a tag URI, decoding %XX escapes along the way.

        *name* ('tag' or 'directive') is only used in error messages.
        """
        # Note: we do not check if URI is well-formed.
        chunks = []
        length = 0
        ch = self.peek(length)
        while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z'    \
                or ch in u'-;/?:@&=+$,_.!~*\'()[]%':
            if ch == u'%':
                # Flush what we have, then decode the %-escape run.
                chunks.append(self.prefix(length))
                self.forward(length)
                length = 0
                chunks.append(self.scan_uri_escapes(name, start_mark))
            else:
                length += 1
            ch = self.peek(length)
        if length:
            chunks.append(self.prefix(length))
            self.forward(length)
            length = 0
        if not chunks:
            raise ScannerError("while parsing a %s" % name, start_mark,
                    "expected URI, but found %r" % ch.encode('utf-8'),
                    self.get_mark())
        return u''.join(chunks)
    def scan_uri_escapes(self, name, start_mark):
        """Decode a run of %XX escapes in a tag URI to a unicode string.

        Collects the raw bytes first, then UTF-8 decodes them in one go so
        multi-byte sequences spanning several escapes work.
        NOTE(review): Python 2 only — uses `unicode` and the
        `except Exc, name` syntax.
        """
        bytes = []
        mark = self.get_mark()
        while self.peek() == u'%':
            self.forward()
            for k in range(2):
                if self.peek(k) not in u'0123456789ABCDEFabcdef':
                    raise ScannerError("while scanning a %s" % name, start_mark,
                            "expected URI escape sequence of 2 hexdecimal numbers, but found %r" %
                                (self.peek(k).encode('utf-8')), self.get_mark())
            bytes.append(chr(int(self.prefix(2), 16)))
            self.forward(2)
        try:
            value = unicode(''.join(bytes), 'utf-8')
        except UnicodeDecodeError, exc:
            raise ScannerError("while scanning a %s" % name, start_mark, str(exc), mark)
        return value
def scan_line_break(self):
# Transforms:
# '\r\n' : '\n'
# '\r' : '\n'
# '\n' : '\n'
# '\x85' : '\n'
# '\u2028' : '\u2028'
# '\u2029 : '\u2029'
# default : ''
ch = self.peek()
if ch in u'\r\n\x85':
if self.prefix(2) == u'\r\n':
self.forward(2)
else:
self.forward()
return u'\n'
elif ch in u'\u2028\u2029':
self.forward()
return ch
return u''
#try:
# import psyco
# psyco.bind(Scanner)
#except ImportError:
# pass
|
mit
|
Fl0rianFischer/sme_odoo
|
addons/hr_expense/__openerp__.py
|
23
|
1789
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Expense Tracker',
'version': '2.0',
'category': 'Human Resources',
'sequence': 95,
'summary': 'Expenses Validation, Invoicing',
'description': """
Manage expenses by Employees
============================
This application allows you to manage your employees' daily expenses. It gives you access to your employees’ fee notes and give you the right to complete and validate or refuse the notes. After validation it creates an invoice for the employee.
Employee can encode their own expenses and the validation flow puts it automatically in the accounting after validation by managers.
The whole flow is implemented as:
---------------------------------
* Draft expense
* Submitted by the employee to his manager
* Approved by his manager
* Validation by the accountant and accounting entries creation
This module also uses analytic accounting and is compatible with the invoice on timesheet module so that you are able to automatically re-invoice your customers' expenses if your work by project.
""",
'author': 'Odoo S.A.',
'website': 'https://www.odoo.com/page/expenses',
'depends': ['hr_contract', 'account_accountant', 'report'],
'data': [
'security/ir.model.access.csv',
'data/hr_expense_data.xml',
'data/hr_expense_sequence.xml',
'wizard/hr_expense_refuse_reason.xml',
'views/hr_expense_views.xml',
'security/ir_rule.xml',
'views/hr_expense_installer_views.xml',
'views/report_expense.xml',
'data/web_tip_data.xml',
'views/hr_dashboard.xml',
],
'demo': ['data/hr_expense_demo.xml'],
'installable': True,
'application': True,
}
|
gpl-3.0
|
cdorer/crits
|
crits/core/management/commands/create_default_dashboard.py
|
21
|
3882
|
from django.core.management.base import BaseCommand
from optparse import make_option
class Command(BaseCommand):
    """
    Script Class.

    Django management command that creates the public "Default" dashboard
    via create_dashboard() below. Pass --drop/-d to drop the existing
    dashboard collections first. (Python 2 codebase: print statements.)
    """
    option_list = BaseCommand.option_list + (
        make_option('--drop',
                    '-d',
                    dest='drop',
                    action="store_true",
                    default=False,
                    help='Drop existing content before adding.'),
    )
    help = 'Creates the default dashboard.'
    def handle(self, *args, **options):
        """
        Script Execution.

        Reads the --drop flag and delegates to create_dashboard().
        """
        drop = options.get('drop')
        if drop:
            print "Dropping enabled"
        else:
            print "Dropping protection enabled"
        create_dashboard(drop)
def create_dashboard(drop=False):
from crits.dashboards.dashboard import SavedSearch, Dashboard
if drop:
Dashboard.drop_collection()
SavedSearch.drop_collection()
defaultDashboard = Dashboard.objects(name="Default", analystId__not__exists=1 , isPublic=True).first()
if not defaultDashboard:
defaultDashboard = Dashboard()
defaultDashboard.name = "Default"
defaultDashboard.isPublic = True
defaultDashboard.save()
for title in ["Counts", "Top Campaigns","Recent Indicators",
"Recent Emails", "Recent Samples"]:
savedSearch = SavedSearch()
savedSearch.name = title
savedSearch.dashboard = defaultDashboard.id
savedSearch.isDefaultOnDashboard = True
savedSearch.tableColumns = getColumnsForTable(title)
if title == "Counts":
savedSearch.sizex = 10
elif title == "Top Campaigns":
savedSearch.sizex = 25
elif title == "Counts":
savedSearch.sizey = 13
elif title == "Recent Indicators":
savedSearch.row = 15
elif title == "Recent Emails":
savedSearch.row = 23
elif title == "Recent Samples":
savedSearch.row = 31
savedSearch.save()
print "Default Dashboard Created."
else:
print "Default Dashboard already exists."
def getColumnsForTable(title):
    """
    Return the column definitions for the named default-dashboard table.

    Each column is a dict with 'field', 'caption' and 'size' keys; the
    'details' column is rendered narrower (5%) than the rest (10%).
    """
    if title == "Counts":
        colFields = ["type", "count"]
        colNames = ["Type", "Count"]
    elif title == "Top Campaigns":
        colFields = ["name", "email_count", "indicator_count", "sample_count",
                     "domain_count", "ip_count", "event_count", "pcap_count"]
        colNames = ["Name", "Email Count", "Indicator Count", "Sample Count",
                    "Domain Count", "IP Count", "Event Count", "PCAP Count"]
    elif title == "Recent Indicators":
        colFields = ["details","value", "type", "modified", "status", "source", "campaign"]
        colNames = ["Details","Value", "Type", "Added", "Status", "Source", "Campaign"]
    elif title == "Recent Emails":
        colFields = ["details","from", "to", "subject", "isodate", "source", "campaign"]
        colNames = ["Details","From", "Recip", "Subject", "Date", "Source", "Campaign"]
    elif title == "Recent Samples":
        colFields = ["details","filename", "size", "filetype", "created",
                     "modified", "source", "campaign"]
        colNames = ["Details","Filename", "Size", "Filetype", "Added",
                    "Modified", "Source", "Campaign"]
    return [
        {
            "field": field,
            "caption": caption,
            "size": "5%" if field == "details" else "10%",
        }
        for field, caption in zip(colFields, colNames)
    ]
|
mit
|
Teagan42/home-assistant
|
tests/components/zwave/test_climate.py
|
2
|
32574
|
"""Test Z-Wave climate devices."""
import pytest
from homeassistant.components.climate.const import (
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
HVAC_MODES,
PRESET_AWAY,
PRESET_BOOST,
PRESET_ECO,
PRESET_NONE,
SUPPORT_AUX_HEAT,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_SWING_MODE,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.components.zwave import climate, const
from homeassistant.components.zwave.climate import (
AUX_HEAT_ZWAVE_MODE,
DEFAULT_HVAC_MODES,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from tests.mock.zwave import MockEntityValues, MockNode, MockValue, value_changed
@pytest.fixture
def device(hass, mock_openzwave):
    """Fixture to provide a precreated climate device.

    Multiple-setpoint thermostat: a mode value listing the four standard
    HVAC modes plus separate heating/cooling setpoints, fan mode,
    operating state and fan action values.
    """
    node = MockNode()
    values = MockEntityValues(
        primary=MockValue(
            command_class=const.COMMAND_CLASS_THERMOSTAT_MODE,
            data=HVAC_MODE_HEAT,
            data_items=[
                HVAC_MODE_OFF,
                HVAC_MODE_HEAT,
                HVAC_MODE_COOL,
                HVAC_MODE_HEAT_COOL,
            ],
            node=node,
        ),
        setpoint_heating=MockValue(data=1, node=node),
        setpoint_cooling=MockValue(data=10, node=node),
        temperature=MockValue(data=5, node=node, units=None),
        fan_mode=MockValue(data="test2", data_items=[3, 4, 5], node=node),
        operating_state=MockValue(data=CURRENT_HVAC_HEAT, node=node),
        fan_action=MockValue(data=7, node=node),
    )
    device = climate.get_device(hass, node=node, values=values, node_config={})
    yield device
@pytest.fixture
def device_zxt_120(hass, mock_openzwave):
    """Fixture to provide a precreated climate device.

    Remotec ZXT-120 variant (matched by manufacturer_id/product_id),
    which additionally exposes a swing-mode value.
    """
    node = MockNode(manufacturer_id="5254", product_id="8377")
    values = MockEntityValues(
        primary=MockValue(
            command_class=const.COMMAND_CLASS_THERMOSTAT_MODE,
            data=HVAC_MODE_HEAT,
            data_items=[
                HVAC_MODE_OFF,
                HVAC_MODE_HEAT,
                HVAC_MODE_COOL,
                HVAC_MODE_HEAT_COOL,
            ],
            node=node,
        ),
        setpoint_heating=MockValue(data=1, node=node),
        setpoint_cooling=MockValue(data=10, node=node),
        temperature=MockValue(data=5, node=node, units=None),
        fan_mode=MockValue(data="test2", data_items=[3, 4, 5], node=node),
        operating_state=MockValue(data=CURRENT_HVAC_HEAT, node=node),
        fan_action=MockValue(data=7, node=node),
        zxt_120_swing_mode=MockValue(data="test3", data_items=[6, 7, 8], node=node),
    )
    device = climate.get_device(hass, node=node, values=values, node_config={})
    yield device
@pytest.fixture
def device_mapping(hass, mock_openzwave):
    """Fixture to provide a precreated climate device. Test state mapping.

    Mode and operating-state values use vendor-style capitalized strings
    ("Heat", "Full Power", "heating", ...) that must be mapped onto the
    standard HA HVAC modes/actions.
    """
    node = MockNode()
    values = MockEntityValues(
        primary=MockValue(
            command_class=const.COMMAND_CLASS_THERMOSTAT_MODE,
            data="Heat",
            data_items=["Off", "Cool", "Heat", "Full Power", "Auto"],
            node=node,
        ),
        setpoint_heating=MockValue(data=1, node=node),
        setpoint_cooling=MockValue(data=10, node=node),
        temperature=MockValue(data=5, node=node, units=None),
        fan_mode=MockValue(data="test2", data_items=[3, 4, 5], node=node),
        operating_state=MockValue(data="heating", node=node),
        fan_action=MockValue(data=7, node=node),
    )
    device = climate.get_device(hass, node=node, values=values, node_config={})
    yield device
@pytest.fixture
def device_unknown(hass, mock_openzwave):
    """Fixture to provide a precreated climate device. Test state unknown.

    The mode list contains an unrecognized entry ("Abcdefg") and the
    operating state is an unknown string ("test4").
    """
    node = MockNode()
    values = MockEntityValues(
        primary=MockValue(
            command_class=const.COMMAND_CLASS_THERMOSTAT_MODE,
            data="Heat",
            data_items=["Off", "Cool", "Heat", "heat_cool", "Abcdefg"],
            node=node,
        ),
        setpoint_heating=MockValue(data=1, node=node),
        setpoint_cooling=MockValue(data=10, node=node),
        temperature=MockValue(data=5, node=node, units=None),
        fan_mode=MockValue(data="test2", data_items=[3, 4, 5], node=node),
        operating_state=MockValue(data="test4", node=node),
        fan_action=MockValue(data=7, node=node),
    )
    device = climate.get_device(hass, node=node, values=values, node_config={})
    yield device
@pytest.fixture
def device_heat_cool(hass, mock_openzwave):
    """Fixture to provide a precreated climate device.

    Mode list mixes standard heat/cool with vendor eco variants
    ("Heat Eco", "Cool Eco").
    """
    node = MockNode()
    values = MockEntityValues(
        primary=MockValue(
            command_class=const.COMMAND_CLASS_THERMOSTAT_MODE,
            data=HVAC_MODE_HEAT,
            data_items=[
                HVAC_MODE_OFF,
                HVAC_MODE_HEAT,
                HVAC_MODE_COOL,
                "Heat Eco",
                "Cool Eco",
            ],
            node=node,
        ),
        setpoint_heating=MockValue(data=1, node=node),
        setpoint_cooling=MockValue(data=10, node=node),
        temperature=MockValue(data=5, node=node, units=None),
        fan_mode=MockValue(data="test2", data_items=[3, 4, 5], node=node),
        operating_state=MockValue(data="test4", node=node),
        fan_action=MockValue(data=7, node=node),
    )
    device = climate.get_device(hass, node=node, values=values, node_config={})
    yield device
@pytest.fixture
def device_heat_cool_range(hass, mock_openzwave):
    """Fixture to provide a precreated climate device. Target range mode.

    Starts in HVAC_MODE_HEAT_COOL so both heating and cooling setpoints
    form a target temperature range.
    """
    node = MockNode()
    values = MockEntityValues(
        primary=MockValue(
            command_class=const.COMMAND_CLASS_THERMOSTAT_MODE,
            data=HVAC_MODE_HEAT_COOL,
            data_items=[
                HVAC_MODE_OFF,
                HVAC_MODE_HEAT,
                HVAC_MODE_COOL,
                HVAC_MODE_HEAT_COOL,
            ],
            node=node,
        ),
        setpoint_heating=MockValue(data=1, node=node),
        setpoint_cooling=MockValue(data=10, node=node),
        temperature=MockValue(data=5, node=node, units=None),
        fan_mode=MockValue(data="test2", data_items=[3, 4, 5], node=node),
        operating_state=MockValue(data="test4", node=node),
        fan_action=MockValue(data=7, node=node),
    )
    device = climate.get_device(hass, node=node, values=values, node_config={})
    yield device
@pytest.fixture
def device_heat_cool_away(hass, mock_openzwave):
    """Fixture to provide a precreated climate device. Target range mode.

    Adds PRESET_AWAY to the mode list together with dedicated away
    heating/cooling setpoint values.
    """
    node = MockNode()
    values = MockEntityValues(
        primary=MockValue(
            command_class=const.COMMAND_CLASS_THERMOSTAT_MODE,
            data=HVAC_MODE_HEAT_COOL,
            data_items=[
                HVAC_MODE_OFF,
                HVAC_MODE_HEAT,
                HVAC_MODE_COOL,
                HVAC_MODE_HEAT_COOL,
                PRESET_AWAY,
            ],
            node=node,
        ),
        setpoint_heating=MockValue(data=2, node=node),
        setpoint_cooling=MockValue(data=9, node=node),
        setpoint_away_heating=MockValue(data=1, node=node),
        setpoint_away_cooling=MockValue(data=10, node=node),
        temperature=MockValue(data=5, node=node, units=None),
        fan_mode=MockValue(data="test2", data_items=[3, 4, 5], node=node),
        operating_state=MockValue(data="test4", node=node),
        fan_action=MockValue(data=7, node=node),
    )
    device = climate.get_device(hass, node=node, values=values, node_config={})
    yield device
@pytest.fixture
def device_heat_eco(hass, mock_openzwave):
    """Fixture to provide a precreated climate device. heat/heat eco.

    Vendor "heat econ" mode plus a dedicated eco heating setpoint value.
    """
    node = MockNode()
    values = MockEntityValues(
        primary=MockValue(
            command_class=const.COMMAND_CLASS_THERMOSTAT_MODE,
            data=HVAC_MODE_HEAT,
            data_items=[HVAC_MODE_OFF, HVAC_MODE_HEAT, "heat econ"],
            node=node,
        ),
        setpoint_heating=MockValue(data=2, node=node),
        setpoint_eco_heating=MockValue(data=1, node=node),
        temperature=MockValue(data=5, node=node, units=None),
        fan_mode=MockValue(data="test2", data_items=[3, 4, 5], node=node),
        operating_state=MockValue(data="test4", node=node),
        fan_action=MockValue(data=7, node=node),
    )
    device = climate.get_device(hass, node=node, values=values, node_config={})
    yield device
@pytest.fixture
def device_aux_heat(hass, mock_openzwave):
    """Fixture to provide a precreated climate device. aux heat.

    Includes the vendor "Aux Heat" mode in the mode list.
    """
    node = MockNode()
    values = MockEntityValues(
        primary=MockValue(
            command_class=const.COMMAND_CLASS_THERMOSTAT_MODE,
            data=HVAC_MODE_HEAT,
            data_items=[HVAC_MODE_OFF, HVAC_MODE_HEAT, "Aux Heat"],
            node=node,
        ),
        setpoint_heating=MockValue(data=2, node=node),
        setpoint_eco_heating=MockValue(data=1, node=node),
        temperature=MockValue(data=5, node=node, units=None),
        fan_mode=MockValue(data="test2", data_items=[3, 4, 5], node=node),
        operating_state=MockValue(data="test4", node=node),
        fan_action=MockValue(data=7, node=node),
    )
    device = climate.get_device(hass, node=node, values=values, node_config={})
    yield device
@pytest.fixture
def device_single_setpoint(hass, mock_openzwave):
    """Fixture to provide a precreated climate device.

    SETPOINT_THERMOSTAT device class.

    Primary value is a setpoint command class and no mode value exists.
    """
    node = MockNode()
    values = MockEntityValues(
        primary=MockValue(
            command_class=const.COMMAND_CLASS_THERMOSTAT_SETPOINT, data=1, node=node
        ),
        mode=None,
        temperature=MockValue(data=5, node=node, units=None),
        fan_mode=MockValue(data="test2", data_items=[3, 4, 5], node=node),
        operating_state=MockValue(data=CURRENT_HVAC_HEAT, node=node),
        fan_action=MockValue(data=7, node=node),
    )
    device = climate.get_device(hass, node=node, values=values, node_config={})
    yield device
@pytest.fixture
def device_single_setpoint_with_mode(hass, mock_openzwave):
    """Fixture to provide a precreated climate device.

    SETPOINT_THERMOSTAT device class with COMMAND_CLASS_THERMOSTAT_MODE command class

    Like device_single_setpoint, but a separate mode value (off/heat) is
    also present.
    """
    node = MockNode()
    values = MockEntityValues(
        primary=MockValue(
            command_class=const.COMMAND_CLASS_THERMOSTAT_SETPOINT, data=1, node=node
        ),
        mode=MockValue(
            command_class=const.COMMAND_CLASS_THERMOSTAT_MODE,
            data=HVAC_MODE_HEAT,
            data_items=[HVAC_MODE_OFF, HVAC_MODE_HEAT],
            node=node,
        ),
        temperature=MockValue(data=5, node=node, units=None),
        fan_mode=MockValue(data="test2", data_items=[3, 4, 5], node=node),
        operating_state=MockValue(data=CURRENT_HVAC_HEAT, node=node),
        fan_action=MockValue(data=7, node=node),
    )
    device = climate.get_device(hass, node=node, values=values, node_config={})
    yield device
def test_get_device_detects_none(hass, mock_openzwave):
    """Test get_device returns None for an unrecognized value set."""
    node = MockNode()
    values = MockEntityValues(primary=MockValue(data=0, node=node))
    assert climate.get_device(hass, node=node, values=values, node_config={}) is None
def test_get_device_detects_multiple_setpoint_device(device):
    """Test get_device returns a Z-Wave multiple setpoint device."""
    # The standard fixture (thermostat-mode primary value) must map to
    # the multiple-setpoint implementation.
    assert isinstance(device, climate.ZWaveClimateMultipleSetpoint)
def test_get_device_detects_single_setpoint_device(device_single_setpoint):
    """Test get_device returns a Z-Wave single setpoint device."""
    # A setpoint-command-class primary value must map to the
    # single-setpoint implementation.
    assert isinstance(device_single_setpoint, climate.ZWaveClimateSingleSetpoint)
def test_default_hvac_modes():
    """Test whether all hvac modes are included in default_hvac_modes."""
    assert all(mode in DEFAULT_HVAC_MODES for mode in HVAC_MODES)
def test_supported_features(device):
    """Verify the feature bitmask of the standard multi-setpoint device."""
    expected = (
        SUPPORT_FAN_MODE
        + SUPPORT_TARGET_TEMPERATURE
        + SUPPORT_TARGET_TEMPERATURE_RANGE
    )
    assert device.supported_features == expected
def test_supported_features_temp_range(device_heat_cool_range):
    """Verify the feature bitmask with a target temperature range."""
    expected = (
        SUPPORT_FAN_MODE
        + SUPPORT_TARGET_TEMPERATURE
        + SUPPORT_TARGET_TEMPERATURE_RANGE
    )
    assert device_heat_cool_range.supported_features == expected
def test_supported_features_preset_mode(device_mapping):
    """Verify the feature bitmask when preset modes are available."""
    expected = (
        SUPPORT_FAN_MODE
        + SUPPORT_TARGET_TEMPERATURE
        + SUPPORT_TARGET_TEMPERATURE_RANGE
        + SUPPORT_PRESET_MODE
    )
    assert device_mapping.supported_features == expected
def test_supported_features_preset_mode_away(device_heat_cool_away):
    """Verify the feature bitmask when the away preset is available."""
    expected = (
        SUPPORT_FAN_MODE
        + SUPPORT_TARGET_TEMPERATURE
        + SUPPORT_TARGET_TEMPERATURE_RANGE
        + SUPPORT_PRESET_MODE
    )
    assert device_heat_cool_away.supported_features == expected
def test_supported_features_swing_mode(device_zxt_120):
    """Verify the feature bitmask when swing mode is available (ZXT-120)."""
    expected = (
        SUPPORT_FAN_MODE
        + SUPPORT_TARGET_TEMPERATURE
        + SUPPORT_TARGET_TEMPERATURE_RANGE
        + SUPPORT_SWING_MODE
    )
    assert device_zxt_120.supported_features == expected
def test_supported_features_aux_heat(device_aux_heat):
    """Verify the feature bitmask when auxiliary heat is available."""
    expected = SUPPORT_FAN_MODE + SUPPORT_TARGET_TEMPERATURE + SUPPORT_AUX_HEAT
    assert device_aux_heat.supported_features == expected
def test_supported_features_single_setpoint(device_single_setpoint):
    """Verify the feature bitmask for a SETPOINT_THERMOSTAT device."""
    expected = SUPPORT_FAN_MODE + SUPPORT_TARGET_TEMPERATURE
    assert device_single_setpoint.supported_features == expected
def test_supported_features_single_setpoint_with_mode(device_single_setpoint_with_mode):
    """Verify the feature bitmask for SETPOINT_THERMOSTAT with a mode value."""
    expected = SUPPORT_FAN_MODE + SUPPORT_TARGET_TEMPERATURE
    assert device_single_setpoint_with_mode.supported_features == expected
def test_zxt_120_swing_mode(device_zxt_120):
    """Swing-mode list, setter, and update handling on the ZXT-120."""
    dev = device_zxt_120
    assert dev.swing_modes == [6, 7, 8]
    assert dev._zxt_120 == 1
    # Setting a swing mode writes straight through to the zwave value.
    assert dev.values.zxt_120_swing_mode.data == "test3"
    dev.set_swing_mode("test_swing_set")
    assert dev.values.zxt_120_swing_mode.data == "test_swing_set"
    # A value-changed callback refreshes the reported swing mode.
    value_changed(dev.values.zxt_120_swing_mode)
    assert dev.swing_mode == "test_swing_set"
    dev.values.zxt_120_swing_mode.data = "test_swing_updated"
    value_changed(dev.values.zxt_120_swing_mode)
    assert dev.swing_mode == "test_swing_updated"
def test_temperature_unit(device):
    """Temperature unit mirrors the zwave temperature value's units field."""
    assert device.temperature_unit == TEMP_CELSIUS
    # Switching the raw unit string and firing the callback flips the unit.
    device.values.temperature.units = "F"
    value_changed(device.values.temperature)
    assert device.temperature_unit == TEMP_FAHRENHEIT
    device.values.temperature.units = "C"
    value_changed(device.values.temperature)
    assert device.temperature_unit == TEMP_CELSIUS
def test_data_lists(device):
    """Fan/HVAC/preset option lists come from the zwave value item lists."""
    assert device.fan_modes == [3, 4, 5]
    assert device.hvac_modes == [
        HVAC_MODE_OFF,
        HVAC_MODE_HEAT,
        HVAC_MODE_COOL,
        HVAC_MODE_HEAT_COOL,
    ]
    assert device.preset_modes == []
    # Without a primary value the preset list must stay empty, not raise.
    device.values.primary = None
    assert device.preset_modes == []
def test_data_lists_single_setpoint(device_single_setpoint):
    """Option lists for a single-setpoint thermostat: no modes, no presets."""
    dev = device_single_setpoint
    assert dev.fan_modes == [3, 4, 5]
    assert dev.hvac_modes == []
    assert dev.preset_modes == []
def test_data_lists_single_setpoint_with_mode(device_single_setpoint_with_mode):
    """Option lists for a single setpoint plus an operation-mode value."""
    dev = device_single_setpoint_with_mode
    assert dev.fan_modes == [3, 4, 5]
    assert dev.hvac_modes == [HVAC_MODE_OFF, HVAC_MODE_HEAT]
    assert dev.preset_modes == []
def test_data_lists_mapping(device_mapping):
    """Mapped devices expose normalized hvac modes and derived presets."""
    dev = device_mapping
    assert dev.hvac_modes == ["off", "cool", "heat", "heat_cool"]
    assert dev.preset_modes == ["boost", "none"]
    # Without a primary value the preset list must be empty.
    dev.values.primary = None
    assert dev.preset_modes == []
def test_target_value_set(device):
    """set_temperature writes to the setpoint matching the active mode."""
    assert device.values.setpoint_heating.data == 1
    assert device.values.setpoint_cooling.data == 10
    # A call with no kwargs must leave both setpoints untouched.
    device.set_temperature()
    assert device.values.setpoint_heating.data == 1
    assert device.values.setpoint_cooling.data == 10
    # Heat mode is active, so ATTR_TEMPERATURE targets the heating setpoint.
    device.set_temperature(**{ATTR_TEMPERATURE: 2})
    assert device.values.setpoint_heating.data == 2
    assert device.values.setpoint_cooling.data == 10
    device.set_hvac_mode(HVAC_MODE_COOL)
    value_changed(device.values.primary)
    assert device.values.setpoint_heating.data == 2
    assert device.values.setpoint_cooling.data == 10
    # After switching to cool, the cooling setpoint is the one written.
    device.set_temperature(**{ATTR_TEMPERATURE: 9})
    assert device.values.setpoint_heating.data == 2
    assert device.values.setpoint_cooling.data == 9
def test_target_value_set_range(device_heat_cool_range):
    """Low/high kwargs update the heating and cooling setpoints."""
    dev = device_heat_cool_range
    assert dev.values.setpoint_heating.data == 1
    assert dev.values.setpoint_cooling.data == 10
    # No kwargs leaves both setpoints alone.
    dev.set_temperature()
    assert dev.values.setpoint_heating.data == 1
    assert dev.values.setpoint_cooling.data == 10
    dev.set_temperature(**{ATTR_TARGET_TEMP_LOW: 2})
    assert dev.values.setpoint_heating.data == 2
    assert dev.values.setpoint_cooling.data == 10
    dev.set_temperature(**{ATTR_TARGET_TEMP_HIGH: 9})
    assert dev.values.setpoint_heating.data == 2
    assert dev.values.setpoint_cooling.data == 9
    # Both ends may be updated in a single call.
    dev.set_temperature(**{ATTR_TARGET_TEMP_LOW: 3, ATTR_TARGET_TEMP_HIGH: 8})
    assert dev.values.setpoint_heating.data == 3
    assert dev.values.setpoint_cooling.data == 8
def test_target_value_set_range_away(device_heat_cool_away):
    """While the away preset is active, writes land on the away setpoints."""
    dev = device_heat_cool_away
    assert dev.values.setpoint_heating.data == 2
    assert dev.values.setpoint_cooling.data == 9
    assert dev.values.setpoint_away_heating.data == 1
    assert dev.values.setpoint_away_cooling.data == 10
    dev.set_preset_mode(PRESET_AWAY)
    dev.set_temperature(**{ATTR_TARGET_TEMP_LOW: 0, ATTR_TARGET_TEMP_HIGH: 11})
    # Normal setpoints are untouched; away setpoints take the new values.
    assert dev.values.setpoint_heating.data == 2
    assert dev.values.setpoint_cooling.data == 9
    assert dev.values.setpoint_away_heating.data == 0
    assert dev.values.setpoint_away_cooling.data == 11
def test_target_value_set_eco(device_heat_eco):
    """While the eco preset is active, writes land on the eco setpoint."""
    dev = device_heat_eco
    assert dev.values.setpoint_heating.data == 2
    assert dev.values.setpoint_eco_heating.data == 1
    dev.set_preset_mode("heat econ")
    dev.set_temperature(**{ATTR_TEMPERATURE: 0})
    # The normal heating setpoint is untouched; only the eco one changes.
    assert dev.values.setpoint_heating.data == 2
    assert dev.values.setpoint_eco_heating.data == 0
def test_target_value_set_single_setpoint(device_single_setpoint):
    """A single-setpoint device writes straight to its primary value."""
    dev = device_single_setpoint
    assert dev.values.primary.data == 1
    dev.set_temperature(**{ATTR_TEMPERATURE: 2})
    assert dev.values.primary.data == 2
def test_operation_value_set(device):
    """set_hvac_mode / set_preset_mode write to the primary zwave value."""
    assert device.values.primary.data == HVAC_MODE_HEAT
    device.set_hvac_mode(HVAC_MODE_COOL)
    assert device.values.primary.data == HVAC_MODE_COOL
    device.set_preset_mode(PRESET_ECO)
    assert device.values.primary.data == PRESET_ECO
    # Clearing the preset restores the plain heat_cool mode.
    device.set_preset_mode(PRESET_NONE)
    assert device.values.primary.data == HVAC_MODE_HEAT_COOL
    # With no primary value both setters must be silent no-ops.
    device.values.primary = None
    device.set_hvac_mode("test_set_failes")
    assert device.values.primary is None
    device.set_preset_mode("test_set_failes")
    assert device.values.primary is None
def test_operation_value_set_mapping(device_mapping):
    """Mode and preset setters translate back to the device's raw states."""
    dev = device_mapping
    assert dev.values.primary.data == "Heat"
    dev.set_hvac_mode(HVAC_MODE_COOL)
    assert dev.values.primary.data == "Cool"
    dev.set_hvac_mode(HVAC_MODE_OFF)
    assert dev.values.primary.data == "Off"
    dev.set_preset_mode(PRESET_BOOST)
    assert dev.values.primary.data == "Full Power"
    dev.set_preset_mode(PRESET_ECO)
    assert dev.values.primary.data == "eco"
def test_operation_value_set_unknown(device_unknown):
    """Unknown preset strings are written as-is; PRESET_NONE restores mode."""
    dev = device_unknown
    assert dev.values.primary.data == "Heat"
    dev.set_preset_mode("Abcdefg")
    assert dev.values.primary.data == "Abcdefg"
    dev.set_preset_mode(PRESET_NONE)
    assert dev.values.primary.data == HVAC_MODE_HEAT_COOL
def test_operation_value_set_heat_cool(device_heat_cool):
    """Clearing an eco preset restores the matching heat or cool base mode."""
    dev = device_heat_cool
    assert dev.values.primary.data == HVAC_MODE_HEAT
    dev.set_preset_mode("Heat Eco")
    assert dev.values.primary.data == "Heat Eco"
    dev.set_preset_mode(PRESET_NONE)
    assert dev.values.primary.data == HVAC_MODE_HEAT
    dev.set_preset_mode("Cool Eco")
    assert dev.values.primary.data == "Cool Eco"
    dev.set_preset_mode(PRESET_NONE)
    assert dev.values.primary.data == HVAC_MODE_COOL
def test_fan_mode_value_set(device):
    """set_fan_mode writes the fan_mode value; no-op when it is absent."""
    assert device.values.fan_mode.data == "test2"
    device.set_fan_mode("test_fan_set")
    assert device.values.fan_mode.data == "test_fan_set"
    # With no fan_mode value the setter must fail silently.
    device.values.fan_mode = None
    device.set_fan_mode("test_fan_set_failes")
    assert device.values.fan_mode is None
def test_target_value_changed(device):
    """target_temperature tracks the setpoint of the active HVAC mode."""
    assert device.target_temperature == 1
    device.values.setpoint_heating.data = 2
    value_changed(device.values.setpoint_heating)
    assert device.target_temperature == 2
    # Switching to cool swaps the reported target to the cooling setpoint.
    device.values.primary.data = HVAC_MODE_COOL
    value_changed(device.values.primary)
    assert device.target_temperature == 10
    device.values.setpoint_cooling.data = 9
    value_changed(device.values.setpoint_cooling)
    assert device.target_temperature == 9
def test_target_range_changed(device_heat_cool_range):
    """Low/high targets track the heating and cooling setpoint values."""
    dev = device_heat_cool_range
    assert dev.target_temperature_low == 1
    assert dev.target_temperature_high == 10
    dev.values.setpoint_heating.data = 2
    value_changed(dev.values.setpoint_heating)
    assert dev.target_temperature_low == 2
    assert dev.target_temperature_high == 10
    dev.values.setpoint_cooling.data = 9
    value_changed(dev.values.setpoint_cooling)
    assert dev.target_temperature_low == 2
    assert dev.target_temperature_high == 9
def test_target_changed_preset_range(device_heat_cool_away):
    """Away preset switches the reported range to the away setpoints."""
    dev = device_heat_cool_away
    assert dev.target_temperature_low == 2
    assert dev.target_temperature_high == 9
    dev.values.primary.data = PRESET_AWAY
    value_changed(dev.values.primary)
    assert dev.target_temperature_low == 1
    assert dev.target_temperature_high == 10
    dev.values.setpoint_away_heating.data = 0
    value_changed(dev.values.setpoint_away_heating)
    dev.values.setpoint_away_cooling.data = 11
    value_changed(dev.values.setpoint_away_cooling)
    assert dev.target_temperature_low == 0
    assert dev.target_temperature_high == 11
    # Leaving away mode restores the normal setpoint range.
    dev.values.primary.data = HVAC_MODE_HEAT_COOL
    value_changed(dev.values.primary)
    assert dev.target_temperature_low == 2
    assert dev.target_temperature_high == 9
def test_target_changed_eco(device_heat_eco):
    """Eco preset switches the reported target to the eco setpoint."""
    dev = device_heat_eco
    assert dev.target_temperature == 2
    dev.values.primary.data = "heat econ"
    value_changed(dev.values.primary)
    assert dev.target_temperature == 1
    dev.values.setpoint_eco_heating.data = 0
    value_changed(dev.values.setpoint_eco_heating)
    assert dev.target_temperature == 0
    # Back to plain heat: the normal heating setpoint is reported again.
    dev.values.primary.data = HVAC_MODE_HEAT
    value_changed(dev.values.primary)
    assert dev.target_temperature == 2
def test_target_changed_with_mode(device):
    """Reported targets switch between a single value and a low/high range."""
    assert device.hvac_mode == HVAC_MODE_HEAT
    assert device.target_temperature == 1
    device.values.primary.data = HVAC_MODE_COOL
    value_changed(device.values.primary)
    assert device.target_temperature == 10
    # heat_cool mode exposes the setpoint pair as a low/high range instead.
    device.values.primary.data = HVAC_MODE_HEAT_COOL
    value_changed(device.values.primary)
    assert device.target_temperature_low == 1
    assert device.target_temperature_high == 10
def test_target_value_changed_single_setpoint(device_single_setpoint):
    """target_temperature follows the primary value on single-setpoint units."""
    dev = device_single_setpoint
    assert dev.target_temperature == 1
    dev.values.primary.data = 2
    value_changed(dev.values.primary)
    assert dev.target_temperature == 2
def test_temperature_value_changed(device):
    """current_temperature follows updates to the zwave temperature value."""
    assert device.current_temperature == 5
    device.values.temperature.data = 3
    value_changed(device.values.temperature)
    assert device.current_temperature == 3
def test_operation_value_changed(device):
    """hvac_mode follows the primary value; a missing value falls back."""
    assert device.hvac_mode == HVAC_MODE_HEAT
    assert device.preset_mode == PRESET_NONE
    device.values.primary.data = HVAC_MODE_COOL
    value_changed(device.values.primary)
    assert device.hvac_mode == HVAC_MODE_COOL
    assert device.preset_mode == PRESET_NONE
    device.values.primary.data = HVAC_MODE_OFF
    value_changed(device.values.primary)
    assert device.hvac_mode == HVAC_MODE_OFF
    assert device.preset_mode == PRESET_NONE
    # Losing the primary value defaults to heat_cool with no preset.
    device.values.primary = None
    assert device.hvac_mode == HVAC_MODE_HEAT_COOL
    assert device.preset_mode == PRESET_NONE
def test_operation_value_changed_preset(device_mapping):
    """A preset raw state reports heat_cool plus the matching preset."""
    dev = device_mapping
    assert dev.hvac_mode == HVAC_MODE_HEAT
    assert dev.preset_mode == PRESET_NONE
    dev.values.primary.data = PRESET_ECO
    value_changed(dev.values.primary)
    assert dev.hvac_mode == HVAC_MODE_HEAT_COOL
    assert dev.preset_mode == PRESET_ECO
def test_operation_value_changed_mapping(device_mapping):
    """Raw device states map onto the standard hvac modes."""
    dev = device_mapping
    assert dev.hvac_mode == HVAC_MODE_HEAT
    assert dev.preset_mode == PRESET_NONE
    dev.values.primary.data = "Off"
    value_changed(dev.values.primary)
    assert dev.hvac_mode == HVAC_MODE_OFF
    assert dev.preset_mode == PRESET_NONE
    dev.values.primary.data = "Cool"
    value_changed(dev.values.primary)
    assert dev.hvac_mode == HVAC_MODE_COOL
    assert dev.preset_mode == PRESET_NONE
def test_operation_value_changed_mapping_preset(device_mapping):
    """A mapped preset raw state yields heat_cool + preset; None resets."""
    dev = device_mapping
    assert dev.hvac_mode == HVAC_MODE_HEAT
    assert dev.preset_mode == PRESET_NONE
    dev.values.primary.data = "Full Power"
    value_changed(dev.values.primary)
    assert dev.hvac_mode == HVAC_MODE_HEAT_COOL
    assert dev.preset_mode == PRESET_BOOST
    dev.values.primary = None
    assert dev.hvac_mode == HVAC_MODE_HEAT_COOL
    assert dev.preset_mode == PRESET_NONE
def test_operation_value_changed_unknown(device_unknown):
    """An unknown raw state is surfaced verbatim as the preset."""
    dev = device_unknown
    assert dev.hvac_mode == HVAC_MODE_HEAT
    assert dev.preset_mode == PRESET_NONE
    dev.values.primary.data = "Abcdefg"
    value_changed(dev.values.primary)
    assert dev.hvac_mode == HVAC_MODE_HEAT_COOL
    assert dev.preset_mode == "Abcdefg"
def test_operation_value_changed_heat_cool(device_heat_cool):
    """Eco raw states report the base mode plus an eco preset string."""
    dev = device_heat_cool
    assert dev.hvac_mode == HVAC_MODE_HEAT
    assert dev.preset_mode == PRESET_NONE
    dev.values.primary.data = "Cool Eco"
    value_changed(dev.values.primary)
    assert dev.hvac_mode == HVAC_MODE_COOL
    assert dev.preset_mode == "Cool Eco"
    dev.values.primary.data = "Heat Eco"
    value_changed(dev.values.primary)
    assert dev.hvac_mode == HVAC_MODE_HEAT
    assert dev.preset_mode == "Heat Eco"
def test_fan_mode_value_changed(device):
    """fan_mode follows updates to the zwave fan_mode value."""
    assert device.fan_mode == "test2"
    device.values.fan_mode.data = "test_updated_fan"
    value_changed(device.values.fan_mode)
    assert device.fan_mode == "test_updated_fan"
def test_hvac_action_value_changed(device):
    """hvac_action follows updates to the operating_state value."""
    assert device.hvac_action == CURRENT_HVAC_HEAT
    device.values.operating_state.data = CURRENT_HVAC_COOL
    value_changed(device.values.operating_state)
    assert device.hvac_action == CURRENT_HVAC_COOL
def test_hvac_action_value_changed_mapping(device_mapping):
    """Raw operating states map onto the standard hvac actions."""
    dev = device_mapping
    assert dev.hvac_action == CURRENT_HVAC_HEAT
    dev.values.operating_state.data = "cooling"
    value_changed(dev.values.operating_state)
    assert dev.hvac_action == CURRENT_HVAC_COOL
def test_hvac_action_value_changed_unknown(device_unknown):
    """Unknown operating states are reported verbatim."""
    dev = device_unknown
    assert dev.hvac_action == "test4"
    dev.values.operating_state.data = "another_hvac_action"
    value_changed(dev.values.operating_state)
    assert dev.hvac_action == "another_hvac_action"
def test_fan_action_value_changed(device):
    """The fan-action state attribute follows the fan_action value."""
    assert device.device_state_attributes[climate.ATTR_FAN_ACTION] == 7
    device.values.fan_action.data = 9
    value_changed(device.values.fan_action)
    assert device.device_state_attributes[climate.ATTR_FAN_ACTION] == 9
def test_aux_heat_unsupported_set(device):
    """turn_aux_heat_on/off are no-ops when aux heat is unsupported."""
    # Fix: dropped the redundant `device = device` self-assignment (a no-op).
    assert device.values.primary.data == HVAC_MODE_HEAT
    device.turn_aux_heat_on()
    assert device.values.primary.data == HVAC_MODE_HEAT
    device.turn_aux_heat_off()
    assert device.values.primary.data == HVAC_MODE_HEAT
def test_aux_heat_unsupported_value_changed(device):
    """is_aux_heat stays None when the device does not support aux heat."""
    # Fix: dropped the redundant `device = device` self-assignment (a no-op).
    assert device.is_aux_heat is None
    device.values.primary.data = HVAC_MODE_HEAT
    value_changed(device.values.primary)
    assert device.is_aux_heat is None
def test_aux_heat_set(device_aux_heat):
    """Aux-heat on/off toggles the primary value between modes."""
    dev = device_aux_heat
    assert dev.values.primary.data == HVAC_MODE_HEAT
    dev.turn_aux_heat_on()
    assert dev.values.primary.data == AUX_HEAT_ZWAVE_MODE
    dev.turn_aux_heat_off()
    assert dev.values.primary.data == HVAC_MODE_HEAT
def test_aux_heat_value_changed(device_aux_heat):
    """is_aux_heat tracks whether the primary value holds the aux-heat mode."""
    dev = device_aux_heat
    assert dev.is_aux_heat is False
    dev.values.primary.data = AUX_HEAT_ZWAVE_MODE
    value_changed(dev.values.primary)
    assert dev.is_aux_heat is True
    dev.values.primary.data = HVAC_MODE_HEAT
    value_changed(dev.values.primary)
    assert dev.is_aux_heat is False
|
apache-2.0
|
dbckz/ansible
|
lib/ansible/plugins/terminal/asa.py
|
57
|
2408
|
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import json
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
class TerminalModule(TerminalBase):
    """Terminal plugin for Cisco ASA devices.

    Provides the prompt/error regexes used to delimit CLI output and the
    privilege-escalation hooks (enable/disable) for the network_cli
    connection.
    """
    # Patterns that match the end-of-output prompt on stdout.
    terminal_stdout_re = [
        re.compile(r"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
        re.compile(r"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$")
    ]
    # Patterns that indicate the device rejected or failed a command.
    terminal_stderr_re = [
        re.compile(r"% ?Error"),
        re.compile(r"^% \w+", re.M),
        re.compile(r"% ?Bad secret"),
        re.compile(r"invalid input", re.I),
        re.compile(r"(?:incomplete|ambiguous) command", re.I),
        re.compile(r"connection timed out", re.I),
        re.compile(r"[^\r\n]+ not found", re.I),
        re.compile(r"'[^']' +returned error code: ?\d+"),
    ]
    def authorize(self, passwd=None):
        """Enter enable mode (optionally with a password) and disable paging."""
        # A prompt ending in '#' means we are already privileged.
        if self._get_prompt().endswith('#'):
            return
        cmd = {'command': 'enable'}
        if passwd:
            cmd['prompt'] = r"[\r\n]?password: $"
            cmd['answer'] = passwd
        try:
            self._exec_cli_command(json.dumps(cmd))
            # Disable the terminal pager so long output is not paginated.
            self._exec_cli_command('terminal pager 0')
        except AnsibleConnectionFailure:
            raise AnsibleConnectionFailure('unable to elevate privilege to enable mode')
    def on_deauthorize(self):
        """Leave config/enable mode before the session is handed back."""
        prompt = self._get_prompt()
        if prompt is None:
            # if prompt is None most likely the terminal is hung up at a prompt
            return
        if '(config' in prompt:
            # Exit configuration mode first, then drop privileges.
            self._exec_cli_command('end')
            self._exec_cli_command('disable')
        elif prompt.endswith('#'):
            self._exec_cli_command('disable')
|
gpl-3.0
|
havard024/prego
|
crm/lib/python2.7/site-packages/whoosh/matching/binary.py
|
94
|
24452
|
# Copyright 2010 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
from whoosh.matching import mcore
class BiMatcher(mcore.Matcher):
    """Base class for matchers that combine the results of two sub-matchers in
    some way.
    """
    def __init__(self, a, b):
        super(BiMatcher, self).__init__()
        # The two wrapped sub-matchers; subclasses define how they combine.
        self.a = a
        self.b = b
    def reset(self):
        # Rewind both sub-matchers to their first posting.
        self.a.reset()
        self.b.reset()
    def __repr__(self):
        return "%s(%r, %r)" % (self.__class__.__name__, self.a, self.b)
    def children(self):
        return [self.a, self.b]
    def copy(self):
        return self.__class__(self.a.copy(), self.b.copy())
    def depth(self):
        # Depth of the matcher tree rooted here.
        return 1 + max(self.a.depth(), self.b.depth())
    def skip_to(self, id):
        if not self.is_active():
            raise mcore.ReadTooFar
        ra = self.a.skip_to(id)
        rb = self.b.skip_to(id)
        # True if either sub-matcher actually moved.
        return ra or rb
    def supports_block_quality(self):
        # Block quality is only usable when both sides support it.
        return (self.a.supports_block_quality()
                and self.b.supports_block_quality())
    def supports(self, astype):
        return self.a.supports(astype) and self.b.supports(astype)
class AdditiveBiMatcher(BiMatcher):
    """Base class for binary matchers where the scores of the sub-matchers are
    added together.
    """
    def max_quality(self):
        # Sum the max qualities of whichever sub-matchers are still active.
        q = 0.0
        if self.a.is_active():
            q += self.a.max_quality()
        if self.b.is_active():
            q += self.b.max_quality()
        return q
    def block_quality(self):
        bq = 0.0
        if self.a.is_active():
            bq += self.a.block_quality()
        if self.b.is_active():
            bq += self.b.block_quality()
        return bq
    def weight(self):
        return (self.a.weight() + self.b.weight())
    def score(self):
        return (self.a.score() + self.b.score())
    # NOTE(review): the comparison operators below compare by *type only*,
    # not by sub-matcher contents — presumably intentional for ordering
    # matchers in containers; confirm before relying on equality semantics.
    def __eq__(self, other):
        return self.__class__ is type(other)
    def __lt__(self, other):
        return type(other) is self.__class__
    def __ne__(self, other):
        return not self.__eq__(other)
    def __gt__(self, other):
        return not (self.__lt__(other) or self.__eq__(other))
    def __le__(self, other):
        return self.__eq__(other) or self.__lt__(other)
    def __ge__(self, other):
        return self.__eq__(other) or self.__gt__(other)
class UnionMatcher(AdditiveBiMatcher):
    """Matches the union (OR) of the postings in the two sub-matchers.
    """
    # Cached current document id; invalidated (set to None) on any movement.
    _id = None
    def replace(self, minquality=0):
        """Return a (possibly simpler) matcher equivalent to this one, given
        that results below ``minquality`` can no longer contribute.
        """
        a = self.a
        b = self.b
        a_active = a.is_active()
        b_active = b.is_active()
        # If neither sub-matcher on its own has a high enough max quality to
        # contribute, convert to an intersection matcher
        if minquality and a_active and b_active:
            a_max = a.max_quality()
            b_max = b.max_quality()
            if a_max < minquality and b_max < minquality:
                return IntersectionMatcher(a, b).replace(minquality)
            elif a_max < minquality:
                return AndMaybeMatcher(b, a)
            elif b_max < minquality:
                return AndMaybeMatcher(a, b)
        # If one or both of the sub-matchers are inactive, convert
        if not (a_active or b_active):
            return mcore.NullMatcher()
        elif not a_active:
            return b.replace(minquality)
        elif not b_active:
            return a.replace(minquality)
        # Each side only needs to supply the quality the other cannot.
        a = a.replace(minquality - b.max_quality() if minquality else 0)
        b = b.replace(minquality - a.max_quality() if minquality else 0)
        # If one of the sub-matchers changed, return a new union
        if a is not self.a or b is not self.b:
            return self.__class__(a, b)
        else:
            self._id = None
            return self
    def is_active(self):
        # The union is alive while either side still has postings.
        return self.a.is_active() or self.b.is_active()
    def skip_to(self, id):
        # Invalidate the cached id before moving either side.
        self._id = None
        ra = rb = False
        if self.a.is_active():
            ra = self.a.skip_to(id)
        if self.b.is_active():
            rb = self.b.skip_to(id)
        return ra or rb
    def id(self):
        """Return the current document id: the minimum of the two sides'
        ids (cached until the matcher moves).
        """
        _id = self._id
        if _id is not None:
            return _id
        a = self.a
        b = self.b
        if not a.is_active():
            _id = b.id()
        elif not b.is_active():
            _id = a.id()
        else:
            _id = min(a.id(), b.id())
        self._id = _id
        return _id
    # Using sets is faster in most cases, but could potentially use a lot of
    # memory. Comment out this method override to not use sets.
    #def all_ids(self):
    #    return iter(sorted(set(self.a.all_ids()) | set(self.b.all_ids())))
    def next(self):
        """Advance to the next document, moving whichever side(s) sit on the
        current minimum id.
        """
        self._id = None
        a = self.a
        b = self.b
        a_active = a.is_active()
        b_active = b.is_active()
        # Shortcut when one matcher is inactive
        if not (a_active or b_active):
            raise mcore.ReadTooFar
        elif not a_active:
            return b.next()
        elif not b_active:
            return a.next()
        a_id = a.id()
        b_id = b.id()
        ar = br = None
        # After all that, here's the actual implementation
        if a_id <= b_id:
            ar = a.next()
        if b_id <= a_id:
            br = b.next()
        return ar or br
    def spans(self):
        # Delegate to whichever side is on the current (lower) id; merge
        # both span sets when the ids coincide.
        if not self.a.is_active():
            return self.b.spans()
        if not self.b.is_active():
            return self.a.spans()
        id_a = self.a.id()
        id_b = self.b.id()
        if id_a < id_b:
            return self.a.spans()
        elif id_b < id_a:
            return self.b.spans()
        else:
            return sorted(set(self.a.spans()) | set(self.b.spans()))
    def weight(self):
        # Sum weights only when both sides match the current document.
        a = self.a
        b = self.b
        if not a.is_active():
            return b.weight()
        if not b.is_active():
            return a.weight()
        id_a = a.id()
        id_b = b.id()
        if id_a < id_b:
            return a.weight()
        elif id_b < id_a:
            return b.weight()
        else:
            return (a.weight() + b.weight())
    def score(self):
        # Sum scores only when both sides match the current document.
        a = self.a
        b = self.b
        if not a.is_active():
            return b.score()
        if not b.is_active():
            return a.score()
        id_a = a.id()
        id_b = b.id()
        if id_a < id_b:
            return a.score()
        elif id_b < id_a:
            return b.score()
        else:
            return (a.score() + b.score())
    def skip_to_quality(self, minquality):
        """Skip both sides forward until their combined block quality can
        exceed ``minquality``; return the number of blocks skipped.
        """
        self._id = None
        a = self.a
        b = self.b
        if not (a.is_active() or b.is_active()):
            raise mcore.ReadTooFar
        # Short circuit if one matcher is inactive
        if not a.is_active():
            return b.skip_to_quality(minquality)
        elif not b.is_active():
            return a.skip_to_quality(minquality)
        skipped = 0
        aq = a.block_quality()
        bq = b.block_quality()
        while a.is_active() and b.is_active() and aq + bq <= minquality:
            # Advance the side with the lower block quality; it only has to
            # make up the balance the other side cannot provide.
            if aq < bq:
                skipped += a.skip_to_quality(minquality - bq)
                aq = a.block_quality()
            else:
                skipped += b.skip_to_quality(minquality - aq)
                bq = b.block_quality()
        return skipped
class DisjunctionMaxMatcher(UnionMatcher):
    """Matches the union (OR) of two sub-matchers. Where both sub-matchers
    match the same posting, returns the weight/score of the higher-scoring
    posting.
    """
    # TODO: this class inherits from AdditiveBiMatcher (through UnionMatcher)
    # but it does not add the scores of the sub-matchers together (it
    # overrides all methods that perform addition). Need to clean up the
    # inheritance.
    def __init__(self, a, b, tiebreak=0.0):
        super(DisjunctionMaxMatcher, self).__init__(a, b)
        # Extra score added when both sides match (kept through copy()).
        self.tiebreak = tiebreak
    def copy(self):
        return self.__class__(self.a.copy(), self.b.copy(),
                              tiebreak=self.tiebreak)
    def replace(self, minquality=0):
        """Return a simplified equivalent matcher, pruning sides whose max
        quality cannot reach ``minquality``.
        """
        a = self.a
        b = self.b
        a_active = a.is_active()
        b_active = b.is_active()
        # DisMax takes the max of the sub-matcher qualities instead of adding
        # them, so we need special logic here
        if minquality and a_active and b_active:
            a_max = a.max_quality()
            b_max = b.max_quality()
            if a_max < minquality and b_max < minquality:
                # If neither sub-matcher has a high enough max quality to
                # contribute, return an inactive matcher
                return mcore.NullMatcher()
            elif b_max < minquality:
                # If the b matcher can't contribute, return a
                return a.replace(minquality)
            elif a_max < minquality:
                # If the a matcher can't contribute, return b
                return b.replace(minquality)
        if not (a_active or b_active):
            return mcore.NullMatcher()
        elif not a_active:
            return b.replace(minquality)
        elif not b_active:
            return a.replace(minquality)
        # We CAN pass the minquality down here, since we don't add the two
        # scores together
        a = a.replace(minquality)
        b = b.replace(minquality)
        a_active = a.is_active()
        b_active = b.is_active()
        # It's kind of tedious to check for inactive sub-matchers all over
        # again here after we replace them, but it's probably better than
        # returning a replacement with an inactive sub-matcher
        if not (a_active and b_active):
            return mcore.NullMatcher()
        elif not a_active:
            return b
        elif not b_active:
            return a
        elif a is not self.a or b is not self.b:
            # If one of the sub-matchers changed, return a new DisMax
            return self.__class__(a, b)
        else:
            return self
    def score(self):
        # Max of the two scores (not the sum, unlike AdditiveBiMatcher).
        if not self.a.is_active():
            return self.b.score()
        elif not self.b.is_active():
            return self.a.score()
        else:
            return max(self.a.score(), self.b.score())
    def max_quality(self):
        return max(self.a.max_quality(), self.b.max_quality())
    def block_quality(self):
        return max(self.a.block_quality(), self.b.block_quality())
    def skip_to_quality(self, minquality):
        """Skip until the *max* of the two block qualities can exceed
        ``minquality``; return the number of blocks skipped.
        """
        a = self.a
        b = self.b
        # Short circuit if one matcher is inactive
        if not a.is_active():
            sk = b.skip_to_quality(minquality)
            return sk
        elif not b.is_active():
            return a.skip_to_quality(minquality)
        skipped = 0
        aq = a.block_quality()
        bq = b.block_quality()
        while a.is_active() and b.is_active() and max(aq, bq) <= minquality:
            # Each side must clear minquality on its own, since scores are
            # not added together.
            if aq <= minquality:
                skipped += a.skip_to_quality(minquality)
                aq = a.block_quality()
            if bq <= minquality:
                skipped += b.skip_to_quality(minquality)
                bq = b.block_quality()
        return skipped
class IntersectionMatcher(AdditiveBiMatcher):
"""Matches the intersection (AND) of the postings in the two sub-matchers.
"""
def __init__(self, a, b):
super(IntersectionMatcher, self).__init__(a, b)
self._find_first()
def reset(self):
self.a.reset()
self.b.reset()
self._find_first()
def _find_first(self):
if (self.a.is_active()
and self.b.is_active()
and self.a.id() != self.b.id()):
self._find_next()
def replace(self, minquality=0):
a = self.a
b = self.b
a_active = a.is_active()
b_active = b.is_active()
if not (a_active and b_active):
# Intersection matcher requires that both sub-matchers be active
return mcore.NullMatcher()
if minquality:
a_max = a.max_quality()
b_max = b.max_quality()
if a_max + b_max < minquality:
# If the combined quality of the sub-matchers can't contribute,
# return an inactive matcher
return mcore.NullMatcher()
# Require that the replacements be able to contribute results
# higher than the minquality
a_min = minquality - b_max
b_min = minquality - a_max
else:
a_min = b_min = 0
a = a.replace(a_min)
b = b.replace(b_min)
a_active = a.is_active()
b_active = b.is_active()
if not (a_active or b_active):
return mcore.NullMatcher()
elif not a_active:
return b
elif not b_active:
return a
elif a is not self.a or b is not self.b:
return self.__class__(a, b)
else:
return self
def is_active(self):
return self.a.is_active() and self.b.is_active()
def _find_next(self):
a = self.a
b = self.b
a_id = a.id()
b_id = b.id()
assert a_id != b_id
r = False
while a.is_active() and b.is_active() and a_id != b_id:
if a_id < b_id:
ra = a.skip_to(b_id)
if not a.is_active():
return
r = r or ra
a_id = a.id()
else:
rb = b.skip_to(a_id)
if not b.is_active():
return
r = r or rb
b_id = b.id()
return r
def id(self):
return self.a.id()
# Using sets is faster in some cases, but could potentially use a lot of
# memory
def all_ids(self):
return iter(sorted(set(self.a.all_ids()) & set(self.b.all_ids())))
def skip_to(self, id):
if not self.is_active():
raise mcore.ReadTooFar
ra = self.a.skip_to(id)
rb = self.b.skip_to(id)
if self.is_active():
rn = False
if self.a.id() != self.b.id():
rn = self._find_next()
return ra or rb or rn
def skip_to_quality(self, minquality):
a = self.a
b = self.b
minquality = minquality
skipped = 0
aq = a.block_quality()
bq = b.block_quality()
while a.is_active() and b.is_active() and aq + bq <= minquality:
if aq < bq:
# If the block quality of A is less than B, skip A ahead until
# it can contribute at least the balance of the required min
# quality when added to B
sk = a.skip_to_quality(minquality - bq)
skipped += sk
if not sk and a.is_active():
# The matcher couldn't skip ahead for some reason, so just
# advance and try again
a.next()
else:
# And vice-versa
sk = b.skip_to_quality(minquality - aq)
skipped += sk
if not sk and b.is_active():
b.next()
if not a.is_active() or not b.is_active():
# One of the matchers is exhausted
break
if a.id() != b.id():
# We want to always leave in a state where the matchers are at
# the same document, so call _find_next() to sync them
self._find_next()
# Get the block qualities at the new matcher positions
aq = a.block_quality()
bq = b.block_quality()
return skipped
def next(self):
if not self.is_active():
raise mcore.ReadTooFar
# We must assume that the ids are equal whenever next() is called (they
# should have been made equal by _find_next), so advance them both
ar = self.a.next()
if self.is_active():
nr = self._find_next()
return ar or nr
def spans(self):
    """Return the combined, de-duplicated, sorted spans of both
    sub-matchers."""
    merged = set(self.a.spans())
    merged.update(self.b.spans())
    return sorted(merged)
class AndNotMatcher(BiMatcher):
    """Matches the postings in the first sub-matcher that are NOT present in
    the second sub-matcher.
    """

    def __init__(self, a, b):
        # a: the required ("positive") matcher; b: the prohibited matcher.
        super(AndNotMatcher, self).__init__(a, b)
        self._find_first()

    def reset(self):
        # Rewind both sub-matchers and re-establish the invariant that the
        # positive matcher is not sitting on a prohibited document.
        self.a.reset()
        self.b.reset()
        self._find_first()

    def _find_first(self):
        # If both matchers start on the same document, move past it.
        if (self.a.is_active()
            and self.b.is_active()
            and self.a.id() == self.b.id()):
            self._find_next()

    def is_active(self):
        # Liveness is determined solely by the required matcher.
        return self.a.is_active()

    def _find_next(self):
        # Advance the positive matcher until it sits on a document the
        # negative matcher does NOT match.  Returns truthy if any advance
        # reported movement; None if the negative matcher is exhausted.
        pos = self.a
        neg = self.b
        if not neg.is_active():
            return
        pos_id = pos.id()
        r = False

        if neg.id() < pos_id:
            neg.skip_to(pos_id)

        while pos.is_active() and neg.is_active() and pos_id == neg.id():
            nr = pos.next()
            if not pos.is_active():
                break

            r = r or nr
            pos_id = pos.id()
            neg.skip_to(pos_id)

        return r

    def supports_block_quality(self):
        # Quality comes entirely from the required matcher.
        return self.a.supports_block_quality()

    def replace(self, minquality=0):
        """Return a simplified equivalent of this matcher, if possible."""
        if not self.a.is_active():
            # The a matcher is required, so if it's inactive, return an
            # inactive matcher
            return mcore.NullMatcher()
        elif (minquality
              and self.a.max_quality() < minquality):
            # If the quality of the required matcher isn't high enough to
            # contribute, return an inactive matcher
            return mcore.NullMatcher()
        elif not self.b.is_active():
            # If the prohibited matcher is inactive, convert to just the
            # required matcher
            return self.a.replace(minquality)

        a = self.a.replace(minquality)
        b = self.b.replace()
        if a is not self.a or b is not self.b:
            # If one of the sub-matchers was replaced, return a new AndNot
            return self.__class__(a, b)
        else:
            return self

    def max_quality(self):
        return self.a.max_quality()

    def block_quality(self):
        return self.a.block_quality()

    def skip_to_quality(self, minquality):
        # Skip the required matcher, then re-sync away from prohibited docs.
        skipped = self.a.skip_to_quality(minquality)
        self._find_next()
        return skipped

    def id(self):
        return self.a.id()

    def next(self):
        """Advance past the current document, skipping prohibited ones."""
        if not self.a.is_active():
            raise mcore.ReadTooFar
        ar = self.a.next()
        nr = False
        if self.a.is_active() and self.b.is_active():
            nr = self._find_next()
        return ar or nr

    def skip_to(self, id):
        """Skip to *id* (no-op when id is behind the current position)."""
        if not self.a.is_active():
            raise mcore.ReadTooFar
        if id < self.a.id():
            return

        self.a.skip_to(id)
        if self.b.is_active():
            self.b.skip_to(id)
            self._find_next()

    # Scoring/value accessors simply delegate to the required matcher.
    def weight(self):
        return self.a.weight()

    def score(self):
        return self.a.score()

    def supports(self, astype):
        return self.a.supports(astype)

    def value(self):
        return self.a.value()

    def value_as(self, astype):
        return self.a.value_as(astype)
class AndMaybeMatcher(AdditiveBiMatcher):
    """Matches postings in the first sub-matcher, and if the same posting is
    in the second sub-matcher, adds their scores.
    """

    def __init__(self, a, b):
        # a: required matcher; b: optional score-boosting matcher.
        AdditiveBiMatcher.__init__(self, a, b)
        self._first_b()

    def reset(self):
        # Rewind both sub-matchers and re-align the optional matcher.
        self.a.reset()
        self.b.reset()
        self._first_b()

    def _first_b(self):
        # Pull the optional matcher up to the required matcher's document.
        a = self.a
        b = self.b
        if a.is_active() and b.is_active() and a.id() != b.id():
            b.skip_to(a.id())

    def is_active(self):
        # Only the required matcher determines liveness.
        return self.a.is_active()

    def id(self):
        return self.a.id()

    def next(self):
        """Advance the required matcher; drag the optional one along."""
        if not self.a.is_active():
            raise mcore.ReadTooFar

        ar = self.a.next()
        br = False
        if self.a.is_active() and self.b.is_active():
            br = self.b.skip_to(self.a.id())
        return ar or br

    def skip_to(self, id):
        """Skip both sub-matchers forward to *id*."""
        if not self.a.is_active():
            raise mcore.ReadTooFar

        ra = self.a.skip_to(id)
        rb = False
        if self.a.is_active() and self.b.is_active():
            rb = self.b.skip_to(id)
        return ra or rb

    def replace(self, minquality=0):
        """Return a simplified equivalent of this matcher, if possible."""
        a = self.a
        b = self.b
        a_active = a.is_active()
        b_active = b.is_active()

        if not a_active:
            return mcore.NullMatcher()
        elif minquality and b_active:
            if a.max_quality() + b.max_quality() < minquality:
                # If the combined max quality of the sub-matchers isn't high
                # enough to possibly contribute, return an inactive matcher
                return mcore.NullMatcher()
            elif a.max_quality() < minquality:
                # If the max quality of the main sub-matcher isn't high enough
                # to ever contribute without the optional sub- matcher, change
                # into an IntersectionMatcher
                return IntersectionMatcher(self.a, self.b)
        elif not b_active:
            return a.replace(minquality)

        new_a = a.replace(minquality - b.max_quality())
        new_b = b.replace(minquality - a.max_quality())
        if new_a is not a or new_b is not b:
            # If one of the sub-matchers changed, return a new AndMaybe
            return self.__class__(new_a, new_b)
        else:
            return self

    def skip_to_quality(self, minquality):
        """Skip until combined block quality can exceed *minquality*;
        return the number of blocks skipped."""
        a = self.a
        b = self.b
        # NOTE(review): no-op self-assignment, kept as-is.
        minquality = minquality

        if not a.is_active():
            raise mcore.ReadTooFar
        if not b.is_active():
            return a.skip_to_quality(minquality)

        skipped = 0
        aq = a.block_quality()
        bq = b.block_quality()
        while a.is_active() and b.is_active() and aq + bq <= minquality:
            if aq < bq:
                skipped += a.skip_to_quality(minquality - bq)
                aq = a.block_quality()
            else:
                skipped += b.skip_to_quality(minquality - aq)
                bq = b.block_quality()

        return skipped

    def weight(self):
        # NOTE(review): unlike score(), this reads self.b.id() without
        # checking self.b.is_active() first — confirm the b matcher cannot
        # be exhausted when weight() is called.
        if self.a.id() == self.b.id():
            return self.a.weight() + self.b.weight()
        else:
            return self.a.weight()

    def score(self):
        # Add the optional matcher's score only when it is on the same
        # document as the required matcher.
        if self.b.is_active() and self.a.id() == self.b.id():
            return self.a.score() + self.b.score()
        else:
            return self.a.score()

    def supports(self, astype):
        return self.a.supports(astype)

    def value(self):
        return self.a.value()

    def value_as(self, astype):
        return self.a.value_as(astype)
|
mit
|
912/M-new
|
virtualenvironment/experimental/lib/python2.7/site-packages/django/contrib/admin/tests.py
|
19
|
6158
|
import os
from unittest import SkipTest
from django.contrib.staticfiles.testing import StaticLiveServerCase
from django.utils.module_loading import import_string
from django.utils.translation import ugettext as _
class AdminSeleniumWebDriverTestCase(StaticLiveServerCase):
    """Live-server test case driving the Django admin through Selenium.

    The whole class skips itself unless the DJANGO_SELENIUM_TESTS
    environment variable is set; the browser is created from the dotted
    path in ``webdriver_class``.  Provides wait/login/DOM helpers for
    subclasses.
    """

    # Minimal set of apps required for the admin to run under the live
    # server.
    available_apps = [
        'django.contrib.admin',
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.sessions',
        'django.contrib.sites',
    ]
    # Dotted path of the Selenium webdriver class to instantiate.
    webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'

    @classmethod
    def setUpClass(cls):
        # Opt-in guard: Selenium tests only run when explicitly requested.
        if not os.environ.get('DJANGO_SELENIUM_TESTS', False):
            raise SkipTest('Selenium tests not requested')
        try:
            cls.selenium = import_string(cls.webdriver_class)()
        except Exception as e:
            raise SkipTest('Selenium webdriver "%s" not installed or not '
                           'operational: %s' % (cls.webdriver_class, str(e)))
        # This has to be last to ensure that resources are cleaned up properly!
        super(AdminSeleniumWebDriverTestCase, cls).setUpClass()

    @classmethod
    def _tearDownClassInternal(cls):
        # Quit the browser (if one was started) before the live server
        # shuts down.
        if hasattr(cls, 'selenium'):
            cls.selenium.quit()
        super(AdminSeleniumWebDriverTestCase, cls)._tearDownClassInternal()

    def wait_until(self, callback, timeout=10):
        """
        Helper function that blocks the execution of the tests until the
        specified callback returns a value that is not falsy. This function can
        be called, for example, after clicking a link or submitting a form.
        See the other public methods that call this function for more details.
        """
        from selenium.webdriver.support.wait import WebDriverWait
        WebDriverWait(self.selenium, timeout).until(callback)

    def wait_loaded_tag(self, tag_name, timeout=10):
        """
        Helper function that blocks until the element with the given tag name
        is found on the page.
        """
        self.wait_for(tag_name, timeout)

    def wait_for(self, css_selector, timeout=10):
        """
        Helper function that blocks until a CSS selector is found on the page.
        """
        from selenium.webdriver.common.by import By
        from selenium.webdriver.support import expected_conditions as ec
        self.wait_until(
            ec.presence_of_element_located((By.CSS_SELECTOR, css_selector)),
            timeout
        )

    def wait_for_text(self, css_selector, text, timeout=10):
        """
        Helper function that blocks until the text is found in the CSS selector.
        """
        from selenium.webdriver.common.by import By
        from selenium.webdriver.support import expected_conditions as ec
        self.wait_until(
            ec.text_to_be_present_in_element(
                (By.CSS_SELECTOR, css_selector), text),
            timeout
        )

    def wait_for_value(self, css_selector, text, timeout=10):
        """
        Helper function that blocks until the value is found in the CSS selector.
        """
        from selenium.webdriver.common.by import By
        from selenium.webdriver.support import expected_conditions as ec
        self.wait_until(
            ec.text_to_be_present_in_element_value(
                (By.CSS_SELECTOR, css_selector), text),
            timeout
        )

    def wait_page_loaded(self):
        """
        Block until page has started to load.
        """
        from selenium.common.exceptions import TimeoutException
        try:
            # Wait for the next page to be loaded
            self.wait_loaded_tag('body')
        except TimeoutException:
            # IE7 occasionally returns an error "Internet Explorer cannot
            # display the webpage" and doesn't load the next page. We just
            # ignore it.
            pass

    def admin_login(self, username, password, login_url='/admin/'):
        """
        Helper function to log into the admin.
        """
        self.selenium.get('%s%s' % (self.live_server_url, login_url))
        username_input = self.selenium.find_element_by_name('username')
        username_input.send_keys(username)
        password_input = self.selenium.find_element_by_name('password')
        password_input.send_keys(password)
        # The submit button's value is the translated "Log in" label.
        login_text = _('Log in')
        self.selenium.find_element_by_xpath(
            '//input[@value="%s"]' % login_text).click()
        self.wait_page_loaded()

    def get_css_value(self, selector, attribute):
        """
        Helper function that returns the value for the CSS attribute of an
        DOM element specified by the given selector. Uses the jQuery that ships
        with Django.
        """
        return self.selenium.execute_script(
            'return django.jQuery("%s").css("%s")' % (selector, attribute))

    def get_select_option(self, selector, value):
        """
        Returns the <OPTION> with the value `value` inside the <SELECT> widget
        identified by the CSS selector `selector`.
        """
        from selenium.common.exceptions import NoSuchElementException
        options = self.selenium.find_elements_by_css_selector('%s > option' % selector)
        for option in options:
            if option.get_attribute('value') == value:
                return option
        raise NoSuchElementException('Option "%s" not found in "%s"' % (value, selector))

    def assertSelectOptions(self, selector, values):
        """
        Asserts that the <SELECT> widget identified by `selector` has the
        options with the given `values`.
        """
        options = self.selenium.find_elements_by_css_selector('%s > option' % selector)
        actual_values = []
        for option in options:
            actual_values.append(option.get_attribute('value'))
        self.assertEqual(values, actual_values)

    def has_css_class(self, selector, klass):
        """
        Returns True if the element identified by `selector` has the CSS class
        `klass`.
        """
        return (self.selenium.find_element_by_css_selector(selector)
                .get_attribute('class').find(klass) != -1)
|
gpl-2.0
|
frobnitzem/forcesolve
|
cg_topol/edge.py
|
1
|
3933
|
# Edge creation logic
def add_edge(edge, i, j):
    """Insert the undirected bond (i, j) into the set *edge* as an
    ordered tuple (smaller index first)."""
    lo, hi = (i, j) if i < j else (j, i)
    edge.add((lo, hi))
def srt2(i, j):
    """Return the pair (i, j) with the smaller value first."""
    return (j, i) if i > j else (i, j)
def add_all_redge(edge, pdb, res_table, oi, ai, oj, aj):
    # For every anchor residue index in res_table, add an edge between atom
    # named ai at residue offset oi and atom named aj at residue offset oj.
    # res_table is pruned in place when the "from" atom is missing.
    mino = min(oi,oj)
    maxo = max(oi,oj)
    # Iterate in reverse so entries can be deleted from res_table in place.
    for i in reversed(range(len(res_table))):
        # Skip anchors whose offsets would fall outside the residue list.
        if res_table[i]+mino < 0 \
          or res_table[i]+maxo >= len(pdb.res):
            continue
        # Both offset residues must be on the same chain as the anchor.
        cchain = pdb.res[res_table[i]].chain_id
        ir = pdb.res[res_table[i]+oi]
        jr = pdb.res[res_table[i]+oj]
        if cchain != ir.chain_id or cchain != jr.chain_id:
            continue
        try:
            iat = [a.split()[0] for a in ir.names].index(ai)
        except ValueError:
            print "Warning! From atom %s not present in "\
                  "residue %s %d offset %d from %s"%(ai, \
                    ir.name, res_table[i]+oi+1, oi, \
                    pdb.res[res_table[i]].name)
            # Missing "from" atom: drop this anchor for subsequent specs.
            del res_table[i]
            continue
        try:
            jat = [a.split()[0] for a in jr.names].index(aj)
        except ValueError:
            # Common for variable-composition residues...
            continue
        add_edge(edge, ir.atom_zero+iat, jr.atom_zero+jat)
def append_edge(edge, pdb, line):
    # Add explicit EDGE records: line = [from_atom, to_atom, to_atom, ...],
    # where the from atom is given with 1-based numbering.
    i = int(line[0])-1
    # NOTE(review): the upper bound check uses > pdb.atoms on a 0-based
    # index; looks like it should be >= — confirm pdb.atoms semantics.
    if i < 0 or i > pdb.atoms:
        raise InputError, "Error EDGE line contains out-of"\
                "-range from atom number %d.\n"%(i+1)
    for to in line[1:]:
        j = int(to)
        # NOTE(review): j is not shifted by -1 like i, yet the error message
        # below prints j+1 — presumably an index-base inconsistency; verify
        # against actual EDGE input files before changing.
        if j < 0 or j > pdb.atoms:
            raise InputError, "Error EDGE line contains out-of"\
                "-range to atom number %d->%d.\n"%(i+1, j+1)
        add_edge(edge, i, j)
def append_redge(edge, pdb, line):
    """Add edges for a residue-based EDGE spec.

    line = [residue_name, atom_spec, atom_spec, ...], where atom specs are
    parsed by parse_aname into (offset, name) pairs; the first spec is the
    "from" atom, all following specs are "to" atoms.
    """
    rname = line[0]
    anames = parse_aname(line[1:])
    # Indices of every residue whose name matches the spec.
    res_table = [idx for idx in range(len(pdb.res))
                 if pdb.res[idx].name == rname]
    first_off, first_name = anames[0]
    for other_off, other_name in anames[1:]:
        # Note: add_all_redge may prune res_table in place; the same list
        # object is deliberately reused across iterations.
        add_all_redge(edge, pdb, res_table,
                      first_off, first_name, other_off, other_name)
# Parses a list of names and optional offsets into a list
# of the form [(off, name), (off, name), ...]
def parse_aname(tok):
    """Parse tokens into [(offset, name), ...].

    A token beginning with '+' or '-' is read as an integer residue offset
    that applies to the immediately following name token; names without a
    preceding offset token get offset 0.
    """
    parsed = []
    i = 0
    while i < len(tok):
        token = tok[i]
        if token[0] in "+-":
            # Offset token: consume it and pair it with the next token.
            offset = int(token)
            i += 1
            parsed.append((offset, tok[i]))
        else:
            parsed.append((0, tok[i]))
        i += 1
    return parsed
# Modular product set of input sets.
# Very useful for enumerating those pesky angles/torsions...
def modprod(*a):
    """Return the set of tuples formed by taking one element from each
    input iterable, in order (a Cartesian product as a set)."""
    combos = [(item,) for item in a[0]]
    for pool in a[1:]:
        combos = [prefix + (item,) for prefix in combos for item in pool]
    return set(combos)
################ old 1,n pair finding code ####################
# set join
def mconcat(m):
    """Union of an iterable of sets (empty set for an empty iterable)."""
    acc = set()
    for part in m:
        acc = acc | part
    return acc

# extend neighbors
def extend(pdb, x):
    """Return x together with every atom bonded to a member of x."""
    neighborhood = mconcat(pdb.conn[b] for b in x)
    return x | neighborhood
def orderset(a, x):
    """Return the set of ordered pairs {(min(a, b), max(a, b)) for b in x}."""
    return {(b, a) if a > b else (a, b) for b in x}
# n = 4 => include 1,4 pairs, but exclude 1,2 and 1,3
# builds an excluded pair list
def pair_excl(pdb, n=4):
    """Build the excluded-pair set: every ordered atom pair reachable in
    fewer than n-1 bonds (self-pairs included via the seed sets)."""
    assert n >= 2, "Can't count self-pairs."
    reach = [{atom} for atom in range(pdb.atoms)]
    for _ in range(n-2):  # Extend table by 1 bond.
        for atom in range(pdb.atoms):
            reach[atom] = extend(pdb, reach[atom])
    return mconcat([orderset(atom, members)
                    for atom, members in enumerate(reach)])
# find 1,n pairs
def pair_n(pdb, n=4):
    """Return the set of 1,n atom pairs: the one-bond frontier beyond
    everything reachable within n-2 bonds."""
    assert n >= 2, "Need at least 2 atoms to make a pair!"
    reach = [{atom} for atom in range(pdb.atoms)]
    for _ in range(n-2):  # Extend table by 1 bond.
        reach = [extend(pdb, members) for members in reach]
    frontier = [extend(pdb, members) - members for members in reach]
    return mconcat([orderset(atom, members)
                    for atom, members in enumerate(frontier)])
|
gpl-3.0
|
ryfeus/lambda-packs
|
Keras_tensorflow_nightly/source2.7/numpy/distutils/mingw32ccompiler.py
|
4
|
25201
|
"""
Support code for building Python extensions on Windows.
# NT stuff
# 1. Make sure libpython<version>.a exists for gcc. If not, build it.
# 2. Force windows to use gcc (we're struggling with MSVC and g77 support)
# 3. Force windows to use g77
"""
from __future__ import division, absolute_import, print_function
import os
import sys
import subprocess
import re
# Overwrite certain distutils.ccompiler functions:
import numpy.distutils.ccompiler
if sys.version_info[0] < 3:
from . import log
else:
from numpy.distutils import log
# NT stuff
# 1. Make sure libpython<version>.a exists for gcc. If not, build it.
# 2. Force windows to use gcc (we're struggling with MSVC and g77 support)
# --> this is done in numpy/distutils/ccompiler.py
# 3. Force windows to use g77
import distutils.cygwinccompiler
from distutils.version import StrictVersion
from numpy.distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.unixccompiler import UnixCCompiler
from distutils.msvccompiler import get_build_version as get_build_msvc_version
from distutils.errors import (DistutilsExecError, CompileError,
UnknownFileError)
from numpy.distutils.misc_util import (msvc_runtime_library,
msvc_runtime_version,
msvc_runtime_major,
get_build_architecture)
def get_msvcr_replacement():
    """Replacement for outdated version of get_msvcr from cygwinccompiler"""
    runtime = msvc_runtime_library()
    if runtime is None:
        return []
    return [runtime]

# monkey-patch cygwinccompiler with our updated version from misc_util
# to avoid getting an exception raised on Python 3.5
distutils.cygwinccompiler.get_msvcr = get_msvcr_replacement
# Useful to generate table of symbols from a dll
_START = re.compile(r'\[Ordinal/Name Pointer\] Table')
_TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)')
# the same as cygwin plus some additional parameters
class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler):
    """ A modified MingW32 compiler compatible with an MSVC built Python.
    """
    compiler_type = 'mingw32'

    def __init__ (self,
                  verbose=0,
                  dry_run=0,
                  force=0):
        # Delegate to CygwinCCompiler, then pick compiler/linker command
        # lines based on the detected gcc version and build architecture.
        distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, verbose,
                                                            dry_run, force)

        # we need to support 3.2 which doesn't match the standard
        # get_versions methods regex
        if self.gcc_version is None:
            import re
            p = subprocess.Popen(['gcc', '-dumpversion'], shell=True,
                                 stdout=subprocess.PIPE)
            out_string = p.stdout.read()
            p.stdout.close()
            result = re.search(r'(\d+\.\d+)', out_string)
            if result:
                self.gcc_version = StrictVersion(result.group(1))

        # A real mingw32 doesn't need to specify a different entry point,
        # but cygwin 2.91.57 in no-cygwin-mode needs it.
        if self.gcc_version <= "2.91.57":
            entry_point = '--entry _DllMain@12'
        else:
            entry_point = ''

        if self.linker_dll == 'dllwrap':
            # Commented out '--driver-name g++' part that fixes weird
            #   g++.exe: g++: No such file or directory
            # error (mingw 1.0 in Enthon24 tree, gcc-3.4.5).
            # If the --driver-name part is required for some environment
            # then make the inclusion of this part specific to that
            # environment.
            self.linker = 'dllwrap' #  --driver-name g++'
        elif self.linker_dll == 'gcc':
            self.linker = 'g++'

        # **changes: eric jones 4/11/01
        # 1. Check for import library on Windows.  Build if it doesn't exist.
        build_import_library()

        # Check for custom msvc runtime library on Windows. Build if it doesn't exist.
        msvcr_success = build_msvcr_library()
        msvcr_dbg_success = build_msvcr_library(debug=True)
        if msvcr_success or msvcr_dbg_success:
            # add preprocessor statement for using customized msvcr lib
            self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR')

        # Define the MSVC version as hint for MinGW
        msvcr_version = msvc_runtime_version()
        if msvcr_version:
            self.define_macro('__MSVCRT_VERSION__', '0x%04i' % msvcr_version)

        # MS_WIN64 should be defined when building for amd64 on windows,
        # but python headers define it only for MS compilers, which has all
        # kind of bad consequences, like using Py_ModuleInit4 instead of
        # Py_ModuleInit4_64, etc... So we add it here
        if get_build_architecture() == 'AMD64':
            if self.gcc_version < "4.0":
                self.set_executables(
                    compiler='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0 -Wall',
                    compiler_so='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0'
                                ' -Wall -Wstrict-prototypes',
                    linker_exe='gcc -g -mno-cygwin',
                    linker_so='gcc -g -mno-cygwin -shared')
            else:
                # gcc-4 series releases do not support -mno-cygwin option
                self.set_executables(
                    compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall',
                    compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall -Wstrict-prototypes',
                    linker_exe='gcc -g',
                    linker_so='gcc -g -shared')
        else:
            if self.gcc_version <= "3.0.0":
                self.set_executables(
                    compiler='gcc -mno-cygwin -O2 -w',
                    compiler_so='gcc -mno-cygwin -mdll -O2 -w'
                                ' -Wstrict-prototypes',
                    linker_exe='g++ -mno-cygwin',
                    linker_so='%s -mno-cygwin -mdll -static %s' %
                              (self.linker, entry_point))
            elif self.gcc_version < "4.0":
                self.set_executables(
                    compiler='gcc -mno-cygwin -O2 -Wall',
                    compiler_so='gcc -mno-cygwin -O2 -Wall'
                                ' -Wstrict-prototypes',
                    linker_exe='g++ -mno-cygwin',
                    linker_so='g++ -mno-cygwin -shared')
            else:
                # gcc-4 series releases do not support -mno-cygwin option
                self.set_executables(compiler='gcc -O2 -Wall',
                                     compiler_so='gcc -O2 -Wall -Wstrict-prototypes',
                                     linker_exe='g++ ',
                                     linker_so='g++ -shared')
        # added for python2.3 support
        # we can't pass it through set_executables because pre 2.2 would fail
        self.compiler_cxx = ['g++']

        # Maybe we should also append -mthreads, but then the finished dlls
        # need another dll (mingwm10.dll see Mingw32 docs) (-mthreads: Support
        # thread-safe exception handling on `Mingw32')

        # no additional libraries needed
        #self.dll_libraries=[]
        return

    # __init__ ()

    def link(self,
             target_desc,
             objects,
             output_filename,
             output_dir,
             libraries,
             library_dirs,
             runtime_library_dirs,
             export_symbols = None,
             debug=0,
             extra_preargs=None,
             extra_postargs=None,
             build_temp=None,
             target_lang=None):
        """Link, appending the MSVC runtime library and dispatching to the
        linker implementation matching the detected gcc version."""
        # Include the appropriate MSVC runtime library if Python was built
        # with MSVC >= 7.0 (MinGW standard is msvcrt)
        runtime_library = msvc_runtime_library()
        if runtime_library:
            if not libraries:
                libraries = []
            libraries.append(runtime_library)
        args = (self,
                target_desc,
                objects,
                output_filename,
                output_dir,
                libraries,
                library_dirs,
                runtime_library_dirs,
                None, #export_symbols, we do this in our def-file
                debug,
                extra_preargs,
                extra_postargs,
                build_temp,
                target_lang)
        if self.gcc_version < "3.0.0":
            func = distutils.cygwinccompiler.CygwinCCompiler.link
        else:
            func = UnixCCompiler.link
        # Pass only as many arguments as the chosen link() accepts.
        func(*args[:func.__code__.co_argcount])
        return

    def object_filenames (self,
                          source_filenames,
                          strip_dir=0,
                          output_dir=''):
        """Map source file names to object file names under output_dir,
        stripping Windows drive letters and accepting .rc/.res resources."""
        if output_dir is None: output_dir = ''
        obj_names = []
        for src_name in source_filenames:
            # use normcase to make sure '.rc' is really '.rc' and not '.RC'
            (base, ext) = os.path.splitext (os.path.normcase(src_name))

            # added these lines to strip off windows drive letters
            # without it, .o files are placed next to .c files
            # instead of the build directory
            drv, base = os.path.splitdrive(base)
            if drv:
                base = base[1:]

            if ext not in (self.src_extensions + ['.rc', '.res']):
                raise UnknownFileError(
                      "unknown file type '%s' (from '%s')" % \
                      (ext, src_name))
            if strip_dir:
                base = os.path.basename (base)
            if ext == '.res' or ext == '.rc':
                # these need to be compiled to object files
                obj_names.append (os.path.join (output_dir,
                                                base + ext + self.obj_extension))
            else:
                obj_names.append (os.path.join (output_dir,
                                                base + self.obj_extension))
        return obj_names

    # object_filenames ()
def find_python_dll():
    """Locate pythonXY.dll for the running interpreter.

    Searches the (virtualenv) prefix, the base installation prefix
    (sys.base_prefix, or sys.real_prefix for Python 2.7 virtualenvs),
    their '', 'lib' and 'bin' subdirectories, and the Windows System32
    directory.  Raises ValueError when the dll cannot be found.
    """
    # We can't do much here:
    # - find it in the virtualenv (sys.prefix)
    # - find it in python main dir (sys.base_prefix, if in a virtualenv)
    # - sys.real_prefix is main dir for virtualenvs in Python 2.7
    # - in system32,
    # - ortherwise (Sxs), I don't know how to get it.
    roots = [sys.prefix]
    if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix:
        roots.append(sys.base_prefix)
    elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix:
        roots.append(sys.real_prefix)

    # generate possible combinations of directory trees and sub-directories
    search_dirs = [os.path.join(root, sub)
                   for root in roots
                   for sub in ('', 'lib', 'bin')]
    # add system directory as well
    if 'SYSTEMROOT' in os.environ:
        search_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'System32'))

    # search in the file system for possible candidates
    major_version, minor_version = tuple(sys.version_info[:2])
    dllname = 'python%d%d.dll' % (major_version, minor_version)
    print("Looking for %s" % dllname)
    for folder in search_dirs:
        candidate = os.path.join(folder, dllname)
        if os.path.exists(candidate):
            return candidate

    raise ValueError("%s not found in %s" % (dllname, search_dirs))
def dump_table(dll):
    """Return the raw `objdump.exe -p` output for *dll* as byte lines."""
    proc = subprocess.Popen(["objdump.exe", "-p", dll],
                            stdout=subprocess.PIPE)
    return proc.stdout.readlines()
def generate_def(dll, dfile):
    """Given a dll file location, get all its exported symbols and dump them
    into the given def file.

    The .def file will be overwritten.  Raises ValueError when no
    '[Ordinal/Name Pointer] Table' section is found in the objdump output.
    """
    dump = dump_table(dll)
    # Locate the start of the export table in the objdump output.
    for i in range(len(dump)):
        if _START.match(dump[i].decode()):
            break
    else:
        raise ValueError("Symbol table not found")

    # Collect (ordinal, name) pairs until the table rows stop matching.
    syms = []
    for j in range(i+1, len(dump)):
        m = _TABLE.match(dump[j].decode())
        if m:
            syms.append((int(m.group(1).strip()), m.group(2)))
        else:
            break

    if len(syms) == 0:
        log.warn('No symbols found in %s' % dll)

    # BUG FIX: use a context manager so the .def file is closed even if a
    # write fails (the original left the handle open on error).
    with open(dfile, 'w') as d:
        d.write('LIBRARY %s\n' % os.path.basename(dll))
        d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n')
        d.write(';DATA PRELOAD SINGLE\n')
        d.write('\nEXPORTS\n')
        for s in syms:
            #d.write('@%d %s\n' % (s[0], s[1]))
            d.write('%s\n' % s[1])
def find_dll(dll_name):
    """Return the full path of *dll_name*, or None if it cannot be found.

    Looks first in the WinSxS side-by-side store (restricted to paths
    matching the build architecture), then in sys.prefix and every
    directory on PATH.
    """
    arch_tag = {'AMD64' : 'amd64',
                'Intel' : 'x86'}[get_build_architecture()]

    def search_winsxs(name):
        # Walk through the WinSxS directory to find the dll.
        sxs_root = os.path.join(os.environ.get('WINDIR', r'C:\WINDOWS'),
                                'winsxs')
        if not os.path.exists(sxs_root):
            return None
        for root, dirs, files in os.walk(sxs_root):
            if name in files and arch_tag in root:
                return os.path.join(root, name)
        return None

    def search_path(name):
        # First, look in the Python directory, then scan PATH for
        # the given dll name.
        for folder in [sys.prefix] + os.environ['PATH'].split(';'):
            candidate = os.path.join(folder, name)
            if os.path.exists(candidate):
                return os.path.abspath(candidate)

    return search_winsxs(dll_name) or search_path(dll_name)
def build_msvcr_library(debug=False):
    """Build a mingw import library (lib<msvcr>.a) for the MSVC runtime.

    Returns True when a usable custom msvcr import library exists (or was
    just built) under sys.prefix/libs, False otherwise.  No-op on
    non-Windows platforms.  With debug=True, targets the debug runtime
    (name suffixed with 'd').
    """
    if os.name != 'nt':
        return False

    # If the version number is None, then we couldn't find the MSVC runtime at
    # all, because we are running on a Python distribution which is customed
    # compiled; trust that the compiler is the same as the one available to us
    # now, and that it is capable of linking with the correct runtime without
    # any extra options.
    msvcr_ver = msvc_runtime_major()
    if msvcr_ver is None:
        log.debug('Skip building import library: '
                  'Runtime is not compiled with MSVC')
        return False

    # Skip using a custom library for versions < MSVC 8.0
    if msvcr_ver < 80:
        log.debug('Skip building msvcr library:'
                  ' custom functionality not present')
        return False

    msvcr_name = msvc_runtime_library()
    if debug:
        msvcr_name += 'd'

    # Skip if custom library already exists
    out_name = "lib%s.a" % msvcr_name
    out_file = os.path.join(sys.prefix, 'libs', out_name)
    if os.path.isfile(out_file):
        log.debug('Skip building msvcr library: "%s" exists' %
                  (out_file,))
        return True

    # Find the msvcr dll
    msvcr_dll_name = msvcr_name + '.dll'
    dll_file = find_dll(msvcr_dll_name)
    if not dll_file:
        log.warn('Cannot build msvcr library: "%s" not found' %
                 msvcr_dll_name)
        return False

    def_name = "lib%s.def" % msvcr_name
    def_file = os.path.join(sys.prefix, 'libs', def_name)

    log.info('Building msvcr library: "%s" (from %s)' \
             % (out_file, dll_file))

    # Generate a symbol definition file from the msvcr dll
    generate_def(dll_file, def_file)

    # Create a custom mingw library for the given symbol definitions
    cmd = ['dlltool', '-d', def_file, '-l', out_file]
    retcode = subprocess.call(cmd)

    # Clean up symbol definitions
    os.remove(def_file)

    # dlltool returns 0 on success.
    return (not retcode)
def build_import_library():
    """Ensure a gcc import library for the Python runtime exists.

    No-op on non-Windows platforms; dispatches to the architecture-specific
    builder, raising ValueError for unknown architectures.
    """
    if os.name != 'nt':
        return

    builders = {'AMD64': _build_import_library_amd64,
                'Intel': _build_import_library_x86}
    arch = get_build_architecture()
    try:
        builder = builders[arch]
    except KeyError:
        raise ValueError("Unhandled arch %s" % arch)
    return builder()
def _check_for_import_lib():
"""Check if an import library for the Python runtime already exists."""
major_version, minor_version = tuple(sys.version_info[:2])
# patterns for the file name of the library itself
patterns = ['libpython%d%d.a',
'libpython%d%d.dll.a',
'libpython%d.%d.dll.a']
# directory trees that may contain the library
stems = [sys.prefix]
if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix:
stems.append(sys.base_prefix)
elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix:
stems.append(sys.real_prefix)
# possible subdirectories within those trees where it is placed
sub_dirs = ['libs', 'lib']
# generate a list of candidate locations
candidates = []
for pat in patterns:
filename = pat % (major_version, minor_version)
for stem_dir in stems:
for folder in sub_dirs:
candidates.append(os.path.join(stem_dir, folder, filename))
# test the filesystem to see if we can find any of these
for fullname in candidates:
if os.path.isfile(fullname):
# already exists, in location given
return (True, fullname)
# needs to be built, preferred location given first
return (False, candidates[0])
def _build_import_library_amd64():
    """Build libpythonXY.a for 64-bit gcc from pythonXY.dll, unless it
    already exists."""
    out_exists, out_file = _check_for_import_lib()
    if out_exists:
        log.debug('Skip building import library: "%s" exists', out_file)
        return

    # get the runtime dll for which we are building import library
    dll_file = find_python_dll()
    log.info('Building import library (arch=AMD64): "%s" (from %s)' %
             (out_file, dll_file))

    # generate symbol list from this library
    def_name = "python%d%d.def" % tuple(sys.version_info[:2])
    def_file = os.path.join(sys.prefix, 'libs', def_name)
    generate_def(dll_file, def_file)

    # generate import library from this symbol list
    cmd = ['dlltool', '-d', def_file, '-l', out_file]
    # BUG FIX: the original used subprocess.Popen(cmd) without waiting, so
    # the compiler could proceed to link before the import library existed
    # (and failures were never detected).  Wait for dlltool and warn on
    # failure, matching _build_import_library_x86's fail-silently style.
    if subprocess.call(cmd):
        log.warn('Failed to build import library for gcc. Linking will fail.')
def _build_import_library_x86():
    """ Build the import libraries for Mingw32-gcc on Windows
    """
    out_exists, out_file = _check_for_import_lib()
    if out_exists:
        log.debug('Skip building import library: "%s" exists', out_file)
        return

    lib_name = "python%d%d.lib" % tuple(sys.version_info[:2])
    lib_file = os.path.join(sys.prefix, 'libs', lib_name)
    if not os.path.isfile(lib_file):
        # didn't find library file in virtualenv, try base distribution, too,
        # and use that instead if found there. for Python 2.7 venvs, the base
        # directory is in attribute real_prefix instead of base_prefix.
        if hasattr(sys, 'base_prefix'):
            base_lib = os.path.join(sys.base_prefix, 'libs', lib_name)
        elif hasattr(sys, 'real_prefix'):
            base_lib = os.path.join(sys.real_prefix, 'libs', lib_name)
        else:
            base_lib = ''  # os.path.isfile('') == False

        if os.path.isfile(base_lib):
            lib_file = base_lib
        else:
            log.warn('Cannot build import library: "%s" not found', lib_file)
            return
    log.info('Building import library (ARCH=x86): "%s"', out_file)

    from numpy.distutils import lib2def

    # Extract exported symbols from the MSVC .lib via nm and write a .def
    # file next to the libs.
    def_name = "python%d%d.def" % tuple(sys.version_info[:2])
    def_file = os.path.join(sys.prefix, 'libs', def_name)
    nm_cmd = '%s %s' % (lib2def.DEFAULT_NM, lib_file)
    nm_output = lib2def.getnm(nm_cmd)
    dlist, flist = lib2def.parse_nm(nm_output)
    # NOTE(review): the file handle passed to output_def is never closed
    # explicitly here — relies on GC to flush/close.
    lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, open(def_file, 'w'))

    dll_name = find_python_dll ()
    args = (dll_name, def_file, out_file)
    cmd = 'dlltool --dllname "%s" --def "%s" --output-lib "%s"' % args
    status = os.system(cmd)
    # for now, fail silently
    if status:
        log.warn('Failed to build import library for gcc. Linking will fail.')
    return
#=====================================
# Dealing with Visual Studio MANIFESTS
#=====================================
# Functions to deal with visual studio manifests. Manifest are a mechanism to
# enforce strong DLL versioning on windows, and has nothing to do with
# distutils MANIFEST. manifests are XML files with version info, and used by
# the OS loader; they are necessary when linking against a DLL not in the
# system path; in particular, official python 2.6 binary is built against the
# MS runtime 9 (the one from VS 2008), which is not available on most windows
# systems; python 2.6 installer does install it in the Win SxS (Side by side)
# directory, but this requires the manifest for this to work. This is a big
# mess, thanks MS for a wonderful system.
# XXX: ideally, we should use exactly the same version as used by python. I
# submitted a patch to get this version, but it was only included for python
# 2.6.1 and above. So for versions below, we use a "best guess".
# Mapping from MSVC runtime major+minor string ("80", "90", "100", ...) to
# the full assembly version embedded in manifests; populated only on win32.
_MSVCRVER_TO_FULLVER = {}
if sys.platform == 'win32':
    try:
        import msvcrt
        # I took one version in my SxS directory: no idea if it is the good
        # one, and we can't retrieve it from python
        _MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42"
        _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8"
        # Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0
        # on Windows XP:
        _MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460"
        if hasattr(msvcrt, "CRT_ASSEMBLY_VERSION"):
            # Prefer the exact version this interpreter was built against.
            major, minor, rest = msvcrt.CRT_ASSEMBLY_VERSION.split(".", 2)
            _MSVCRVER_TO_FULLVER[major + minor] = msvcrt.CRT_ASSEMBLY_VERSION
            del major, minor, rest
    except ImportError:
        # If we are here, means python was not built with MSVC. Not sure what
        # to do in that case: manifest building will fail, but it should not be
        # used in that case anyway
        log.warn('Cannot import msvcrt: using manifest will not be possible')
def msvc_manifest_xml(maj, min):
    """Given a major and minor version of the MSVCR, returns the
    corresponding XML file."""
    key = str(maj * 10 + min)
    fullver = _MSVCRVER_TO_FULLVER.get(key)
    if fullver is None:
        raise ValueError("Version %d,%d of MSVCRT not supported yet" %
                         (maj, min))
    # Don't be fooled, it looks like an XML, but it is not. In particular, it
    # should not have any space before starting, and its size should be
    # divisible by 4, most likely for alignement constraints when the xml is
    # embedded in the binary...
    # This template was copied directly from the python 2.6 binary (using
    # strings.exe from mingw on python.exe).
    template = """\
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
  <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
    <security>
      <requestedPrivileges>
        <requestedExecutionLevel level="asInvoker" uiAccess="false"></requestedExecutionLevel>
      </requestedPrivileges>
    </security>
  </trustInfo>
  <dependency>
    <dependentAssembly>
      <assemblyIdentity type="win32" name="Microsoft.VC%(maj)d%(min)d.CRT" version="%(fullver)s" processorArchitecture="*" publicKeyToken="1fc8b3b9a1e18e3b"></assemblyIdentity>
    </dependentAssembly>
  </dependency>
</assembly>"""

    return template % {'fullver': fullver, 'maj': maj, 'min': min}
def manifest_rc(name, type='dll'):
    """Return the rc file used to generate the res file which will be embedded
    as manifest for given manifest file name, of given type ('dll' or
    'exe').

    Parameters
    ----------
    name : str
        name of the manifest file to embed
    type : str {'dll', 'exe'}
        type of the binary which will embed the manifest
    """
    # Resource type id: 2 for a DLL manifest, 1 for an EXE manifest.
    rc_type_ids = {'dll': 2, 'exe': 1}
    if type not in rc_type_ids:
        raise ValueError("Type %s not supported" % type)
    rctype = rc_type_ids[type]
    return """\
#include "winuser.h"
%d RT_MANIFEST %s""" % (rctype, name)
def check_embedded_msvcr_match_linked(msver):
    """msver is the ms runtime version used for the MANIFEST.

    Raises ValueError when the major version of the runtime python is
    linked against differs from the one about to be embedded.
    """
    # check msvcr major version are the same for linking and
    # embedding
    maj = msvc_runtime_major()
    if maj:
        if not maj == int(msver):
            # BUG FIX: the format arguments were swapped — the linked
            # version is `maj` (from msvc_runtime_major), the one to be
            # embedded is `msver`.
            raise ValueError(
                  "Discrepancy between linked msvcr " \
                  "(%d) and the one about to be embedded " \
                  "(%d)" % (maj, int(msver)))
def configtest_name(config):
    """Return the base name (extension stripped) of the generated
    configtest source file."""
    source_path = config._gen_temp_sourcefile("yo", [], "c")
    filename = os.path.basename(source_path)
    return os.path.splitext(filename)[0]
def manifest_name(config):
    """Return the manifest filename for the configtest binary:
    <configtest-name><exe-extension>.manifest."""
    # Keep the executable suffix so the manifest sits next to the binary.
    base = configtest_name(config)
    exe_ext = config.compiler.exe_extension
    return "%s%s.manifest" % (base, exe_ext)
def rc_name(config):
    """Return the resource (.rc) filename for the configtest binary."""
    return "%s.rc" % configtest_name(config)
def generate_manifest(config):
    """Generate and write the MSVC runtime manifest for the configtest
    binary, when building against an MSVC runtime.

    The manifest filename is registered in ``config.temp_files`` so it is
    cleaned up with the other configtest artifacts. No-op when no MSVC
    build version can be determined.
    """
    msver = get_build_msvc_version()
    if msver is not None:
        if msver >= 8:
            # Runtimes >= 8 are SxS assemblies: linked and embedded major
            # versions must agree.
            check_embedded_msvcr_match_linked(msver)
        ma = int(msver)
        mi = int((msver - ma) * 10)
        # Write the manifest file
        manxml = msvc_manifest_xml(ma, mi)
        # Register for cleanup before writing, so a failed write still
        # leaves the file tracked for removal.
        config.temp_files.append(manifest_name(config))
        # FIX: use a context manager so the handle is closed even if
        # write() raises (the original leaked the handle on error).
        with open(manifest_name(config), "w") as man:
            man.write(manxml)
|
mit
|
loopCM/chromium
|
third_party/tlslite/tlslite/X509CertChain.py
|
76
|
9052
|
"""Class representing an X.509 certificate chain."""
from utils import cryptomath
from X509 import X509
class X509CertChain:
    """This class represents a chain of X.509 certificates.
    @type x509List: list
    @ivar x509List: A list of L{tlslite.X509.X509} instances,
    starting with the end-entity certificate and with every
    subsequent certificate certifying the previous.
    """
    # NOTE(review): this module is Python 2 code (old-style
    # "except ExcType, e" syntax, bound iterator next()); it will not run
    # under Python 3 without porting.
    def __init__(self, x509List=None):
        """Create a new X509CertChain.
        @type x509List: list
        @param x509List: A list of L{tlslite.X509.X509} instances,
        starting with the end-entity certificate and with every
        subsequent certificate certifying the previous.
        """
        if x509List:
            self.x509List = x509List
        else:
            # No mutable default argument: an empty chain is created here.
            self.x509List = []
    def parseChain(self, s):
        """Parse a PEM-encoded X.509 certificate file chain file.
        @type s: str
        @param s: A PEM-encoded (eg: Base64) X.509 certificate file, with every
        certificate wrapped within "-----BEGIN CERTIFICATE-----" and
        "-----END CERTIFICATE-----" tags). Extraneous data outside such tags,
        such as human readable representations, will be ignored.
        """
        class PEMIterator(object):
            """Simple iterator over PEM-encoded certificates within a string.
            @type data: string
            @ivar data: A string containing PEM-encoded (Base64) certificates,
            with every certificate wrapped within "-----BEGIN CERTIFICATE-----"
            and "-----END CERTIFICATE-----" tags). Extraneous data outside such
            tags, such as human readable representations, will be ignored.
            @type index: integer
            @ivar index: The current offset within data to begin iterating from.
            """
            _CERTIFICATE_HEADER = "-----BEGIN CERTIFICATE-----"
            """The PEM encoding block header for X.509 certificates."""
            _CERTIFICATE_FOOTER = "-----END CERTIFICATE-----"
            """The PEM encoding block footer for X.509 certificates."""
            def __init__(self, s):
                self.data = s
                self.index = 0
            def __iter__(self):
                return self
            def next(self):
                """Iterates and returns the next L{tlslite.X509.X509}
                certificate in data.
                @rtype tlslite.X509.X509
                """
                # Advance to the next header; no header left ends iteration.
                self.index = self.data.find(self._CERTIFICATE_HEADER,
                                            self.index)
                if self.index == -1:
                    raise StopIteration
                end = self.data.find(self._CERTIFICATE_FOOTER, self.index)
                if end == -1:
                    # Unterminated PEM block: treat as end of input.
                    raise StopIteration
                # Base64 payload sits between header and footer.
                certStr = self.data[self.index+len(self._CERTIFICATE_HEADER) :
                                    end]
                self.index = end + len(self._CERTIFICATE_FOOTER)
                bytes = cryptomath.base64ToBytes(certStr)
                return X509().parseBinary(bytes)
        # Replaces (not appends to) any previously parsed chain.
        self.x509List = list(PEMIterator(s))
        return self
    def getNumCerts(self):
        """Get the number of certificates in this chain.
        @rtype: int
        """
        return len(self.x509List)
    def getEndEntityPublicKey(self):
        """Get the public key from the end-entity certificate.
        @rtype: L{tlslite.utils.RSAKey.RSAKey}
        """
        if self.getNumCerts() == 0:
            raise AssertionError()
        return self.x509List[0].publicKey
    def getFingerprint(self):
        """Get the hex-encoded fingerprint of the end-entity certificate.
        @rtype: str
        @return: A hex-encoded fingerprint.
        """
        if self.getNumCerts() == 0:
            raise AssertionError()
        return self.x509List[0].getFingerprint()
    def getCommonName(self):
        """Get the Subject's Common Name from the end-entity certificate.
        The cryptlib_py module must be installed in order to use this
        function.
        @rtype: str or None
        @return: The CN component of the certificate's subject DN, if
        present.
        """
        if self.getNumCerts() == 0:
            raise AssertionError()
        return self.x509List[0].getCommonName()
    def validate(self, x509TrustList):
        """Check the validity of the certificate chain.
        This checks that every certificate in the chain validates with
        the subsequent one, until some certificate validates with (or
        is identical to) one of the passed-in root certificates.
        The cryptlib_py module must be installed in order to use this
        function.
        @type x509TrustList: list of L{tlslite.X509.X509}
        @param x509TrustList: A list of trusted root certificates. The
        certificate chain must extend to one of these certificates to
        be considered valid.
        """
        import cryptlib_py
        # Handles for certs imported into cryptlib; tracked in locals so
        # the finally block can destroy whichever are still live.
        c1 = None
        c2 = None
        lastC = None
        rootC = None
        try:
            rootFingerprints = [c.getFingerprint() for c in x509TrustList]
            #Check that every certificate in the chain validates with the
            #next one
            for cert1, cert2 in zip(self.x509List, self.x509List[1:]):
                #If we come upon a root certificate, we're done.
                if cert1.getFingerprint() in rootFingerprints:
                    return True
                c1 = cryptlib_py.cryptImportCert(cert1.writeBytes(),
                                                 cryptlib_py.CRYPT_UNUSED)
                c2 = cryptlib_py.cryptImportCert(cert2.writeBytes(),
                                                 cryptlib_py.CRYPT_UNUSED)
                try:
                    cryptlib_py.cryptCheckCert(c1, c2)
                except:
                    # Any cryptlib failure means cert1 is not certified by
                    # cert2; the finally block destroys c1/c2.
                    return False
                cryptlib_py.cryptDestroyCert(c1)
                c1 = None
                cryptlib_py.cryptDestroyCert(c2)
                c2 = None
            #If the last certificate is one of the root certificates, we're
            #done.
            if self.x509List[-1].getFingerprint() in rootFingerprints:
                return True
            #Otherwise, find a root certificate that the last certificate
            #chains to, and validate them.
            lastC = cryptlib_py.cryptImportCert(self.x509List[-1].writeBytes(),
                                                cryptlib_py.CRYPT_UNUSED)
            for rootCert in x509TrustList:
                rootC = cryptlib_py.cryptImportCert(rootCert.writeBytes(),
                                                    cryptlib_py.CRYPT_UNUSED)
                # Cheap DN comparison first; only then do the expensive
                # signature check.
                if self._checkChaining(lastC, rootC):
                    try:
                        cryptlib_py.cryptCheckCert(lastC, rootC)
                        return True
                    except:
                        return False
            return False
        finally:
            # Destroy any cryptlib handles still alive on every exit path.
            if not (c1 is None):
                cryptlib_py.cryptDestroyCert(c1)
            if not (c2 is None):
                cryptlib_py.cryptDestroyCert(c2)
            if not (lastC is None):
                cryptlib_py.cryptDestroyCert(lastC)
            if not (rootC is None):
                cryptlib_py.cryptDestroyCert(rootC)
    def _checkChaining(self, lastC, rootC):
        # Return True when lastC's issuer DN components match rootC's
        # subject DN components (country, locality, org, OU, CN).
        import cryptlib_py
        import array
        def compareNames(name):
            # Fetch the attribute from both handles; a missing attribute
            # (CRYPT_ERROR_NOTFOUND) compares as None. NOTE(review): other
            # CryptException codes leave the local unset and would raise
            # NameError below — presumably they cannot occur here; confirm.
            try:
                length = cryptlib_py.cryptGetAttributeString(lastC, name, None)
                lastName = array.array('B', [0] * length)
                cryptlib_py.cryptGetAttributeString(lastC, name, lastName)
                lastName = lastName.tostring()
            except cryptlib_py.CryptException, e:
                if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND:
                    lastName = None
            try:
                length = cryptlib_py.cryptGetAttributeString(rootC, name, None)
                rootName = array.array('B', [0] * length)
                cryptlib_py.cryptGetAttributeString(rootC, name, rootName)
                rootName = rootName.tostring()
            except cryptlib_py.CryptException, e:
                if e[0] == cryptlib_py.CRYPT_ERROR_NOTFOUND:
                    rootName = None
            return lastName == rootName
        # Select the issuer DN of lastC as the cursor for the attribute
        # reads below (rootC's default cursor is its subject DN).
        cryptlib_py.cryptSetAttribute(lastC,
                                      cryptlib_py.CRYPT_CERTINFO_ISSUERNAME,
                                      cryptlib_py.CRYPT_UNUSED)
        if not compareNames(cryptlib_py.CRYPT_CERTINFO_COUNTRYNAME):
            return False
        if not compareNames(cryptlib_py.CRYPT_CERTINFO_LOCALITYNAME):
            return False
        if not compareNames(cryptlib_py.CRYPT_CERTINFO_ORGANIZATIONNAME):
            return False
        if not compareNames(cryptlib_py.CRYPT_CERTINFO_ORGANIZATIONALUNITNAME):
            return False
        if not compareNames(cryptlib_py.CRYPT_CERTINFO_COMMONNAME):
            return False
        return True
|
bsd-3-clause
|
anurag03/integration_tests
|
cfme/utils/apidoc.py
|
4
|
1823
|
"""Sphinx plugin for automatically generating (and optionally cleaning) project api documentation
To enable the optional cleaning, set ``clean_autogenerated_docs`` to ``True`` in docs/conf.py
"""
import subprocess
from sphinx.util.console import bold, red
from cfme.utils.path import docs_path, project_path
# When adding/removing from this list, remember to edit docs/modules.rst to match
#: List of modules/packages to document, paths relative to the project root.
modules_to_document = ['cfme', 'fixtures']
# Destination directory for the generated per-module docs (docs/modules).
_doc_modules_path = docs_path.join('modules')
def setup(sphinx):
    """Main sphinx entry point, calls sphinx-apidoc for each documented module.

    Also registers the ``clean_autogenerated_docs`` config value and hooks
    purge_module_apidoc to the ``build-finished`` event.
    """
    for module in modules_to_document:
        module_path = project_path.join(module).strpath
        tests_exclude_path = project_path.join(module, 'tests').strpath
        output_module_path = _doc_modules_path.join(module).strpath
        # Shove stdout into a pipe to suppress the output, but still let stderr out
        args = ['sphinx-apidoc', '-T', '-e', '-o', output_module_path, module_path,
                tests_exclude_path]
        proc = subprocess.Popen(args, stdout=subprocess.PIPE)
        # FIX: communicate() drains the pipe while waiting; a bare wait()
        # can deadlock once sphinx-apidoc fills the stdout pipe buffer.
        proc.communicate()
    sphinx.add_config_value('clean_autogenerated_docs', False, rebuild='')
    sphinx.connect('build-finished', purge_module_apidoc)
def purge_module_apidoc(sphinx, exception):
    """Delete the autogenerated module docs once the build has finished."""
    # Do nothing unless cleaning was explicitly enabled in conf.py.
    if not sphinx.config.clean_autogenerated_docs:
        return
    try:
        sphinx.info(bold('cleaning autogenerated docs... '), nonl=True)
        # ensure() creates the directory if absent, so the following
        # recursive remove() cannot fail on a missing path.
        _doc_modules_path.ensure(dir=True)
        _doc_modules_path.remove(rec=True)
        sphinx.info(message='done')
    except Exception as err:
        # Report the failure but never break the sphinx build over cleanup.
        sphinx.info(red('failed to clean autogenerated docs'))
        sphinx.info(red(type(err).__name__) + ' ', nonl=True)
        sphinx.info(str(err))
|
gpl-2.0
|
merutak/python-social-auth
|
social/backends/loginradius.py
|
83
|
2617
|
"""
LoginRadius BaseOAuth2 backend, docs at:
http://psa.matiasaguirre.net/docs/backends/loginradius.html
"""
from social.backends.oauth import BaseOAuth2
class LoginRadiusAuth(BaseOAuth2):
    """LoginRadius BaseOAuth2 authentication backend."""
    name = 'loginradius'
    ID_KEY = 'ID'
    ACCESS_TOKEN_URL = 'https://api.loginradius.com/api/v2/access_token'
    PROFILE_URL = 'https://api.loginradius.com/api/v2/userprofile'
    ACCESS_TOKEN_METHOD = 'GET'
    REDIRECT_STATE = False
    STATE_PARAMETER = False

    def uses_redirect(self):
        """Return False because we return HTML instead."""
        return False

    def auth_html(self):
        """Render the LoginRadius login widget template."""
        key, secret = self.get_key_and_secret()
        template = self.setting('TEMPLATE', 'loginradius.html')
        context = {
            'backend': self,
            'LOGINRADIUS_KEY': key,
            'LOGINRADIUS_REDIRECT_URL': self.get_redirect_uri()
        }
        return self.strategy.render_html(tpl=template, context=context)

    def request_access_token(self, *args, **kwargs):
        """Exchange the widget-provided token for an access token."""
        token_params = {
            'token': self.data.get('token'),
            'secret': self.setting('SECRET')
        }
        return self.get_json(params=token_params, *args, **kwargs)

    def user_data(self, access_token, *args, **kwargs):
        """Loads user data from service. Implement in subclass."""
        complete_params = self.auth_complete_params(self.validate_state())
        return self.get_json(
            self.PROFILE_URL,
            params={'access_token': access_token},
            data=complete_params,
            headers=self.auth_headers(),
            method=self.ACCESS_TOKEN_METHOD
        )

    def get_user_details(self, response):
        """Must return user details in a know internal struct:
        {'username': <username if any>,
         'email': <user email if any>,
         'fullname': <user full name if any>,
         'first_name': <user first name if any>,
         'last_name': <user last name if any>}
        """
        # Empty/None profile fields are normalized to ''.
        return {
            'username': response['NickName'] or '',
            'email': response['Email'][0]['Value'] or '',
            'fullname': response['FullName'] or '',
            'first_name': response['FirstName'] or '',
            'last_name': response['LastName'] or ''
        }

    def get_user_id(self, details, response):
        """Return a unique ID for the current user, by default from server
        response. Since LoginRadius handles multiple providers, we need to
        distinguish them to prevent conflicts."""
        provider = response.get('Provider')
        provider_local_id = response.get(self.ID_KEY)
        return '{0}-{1}'.format(provider, provider_local_id)
|
bsd-3-clause
|
mark-me/Pi-Jukebox
|
venv/Lib/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/urllib3/util/__init__.py
|
204
|
1044
|
from __future__ import absolute_import
# For backwards compatibility, provide imports that used to be here.
from .connection import is_connection_dropped
from .request import make_headers
from .response import is_fp_closed
from .ssl_ import (
SSLContext,
HAS_SNI,
IS_PYOPENSSL,
IS_SECURETRANSPORT,
assert_fingerprint,
resolve_cert_reqs,
resolve_ssl_version,
ssl_wrap_socket,
)
from .timeout import (
current_time,
Timeout,
)
from .retry import Retry
from .url import (
get_host,
parse_url,
split_first,
Url,
)
from .wait import (
wait_for_read,
wait_for_write
)
# Explicit public API of urllib3.util: every name re-exported from the
# submodule imports above.
__all__ = (
    'HAS_SNI',
    'IS_PYOPENSSL',
    'IS_SECURETRANSPORT',
    'SSLContext',
    'Retry',
    'Timeout',
    'Url',
    'assert_fingerprint',
    'current_time',
    'is_connection_dropped',
    'is_fp_closed',
    'get_host',
    'parse_url',
    'make_headers',
    'resolve_cert_reqs',
    'resolve_ssl_version',
    'split_first',
    'ssl_wrap_socket',
    'wait_for_read',
    'wait_for_write'
)
|
agpl-3.0
|
cswiercz/sympy
|
sympy/physics/unitsystems/tests/test_unitsystem.py
|
92
|
2822
|
# -*- coding: utf-8 -*-
from sympy.physics.unitsystems.dimensions import Dimension, DimensionSystem
from sympy.physics.unitsystems.units import Unit, UnitSystem
from sympy.physics.unitsystems.quantities import Quantity
from sympy.utilities.pytest import raises
# Base dimensions shared by all tests in this module.
length = Dimension(name="length", symbol="L", length=1)
mass = Dimension(name="mass", symbol="M", mass=1)
time = Dimension(name="time", symbol="T", time=1)
current = Dimension(name="current", symbol="I", current=1)
# Derived dimensions: velocity = L T^-1, action = M L^2 T^-2.
velocity = Dimension(name="velocity", symbol="V", length=1, time=-1)
action = Dimension(name="action", symbol="A", length=2, mass=1, time=-2)
# Units over those dimensions. kg carries factor 10**3 — presumably
# relative to a gram base unit; confirm against the units module.
m = Unit(length, abbrev="m")
s = Unit(time, abbrev="s")
kg = Unit(mass, factor=10**3, abbrev="kg")
c = Unit(velocity, abbrev="c")
def test_definition():
    # A system may contain several units of the same dimension (m and dm).
    decimeter = Unit(m, factor=0.1)
    base_units = (m, s)
    base_dims = (m.dim, s.dim)
    system = UnitSystem(base_units, (c, decimeter), "MS", "MS system")
    assert set(system._base_units) == set(base_units)
    assert set(system._units) == {m, s, c, decimeter}
    assert system.name == "MS"
    assert system.descr == "MS system"
    assert system._system._base_dims == DimensionSystem.sort_dims(base_dims)
    assert set(system._system._dims) == set(base_dims + (velocity,))
def test_error_definition():
    # c is derived from m and s, so this base is not independent and
    # construction must fail.
    dependent_base = (m, s, c)
    raises(ValueError, lambda: UnitSystem(dependent_base))
def test_str_repr():
    # A named system prints its name; an anonymous one prints its units.
    named = UnitSystem((m, s), name="MS")
    assert str(named) == "MS"
    assert str(UnitSystem((m, s))) == "(m, s)"
    expected_repr = "<UnitSystem: (%s, %s)>" % (m.abbrev_dim, s.abbrev_dim)
    assert repr(UnitSystem((m, s))) == expected_repr
def test_call():
    ampere = Unit(current)
    joule_second = Unit(action)
    mksa = UnitSystem((m, kg, s, ampere), (joule_second,))
    # Calling the system dispatches on the argument type.
    assert mksa(joule_second) == mksa.print_unit_base(joule_second)
    assert mksa(joule_second.dim) == mksa._system(joule_second.dim)
    quantity = Quantity(10, joule_second)
    assert mksa(quantity) == "%g %s" % (quantity.factor, mksa(joule_second))
def test_get_unit():
    system = UnitSystem((m, s), (c,))
    # Lookup works by abbreviation string, by unit, and by equivalent unit.
    assert system.get_unit("s") == s
    assert system.get_unit(s) == s
    assert system.get_unit(Unit(time)) == s
    assert system["s"] == system.get_unit("s")
    # Unknown abbreviations raise KeyError through item access.
    raises(KeyError, lambda: system["g"])
def test_print_unit_base():
    ampere = Unit(current)
    joule_second = Unit(action)
    mksa = UnitSystem((m, kg, s, ampere), (joule_second,))
    # kg carries factor 10**3, hence the 0.001 prefactor.
    assert mksa.print_unit_base(joule_second) == "0.001 m^2 kg s^-2"
def test_extend():
    base_system = UnitSystem((m, s), (c,))
    joule_second = Unit(action)
    # Extending must be equivalent to building the larger system directly.
    extended = base_system.extend((kg,), (joule_second,))
    expected = UnitSystem((m, s, kg), (c, joule_second))
    assert set(extended._base_units) == set(expected._base_units)
    assert set(extended._units) == set(expected._units)
def test_dim():
    # dim counts the base units of the system.
    system = UnitSystem((m, kg, s), (c,))
    assert system.dim == 3
def test_is_consistent():
    independent_system = UnitSystem((m, s))
    assert independent_system.is_consistent is True
|
bsd-3-clause
|
spektom/incubator-airflow
|
tests/providers/google/cloud/operators/test_sftp_to_gcs_system.py
|
4
|
1875
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""System tests for Google Cloud Build operators"""
from tests.providers.google.cloud.operators.test_sftp_to_gcs_system_helper import SFTPtoGcsTestHelper
from tests.providers.google.cloud.utils.gcp_authenticator import GCP_GCS_KEY
from tests.test_utils.gcp_system_helpers import CLOUD_DAG_FOLDER, provide_gcp_context, skip_gcp_system
from tests.test_utils.system_tests_class import SystemTest
@skip_gcp_system(GCP_GCS_KEY)
class SFTPToGcsExampleDagsSystemTest(SystemTest):
    """
    System tests for SFTP to Google Cloud Storage transfer operator.
    It uses a real service.
    """
    # Class-level: one shared helper instance, created at class definition
    # time, used by setUp/tearDown for fixture provisioning and cleanup.
    helper = SFTPtoGcsTestHelper()
    @provide_gcp_context(GCP_GCS_KEY)
    def setUp(self):
        # Provision the GCS buckets and local temp files the DAG needs.
        super().setUp()
        self.helper.create_buckets()
        self.helper.create_temp_files()
    @provide_gcp_context(GCP_GCS_KEY)
    def test_run_example_dag(self):
        # Run the example DAG end-to-end against the real services.
        self.run_dag("example_sftp_to_gcs", CLOUD_DAG_FOLDER)
    @provide_gcp_context(GCP_GCS_KEY)
    def tearDown(self):
        # Remove the buckets and temp files created in setUp.
        self.helper.delete_buckets()
        self.helper.delete_temp_files()
        super().tearDown()
|
apache-2.0
|
Azure/azure-sdk-for-python
|
sdk/securityinsight/azure-mgmt-securityinsight/azure/mgmt/securityinsight/aio/operations/_alert_rule_templates_operations.py
|
1
|
9059
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# NOTE: AutoRest-generated code — edits will be lost on regeneration.
class AlertRuleTemplatesOperations:
    """AlertRuleTemplatesOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.securityinsight.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # Alias so callers can reach the model classes through the operation group.
    models = models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        resource_group_name: str,
        workspace_name: str,
        **kwargs
    ) -> AsyncIterable["models.AlertRuleTemplatesList"]:
        """Gets all alert rule templates.
        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AlertRuleTemplatesList or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.securityinsight.models.AlertRuleTemplatesList]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.AlertRuleTemplatesList"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-01-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # First page: build the templated URL; later pages: follow the
            # server-supplied next_link verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
                    'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=90, min_length=1),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize('AlertRuleTemplatesList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page; anything other than HTTP 200 is raised as an error.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    # URL template consumed by prepare_request above.
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/alertRuleTemplates'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        workspace_name: str,
        alert_rule_template_id: str,
        **kwargs
    ) -> "models.AlertRuleTemplate":
        """Gets the alert rule template.
        :param resource_group_name: The name of the resource group within the user's subscription. The
         name is case insensitive.
        :type resource_group_name: str
        :param workspace_name: The name of the workspace.
        :type workspace_name: str
        :param alert_rule_template_id: Alert rule template ID.
        :type alert_rule_template_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AlertRuleTemplate, or the result of cls(response)
        :rtype: ~azure.mgmt.securityinsight.models.AlertRuleTemplate
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["models.AlertRuleTemplate"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-01-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', pattern=r'^[0-9A-Fa-f]{8}-([0-9A-Fa-f]{4}-){3}[0-9A-Fa-f]{12}$'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'workspaceName': self._serialize.url("workspace_name", workspace_name, 'str', max_length=90, min_length=1),
            'alertRuleTemplateId': self._serialize.url("alert_rule_template_id", alert_rule_template_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('AlertRuleTemplate', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.OperationalInsights/workspaces/{workspaceName}/providers/Microsoft.SecurityInsights/alertRuleTemplates/{alertRuleTemplateId}'}  # type: ignore
|
mit
|
osamirabo/kaggle-youtube
|
convert_prediction_from_json_to_csv.py
|
15
|
3247
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility to convert the output of batch prediction into a CSV submission.
It converts the JSON files created by the command
'gcloud beta ml jobs submit prediction' into a CSV file ready for submission.
"""
import json
import tensorflow as tf
from builtins import range
from tensorflow import app
from tensorflow import flags
from tensorflow import gfile
from tensorflow import logging
FLAGS = flags.FLAGS
# Flags are only registered when run as a script — presumably so importing
# this module (e.g. for to_csv_row) does not pollute the flag registry;
# confirm against the other tools in this repo.
if __name__ == '__main__':
  flags.DEFINE_string(
      "json_prediction_files_pattern", None,
      "Pattern specifying the list of JSON files that the command "
      "'gcloud beta ml jobs submit prediction' outputs. These files are "
      "located in the output path of the prediction command and are prefixed "
      "with 'prediction.results'.")
  flags.DEFINE_string(
      "csv_output_file", None,
      "The file to save the predictions converted to the CSV format.")
def get_csv_header():
  """Return the header row of the submission CSV, newline included."""
  return "VideoId,LabelConfidencePairs\n"
def to_csv_row(json_data):
  """Convert one JSON prediction record into a submission CSV row.

  Args:
    json_data: dict with keys "video_id", "class_indexes" and
      "predictions". Each value may be wrapped in a singleton list
      (batched prediction output), in which case the first element is used.

  Returns:
    A string "video_id,idx1 score1 idx2 score2 ...\n".

  Raises:
    ValueError: if class_indexes and predictions differ in length.
  """
  video_id = json_data["video_id"]
  class_indexes = json_data["class_indexes"]
  predictions = json_data["predictions"]
  if isinstance(video_id, list):
    # Batched output: unwrap the singleton batch dimension.
    video_id = video_id[0]
    class_indexes = class_indexes[0]
    predictions = predictions[0]
  if len(class_indexes) != len(predictions):
    raise ValueError(
        "The number of indexes (%s) and predictions (%s) must be equal."
        % (len(class_indexes), len(predictions)))
  # FIX: the id may arrive as bytes or already as str (json.loads yields
  # str); the original unconditionally called .decode and crashed on str.
  if isinstance(video_id, bytes):
    video_id = video_id.decode('utf-8')
  return (video_id + "," + " ".join("%i %f" %
                                    (class_indexes[i], predictions[i])
                                    for i in range(len(class_indexes))) + "\n")
def main(unused_argv):
  """Glob the JSON prediction files and write them out as one CSV file."""
  logging.set_verbosity(tf.logging.INFO)
  # Both flags are mandatory; fail fast with a clear message.
  if not FLAGS.json_prediction_files_pattern:
    raise ValueError(
      "The flag --json_prediction_files_pattern must be specified.")
  if not FLAGS.csv_output_file:
    raise ValueError("The flag --csv_output_file must be specified.")
  logging.info("Looking for prediction files with pattern: %s",
               FLAGS.json_prediction_files_pattern)
  file_paths = gfile.Glob(FLAGS.json_prediction_files_pattern)
  logging.info("Found files: %s", file_paths)
  logging.info("Writing submission file to: %s", FLAGS.csv_output_file)
  with gfile.Open(FLAGS.csv_output_file, "w+") as output_file:
    output_file.write(get_csv_header())
    # Each input file holds one JSON record per line.
    for file_path in file_paths:
      logging.info("processing file: %s", file_path)
      with gfile.Open(file_path) as input_file:
        for line in input_file:
          json_data = json.loads(line)
          output_file.write(to_csv_row(json_data))
      # Flush after each input file so partial progress survives a crash.
      output_file.flush()
  logging.info("done")
if __name__ == "__main__":
app.run()
|
apache-2.0
|
scottlittle/nolearn
|
nolearn/lasagne/visualize.py
|
3
|
7850
|
from itertools import product
from lasagne.layers import get_output
import matplotlib.pyplot as plt
import numpy as np
import theano
import theano.tensor as T
def plot_loss(net):
    """Plot train and validation loss per epoch from ``net.train_history_``."""
    history = net.train_history_
    # Draw both curves in the original order: train first, then valid.
    for key, label in (('train_loss', 'train loss'),
                       ('valid_loss', 'valid loss')):
        plt.plot([epoch[key] for epoch in history], label=label)
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend(loc='best')
def plot_conv_weights(layer, figsize=(6, 6)):
    """Plot the weights of a specific layer, one figure per input feature map.
    Only really makes sense with convolutional layers.

    Parameters
    ----------
    layer : lasagne.layers.Layer
    """
    weights = layer.W.get_value()
    num_filters = weights.shape[0]
    num_feature_maps = weights.shape[1]
    # Arrange the filters on a near-square grid.
    grid_size = np.ceil(np.sqrt(num_filters)).astype(int)
    for feature_map in range(num_feature_maps):
        _, axes = plt.subplots(grid_size, grid_size, figsize=figsize)
        for ax in axes.flatten():
            ax.set_xticks([])
            ax.set_yticks([])
            ax.axis('off')
        # zip truncates the cell sequence once every filter is drawn.
        cells = product(range(grid_size), range(grid_size))
        for filt, (row, col) in zip(range(num_filters), cells):
            axes[row, col].imshow(weights[filt, feature_map], cmap='gray',
                                  interpolation='nearest')
def plot_conv_activity(layer, x, figsize=(6, 8)):
    """Plot the acitivities of a specific layer.
    Only really makes sense with layers that work 2D data (2D
    convolutional layers, 2D pooling layers ...).

    Parameters
    ----------
    layer : lasagne.layers.Layer
    x : numpy.ndarray
      Only takes one sample at a time, i.e. x.shape[0] == 1.
    """
    if x.shape[0] != 1:
        raise ValueError("Only one sample can be plotted at a time.")
    # compile theano function
    xs = T.tensor4('xs').astype(theano.config.floatX)
    get_activity = theano.function([xs], get_output(layer, xs))
    activity = get_activity(x)
    shape = activity.shape
    # Near-square grid for the feature maps; an extra top row holds the
    # original image.
    nrows = np.ceil(np.sqrt(shape[1])).astype(int)
    ncols = nrows
    figs, axes = plt.subplots(nrows + 1, ncols, figsize=figsize)
    # Original sample shown inverted (1 - x) in the middle of the top row.
    axes[0, ncols // 2].imshow(1 - x[0][0], cmap='gray',
                               interpolation='nearest')
    axes[0, ncols // 2].set_title('original')
    for ax in axes.flatten():
        ax.set_xticks([])
        ax.set_yticks([])
        ax.axis('off')
    for i, (r, c) in enumerate(product(range(nrows), range(ncols))):
        if i >= shape[1]:
            break
        ndim = activity[0][i].ndim
        if ndim != 2:
            raise ValueError("Wrong number of dimensions, image data should "
                             "have 2, instead got {}".format(ndim))
        # Negated so high activations appear dark on the gray colormap.
        axes[r + 1, c].imshow(-activity[0][i], cmap='gray',
                              interpolation='nearest')
def occlusion_heatmap(net, x, target, square_length=7):
    """An occlusion test that checks an image for its critical parts.

    In this function, a square part of the image is occluded (i.e. set
    to 0) and then the net is tested for its propensity to predict the
    correct label. One should expect that this propensity shrinks if
    critical parts of the image are occluded. If not, this indicates
    overfitting.

    Depending on the depth of the net and the size of the image, this
    function may take awhile to finish, since one prediction for each
    pixel of the image is made.

    Currently, all color channels are occluded at the same time. Also,
    this does not really work if images are randomly distorted by the
    batch iterator.

    See paper: Zeiler, Fergus 2013

    Parameters
    ----------
    net : NeuralNet instance
        The neural net to test.
    x : np.array
        The input data, should be of shape (1, c, x, y). Only makes
        sense with image data.
    target : int
        The true value of the image. If the net makes several
        predictions, say 10 classes, this indicates which one to look
        at.
    square_length : int (default=7)
        The length of the side of the square that occludes the image.
        Must be an odd number.

    Returns
    -------
    heat_array : np.array (with same size as image)
        An 2D np.array that at each point (i, j) contains the predicted
        probability of the correct class if the image is occluded by a
        square with center (i, j).
    """
    if (x.ndim != 4) or x.shape[0] != 1:
        raise ValueError("This function requires the input data to be of "
                         "shape (1, c, x, y), instead got {}".format(x.shape))
    if square_length % 2 == 0:
        raise ValueError("Square length has to be an odd number, instead "
                         "got {}.".format(square_length))
    num_classes = net.layers_[-1].num_units
    img = x[0].copy()
    shape = x.shape
    heat_array = np.zeros(shape[2:])
    # Pad so the occluding square can be centered on border pixels and
    # then cropped back to the original size.
    pad = square_length // 2 + 1
    # One occluded copy of the image per pixel position (i, j).
    # NOTE(review): the assignment below only broadcasts cleanly when the
    # image has a single channel (c == 1) -- confirm for multi-channel
    # inputs.
    x_occluded = np.zeros((shape[2], shape[3], shape[2], shape[3]),
                          dtype=img.dtype)
    # generate occluded images
    for i, j in product(*map(range, shape[2:])):
        x_padded = np.pad(img, ((0, 0), (pad, pad), (pad, pad)), 'constant')
        x_padded[:, i:i + square_length, j:j + square_length] = 0.
        x_occluded[i, j, :, :] = x_padded[:, pad:-pad, pad:-pad]
    # make batch predictions for each occluded image, one column of
    # occlusion centers per predict_proba call
    probs = np.zeros((shape[2], shape[3], num_classes))
    for i in range(shape[3]):
        y_proba = net.predict_proba(x_occluded[:, i:i + 1, :, :])
        probs[:, i:i + 1, :] = y_proba.reshape(shape[2], 1, num_classes)
    # from predicted probabilities, pick only those of target class
    for i, j in product(*map(range, shape[2:])):
        heat_array[i, j] = probs[i, j, target]
    return heat_array
def plot_occlusion(net, X, target, square_length=7, figsize=(9, None)):
    """Plot which parts of an image are particularly important for the
    net to classify the image correctly.

    See paper: Zeiler, Fergus 2013

    Parameters
    ----------
    net : NeuralNet instance
        The neural net to test.
    X : numpy.array
        The input data, should be of shape (b, c, 0, 1). Only makes
        sense with image data.
    target : list or numpy.array of ints
        The true values of the image. If the net makes several
        predictions, say 10 classes, this indicates which one to look
        at. If more than one sample is passed to X, each of them needs
        its own target.
    square_length : int (default=7)
        The length of the side of the square that occludes the image.
        Must be an odd number.
    figsize : tuple (int, int)
        Size of the figure. If the second element is None, the height is
        derived from the number of images.

    Plots
    -----
    Figure with 3 subplots: the original image, the occlusion heatmap,
    and both images super-imposed.
    """
    if (X.ndim != 4):
        raise ValueError("This function requires the input data to be of "
                         "shape (b, c, x, y), instead got {}".format(X.shape))
    num_images = X.shape[0]
    if figsize[1] is None:
        # Keep roughly a 3:1 width-to-row-height ratio per image row.
        figsize = (figsize[0], num_images * figsize[0] / 3)
    figs, axes = plt.subplots(num_images, 3, figsize=figsize)
    for ax in axes.flatten():
        ax.set_xticks([])
        ax.set_yticks([])
        ax.axis('off')
    for n in range(num_images):
        heat_img = occlusion_heatmap(
            net, X[n:n + 1, :, :, :], target[n], square_length
        )
        # With a single image, plt.subplots returns a 1D axes array.
        ax = axes if num_images == 1 else axes[n]
        # Collapse channels to one grayscale image for display.
        img = X[n, :, :, :].mean(0)
        ax[0].imshow(-img, interpolation='nearest', cmap='gray')
        ax[0].set_title('image')
        ax[1].imshow(-heat_img, interpolation='nearest', cmap='Reds')
        ax[1].set_title('critical parts')
        ax[2].imshow(-img, interpolation='nearest', cmap='gray')
        ax[2].imshow(-heat_img, interpolation='nearest', cmap='Reds',
                     alpha=0.6)
        ax[2].set_title('super-imposed')
|
mit
|
NeCTAR-RC/horizon
|
openstack_dashboard/dashboards/admin/networks/forms.py
|
1
|
15955
|
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
# Predefined provider network types.
# You can add or override these entries by extra_provider_types
# in the settings.
# Each entry maps a Neutron provider network type to its display name and
# whether that type needs a physical network and/or a segmentation ID.
PROVIDER_TYPES = {
    'local': {
        'display_name': _('Local'),
        'require_physical_network': False,
        'require_segmentation_id': False,
    },
    'flat': {
        'display_name': _('Flat'),
        'require_physical_network': True,
        'require_segmentation_id': False,
    },
    'vlan': {
        'display_name': _('VLAN'),
        'require_physical_network': True,
        'require_segmentation_id': True,
    },
    'gre': {
        'display_name': _('GRE'),
        'require_physical_network': False,
        'require_segmentation_id': True,
    },
    'vxlan': {
        'display_name': _('VXLAN'),
        'require_physical_network': False,
        'require_segmentation_id': True,
    },
    'geneve': {
        'display_name': _('Geneve'),
        'require_physical_network': False,
        'require_segmentation_id': True,
    },
    'midonet': {
        'display_name': _('MidoNet'),
        'require_physical_network': False,
        'require_segmentation_id': False,
    },
    'uplink': {
        'display_name': _('MidoNet Uplink'),
        'require_physical_network': False,
        'require_segmentation_id': False,
    },
}
# Predefined valid segmentation ID range per network type, as
# (min, max) inclusive tuples.
# You can add or override these entries by segmentation_id_range
# in the settings.
SEGMENTATION_ID_RANGE = {
    'vlan': (1, 4094),
    'gre': (1, (2 ** 32) - 1),
    'vxlan': (1, (2 ** 24) - 1),
    'geneve': (1, (2 ** 24) - 1),
}
# DEFAULT_PROVIDER_TYPES is used when ['*'] is specified
# in supported_provider_types. This list contains network types
# supported by Neutron ML2 plugin reference implementation.
# You can control enabled network types by
# supported_provider_types setting.
DEFAULT_PROVIDER_TYPES = ['local', 'flat', 'vlan', 'gre', 'vxlan', 'geneve']
class CreateNetwork(forms.SelfHandlingForm):
    """Admin-panel form for creating a Neutron network.

    Besides the basic attributes (name, project, admin state, shared,
    external), the form exposes provider-network attributes (network
    type, physical network, segmentation ID) when the Neutron 'provider'
    extension is available, and availability-zone hints when the
    'network_availability_zone' extension is available.
    """
    name = forms.CharField(max_length=255,
                           label=_("Name"),
                           required=False)
    tenant_id = forms.ThemableChoiceField(label=_("Project"))
    # The 'switchable'/'switched' classes drive client-side JS that shows
    # or hides the provider fields based on the selected network type.
    network_type = forms.ChoiceField(
        label=_("Provider Network Type"),
        help_text=_("The physical mechanism by which the virtual "
                    "network is implemented."),
        widget=forms.ThemableSelectWidget(attrs={
            'class': 'switchable',
            'data-slug': 'network_type'
        }))
    physical_network = forms.CharField(
        max_length=255,
        label=_("Physical Network"),
        help_text=_("The name of the physical network over which the "
                    "virtual network is implemented. Specify one of the "
                    "physical networks defined in your neutron deployment."),
        widget=forms.TextInput(attrs={
            'class': 'switched',
            'data-switch-on': 'network_type',
        }))
    segmentation_id = forms.IntegerField(
        label=_("Segmentation ID"),
        widget=forms.TextInput(attrs={
            'class': 'switched',
            'data-switch-on': 'network_type',
        }))
    admin_state = forms.BooleanField(label=_("Enable Admin State"),
                                     initial=True,
                                     required=False)
    shared = forms.BooleanField(label=_("Shared"),
                                initial=False, required=False)
    external = forms.BooleanField(label=_("External Network"),
                                  initial=False, required=False)
    with_subnet = forms.BooleanField(label=_("Create Subnet"),
                                     widget=forms.CheckboxInput(attrs={
                                         'class': 'switchable',
                                         'data-slug': 'with_subnet',
                                         'data-hide-tab': 'create_network__'
                                                          'createsubnetinfo'
                                                          'action,'
                                                          'create_network__'
                                                          'createsubnetdetail'
                                                          'action',
                                         'data-hide-on-checked': 'false'
                                     }),
                                     initial=True,
                                     required=False)
    az_hints = forms.MultipleChoiceField(
        label=_("Availability Zone Hints"),
        required=False,
        help_text=_("Availability zones where the DHCP agents may be "
                    "scheduled. Leaving this unset is equivalent to "
                    "selecting all availability zones"))

    @classmethod
    def _instantiate(cls, request, *args, **kwargs):
        # Factory hook so callers can construct the form uniformly.
        return cls(request, *args, **kwargs)

    def __init__(self, request, *args, **kwargs):
        """Populate dynamic choices from Keystone/Neutron and apply the
        operator's OPENSTACK_NEUTRON_NETWORK setting overrides."""
        super(CreateNetwork, self).__init__(request, *args, **kwargs)
        # Project choices come from Keystone; disabled projects are skipped.
        tenant_choices = [('', _("Select a project"))]
        tenants, has_more = api.keystone.tenant_list(request)
        for tenant in tenants:
            if tenant.enabled:
                tenant_choices.append((tenant.id, tenant.name))
        self.fields['tenant_id'].choices = tenant_choices
        try:
            is_extension_supported = \
                api.neutron.is_extension_supported(request, 'provider')
        except Exception:
            # If the check itself fails, degrade gracefully: warn and hide
            # all provider fields.
            msg = _("Unable to verify Neutron service providers")
            exceptions.handle(self.request, msg)
            self._hide_provider_network_type()
            is_extension_supported = False
        if is_extension_supported:
            neutron_settings = getattr(settings,
                                       'OPENSTACK_NEUTRON_NETWORK', {})
            # Segmentation-ID ranges: built-in defaults, optionally
            # overridden/extended via 'segmentation_id_range'.
            self.seg_id_range = SEGMENTATION_ID_RANGE.copy()
            seg_id_range = neutron_settings.get('segmentation_id_range')
            if seg_id_range:
                self.seg_id_range.update(seg_id_range)
            # Provider types: built-in defaults plus 'extra_provider_types'.
            self.provider_types = PROVIDER_TYPES.copy()
            extra_provider_types = neutron_settings.get('extra_provider_types')
            if extra_provider_types:
                self.provider_types.update(extra_provider_types)
            self.nettypes_with_seg_id = [
                net_type for net_type in self.provider_types
                if self.provider_types[net_type]['require_segmentation_id']]
            self.nettypes_with_physnet = [
                net_type for net_type in self.provider_types
                if self.provider_types[net_type]['require_physical_network']]
            supported_provider_types = neutron_settings.get(
                'supported_provider_types', DEFAULT_PROVIDER_TYPES)
            if supported_provider_types == ['*']:
                supported_provider_types = DEFAULT_PROVIDER_TYPES
            undefined_provider_types = [
                net_type for net_type in supported_provider_types
                if net_type not in self.provider_types]
            if undefined_provider_types:
                LOG.error('Undefined provider network types are found: %s',
                          undefined_provider_types)
            # Build per-type help text listing valid segmentation-ID ranges.
            seg_id_help = [
                _("For %(type)s networks, valid IDs are %(min)s to %(max)s.")
                % {'type': net_type,
                   'min': self.seg_id_range[net_type][0],
                   'max': self.seg_id_range[net_type][1]}
                for net_type in self.nettypes_with_seg_id]
            self.fields['segmentation_id'].help_text = ' '.join(seg_id_help)
            # Register network types which require segmentation ID
            attrs = dict(('data-network_type-%s' % network_type,
                          _('Segmentation ID'))
                         for network_type in self.nettypes_with_seg_id)
            self.fields['segmentation_id'].widget.attrs.update(attrs)
            physical_networks = getattr(settings,
                                        'OPENSTACK_NEUTRON_NETWORK', {}
                                        ).get('physical_networks', [])
            # When the operator enumerates physical networks, replace the
            # free-text field with a fixed choice list.
            if physical_networks:
                self.fields['physical_network'] = forms.ThemableChoiceField(
                    label=_("Physical Network"),
                    choices=[(net, net) for net in physical_networks],
                    widget=forms.ThemableSelectWidget(attrs={
                        'class': 'switched',
                        'data-switch-on': 'network_type',
                    }),
                    help_text=_("The name of the physical network over "
                                "which the virtual network is implemented."),)
            # Register network types which require physical network
            attrs = dict(('data-network_type-%s' % network_type,
                          _('Physical Network'))
                         for network_type in self.nettypes_with_physnet)
            self.fields['physical_network'].widget.attrs.update(attrs)
            network_type_choices = [
                (net_type, self.provider_types[net_type]['display_name'])
                for net_type in supported_provider_types]
            if not network_type_choices:
                self._hide_provider_network_type()
            else:
                self.fields['network_type'].choices = network_type_choices
        try:
            # Availability-zone hints are only offered when the extension
            # is present; otherwise the field is removed entirely.
            if api.neutron.is_extension_supported(request,
                                                  'network_availability_zone'):
                zones = api.neutron.list_availability_zones(
                    self.request, 'network', 'available')
                self.fields['az_hints'].choices = [(zone['name'], zone['name'])
                                                   for zone in zones]
            else:
                del self.fields['az_hints']
        except Exception:
            msg = _('Failed to get availability zone list.')
            messages.warning(request, msg)
            del self.fields['az_hints']

    def _hide_provider_network_type(self):
        """Hide and un-require all provider fields (used when the
        'provider' extension is unavailable or no types are enabled)."""
        self.fields['network_type'].widget = forms.HiddenInput()
        self.fields['physical_network'].widget = forms.HiddenInput()
        self.fields['segmentation_id'].widget = forms.HiddenInput()
        self.fields['network_type'].required = False
        self.fields['physical_network'].required = False
        self.fields['segmentation_id'].required = False

    def handle(self, request, data):
        """Create the network via the Neutron API.

        Returns the created network on success. On failure an error
        message is shown and the user is redirected to the network index.
        """
        try:
            params = {'name': data['name'],
                      'tenant_id': data['tenant_id'],
                      'admin_state_up': data['admin_state'],
                      'shared': data['shared'],
                      'router:external': data['external']}
            if api.neutron.is_extension_supported(request, 'provider'):
                network_type = data['network_type']
                params['provider:network_type'] = network_type
                # Only pass provider attributes the chosen type requires.
                if network_type in self.nettypes_with_physnet:
                    params['provider:physical_network'] = (
                        data['physical_network'])
                if network_type in self.nettypes_with_seg_id:
                    params['provider:segmentation_id'] = (
                        data['segmentation_id'])
            if 'az_hints' in data and data['az_hints']:
                params['availability_zone_hints'] = data['az_hints']
            network = api.neutron.network_create(request, **params)
            LOG.debug('Network %s was successfully created.', data['name'])
            return network
        except Exception:
            redirect = reverse('horizon:admin:networks:index')
            msg = _('Failed to create network %s') % data['name']
            exceptions.handle(request, msg, redirect=redirect)

    def clean(self):
        """Run the provider-specific cross-field validation on top of the
        standard form cleaning."""
        cleaned_data = super(CreateNetwork, self).clean()
        if api.neutron.is_extension_supported(self.request, 'provider'):
            self._clean_physical_network(cleaned_data)
            self._clean_segmentation_id(cleaned_data)
        return cleaned_data

    def _clean_physical_network(self, data):
        """Drop 'physical_network' errors for types that don't need one."""
        network_type = data.get('network_type')
        if ('physical_network' in self._errors and
                network_type not in self.nettypes_with_physnet):
            # In this case the physical network is not required, so we can
            # ignore any errors.
            del self._errors['physical_network']

    def _clean_segmentation_id(self, data):
        """Validate the segmentation ID against the range configured for
        the selected network type, or drop a spurious 'required' error."""
        network_type = data.get('network_type')
        if 'segmentation_id' in self._errors:
            if (network_type not in self.nettypes_with_seg_id and
                    not self.data.get("segmentation_id")):
                # In this case the segmentation ID is not required, so we can
                # ignore the field is required error.
                del self._errors['segmentation_id']
        elif network_type in self.nettypes_with_seg_id:
            # Range-check the provided segmentation ID for the chosen type.
            seg_id = data.get('segmentation_id')
            seg_id_range = {'min': self.seg_id_range[network_type][0],
                            'max': self.seg_id_range[network_type][1]}
            if seg_id < seg_id_range['min'] or seg_id > seg_id_range['max']:
                msg = (_('For a %(network_type)s network, valid segmentation '
                         'IDs are %(min)s through %(max)s.')
                       % {'network_type': network_type,
                          'min': seg_id_range['min'],
                          'max': seg_id_range['max']})
                self._errors['segmentation_id'] = self.error_class([msg])
class UpdateNetwork(forms.SelfHandlingForm):
    """Admin-panel form for updating basic attributes of a network."""
    name = forms.CharField(label=_("Name"), required=False)
    admin_state = forms.BooleanField(label=_("Enable Admin State"),
                                     required=False)
    shared = forms.BooleanField(label=_("Shared"), required=False)
    external = forms.BooleanField(label=_("External Network"), required=False)
    # URL name used for the redirect when the update fails.
    failure_url = 'horizon:admin:networks:index'

    def handle(self, request, data):
        """Apply the edits via the Neutron API.

        The network id comes from ``self.initial['network_id']``. Returns
        the updated network on success; on failure logs the exception,
        shows an error message, and redirects to the index view.
        """
        try:
            params = {'name': data['name'],
                      'admin_state_up': data['admin_state'],
                      'shared': data['shared'],
                      'router:external': data['external']}
            network = api.neutron.network_update(request,
                                                 self.initial['network_id'],
                                                 **params)
            msg = (_('Network %s was successfully updated.') %
                   network.name_or_id)
            messages.success(request, msg)
            return network
        except Exception as e:
            LOG.info('Failed to update network %(id)s: %(exc)s',
                     {'id': self.initial['network_id'], 'exc': e})
            msg = _('Failed to update network %s') % data['name']
            redirect = reverse(self.failure_url)
            exceptions.handle(request, msg, redirect=redirect)
|
apache-2.0
|
ucldc/harvester
|
scripts/external-redirect-get-solr_prod-id.py
|
1
|
4313
|
#! /bin/env python
# -*- coding: utf-8 -*-
# Use when Calisphere Object URLs for a collection change, to generate
# a redirect file mapping 'old' (on SOLR-PROD) to 'new' (on SOLR-TEST) URLs.
#
# This script takes a Collection ID and a 'match' field in SOLR (i.e. best
# field to use for matching SOLR-PROD record to corresponding SOLR-TEST)
# and generates a JSON file containing the SOLR-PROD Solr ID value and
# 'match' field value for each object in given Collection, to use as input
# for external-redirect-generate-URL-redirect-map.py
import os
import argparse
import json
import requests
import solr
# to get rid of ssl key warning
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
URL_REGISTRY_API='https://registry.cdlib.org/api/v1/collection/'
SOLR_URL='https://solr.calisphere.org/solr/'
SOLR_API_KEY = os.environ.get('SOLR_API_KEY', '')
def get_solr_id(cid, matchVal, url_solr=SOLR_URL, api_key=SOLR_API_KEY):
    """Query SOLR for every object in collection `cid` and dump each
    record's id plus the given match field to prod-URLs-<cid>.json.

    cid      -- Registry collection ID (string).
    matchVal -- SOLR field name used to match SOLR-PROD records to the
                corresponding SOLR-TEST records.
    """
    # Only send the auth header when an API key is configured.
    solr_auth = { 'X-Authentication-Token': api_key } if api_key else None
    url_collection = URL_REGISTRY_API + cid + '/'
    # rows=1000000 effectively requests the whole collection in one page.
    query = { 'q': 'collection_url:{}'.format(url_collection), 'rows':1000000, 'fl': 'id,{}'.format(matchVal)}
    solr_endpoint = url_solr + 'query'
    print "Getting ids from : {}\n{}".format(solr_endpoint, query)
    # NOTE(review): verify=False disables TLS certificate checking (the
    # matching urllib3 warning is silenced at import time). Acceptable
    # only if this endpoint is trusted -- confirm.
    resp_obj = requests.get(solr_endpoint,
                            headers=solr_auth,
                            params=query,
                            verify=False)
    results = resp_obj.json()
    # Nothing is written when the response body is empty/falsy.
    if results:
        with open('prod-URLs-{}.json'.format(cid), 'w') as foo:
            foo.write(json.dumps(results, sort_keys=True, indent=4))
def main(cid, matchVal):
    """CLI entry point: delegate to get_solr_id with the default SOLR
    URL and API key from the environment."""
    get_solr_id(cid, matchVal)
if __name__=='__main__':
    # Parse the two positional arguments and validate they are non-empty
    # before kicking off the SOLR export.
    parser = argparse.ArgumentParser('This script takes a Collection ID ' \
        'and a "match" field in SOLR (i.e. best field to use for matching SOLR-PROD ' \
        'record to corresponding SOLR-TEST) and generates a JSON file containing ' \
        'the SOLR-PROD Solr ID value and "match" field value for each object in ' \
        'given Collection, to use as input for ' \
        'external-redirect-generate-redirect-map.py' \
        '\nUsage: external-redirect-get-solr_prod-id.py [Collection ID] [match field]' )
    parser.add_argument('cid')
    parser.add_argument('matchVal')
    argv = parser.parse_args()
    if not argv.cid:
        raise Exception(
            "Please include valid Registry Collection ID")
    if not argv.matchVal:
        raise Exception(
            "Please include valid SOLR metadata match field")
    print "CID: {} MATCH FIELD: {}".format(
        argv.cid,
        argv.matchVal)
    main(argv.cid, argv.matchVal)
# Copyright © 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
|
bsd-3-clause
|
rodrigosurita/GDAd
|
sdaps/setup/pdftools/pdfpath.py
|
1
|
2761
|
# pdftools - A library of classes for parsing and rendering PDF documents.
# Copyright(C) 2001-2008 by David Boddie
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or(at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Created: 2003
"""
pdfpath.py
Classes for representing path information in PDF documents.
Path state command support.
"""
class Path:
    """A complete PDF path: a sequence of subpaths together with the
    clipping and painting operations that apply to it."""

    def __init__(self, subpaths, clipping, painting):
        # Components are stored verbatim; consumers read the attributes.
        self.subpaths, self.clipping, self.painting = \
            subpaths, clipping, painting
class Subpath:
    """A single subpath: an ordered collection of path elements."""

    def __init__(self, contents):
        self.contents = contents
class Move:
    """A move-to path element recording the target point."""

    def __init__(self, point):
        self.point = point

    def __repr__(self):
        target = repr(self.point)
        return "<Move: %s>" % target
class Line:
    """A straight line segment between two points."""

    def __init__(self, point1, point2):
        self.point1, self.point2 = point1, point2

    def __repr__(self):
        start, end = repr(self.point1), repr(self.point2)
        return "<Line: from %s to %s>" % (start, end)
class Bezier:
    """A cubic Bezier segment: two endpoints plus two control points."""

    def __init__(self, point1, control1, control2, point2):
        self.point1, self.point2 = point1, point2
        self.control1, self.control2 = control1, control2

    def __repr__(self):
        parts = (repr(self.point1), repr(self.point2),
                 repr(self.control1), repr(self.control2))
        return "<Bezier: from %s to %s; control points: %s %s>" % parts
class Rectangle:
    """An axis-aligned rectangle given by its origin point and its
    width/height dimensions."""

    def __init__(self, point, width, height):
        self.point = point
        self.width, self.height = width, height

    def __repr__(self):
        origin = repr(self.point)
        return "<Rectangle: origin: %s; dimensions: %.3f x %.3f>" % (
            origin, self.width, self.height
        )
class Close:
    """Marker element that closes the current subpath; carries no data."""
    pass
class Clip:
    """A clipping operation using the given winding rule."""

    def __init__(self, winding):
        self.winding = winding

    def __repr__(self):
        rule = self.winding
        return "<Clip: %s rule>" % rule
# Painting operations
class Stroke:
    """Marker painting operation: stroke the path outline; no data."""
    pass
class Fill:
    """A fill painting operation using the given winding rule."""

    def __init__(self, winding):
        self.winding = winding

    def __repr__(self):
        rule = self.winding
        return "<Fill: %s rule>" % rule
class Gradient:
    """A named gradient used as a painting operation."""

    def __init__(self, name):
        self.name = name
|
gpl-3.0
|
Sparkey67/android_kernel_lge_g3
|
tools/perf/util/setup.py
|
4998
|
1330
|
#!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    """distutils build_ext that redirects its output into the directories
    the perf Makefile exports via PYTHON_EXTBUILD_LIB/_TMP."""
    def finalize_options(self):
        _build_ext.finalize_options(self)
        # Override distutils' defaults with the Makefile-provided paths
        # (module-level globals read from the environment).
        self.build_lib  = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    """distutils install_lib that installs from the Makefile-provided
    extension build directory (PYTHON_EXTBUILD_LIB)."""
    def finalize_options(self):
        _install_lib.finalize_options(self)
        self.build_dir = build_lib
# Compile flags; CFLAGS from the environment are appended so the perf
# Makefile can inject its own options.
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()
# Output directories supplied by the perf Makefile; consumed by the
# build_ext/install_lib subclasses above.
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')
# NOTE: file() is the Python 2 builtin (this script targets python2 per
# its shebang). Lines starting with '#' in the source list are skipped.
ext_sources = [f.strip() for f in file('util/python-ext-sources')
               if len(f.strip()) > 0 and f[0] != '#']
perf = Extension('perf',
                 sources = ext_sources,
                 include_dirs = ['util/include'],
                 extra_compile_args = cflags,
                )
setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
|
gpl-2.0
|
italomandara/JZ80
|
node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/input.py
|
53
|
115735
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from compiler.ast import Const
from compiler.ast import Dict
from compiler.ast import Discard
from compiler.ast import List
from compiler.ast import Module
from compiler.ast import Node
from compiler.ast import Stmt
import compiler
import gyp.common
import gyp.simple_copy
import multiprocessing
import optparse
import os.path
import re
import shlex
import signal
import subprocess
import sys
import threading
import time
import traceback
from gyp.common import GypError
from gyp.common import OrderedSet
# A list of types that are treated as linkable.
linkable_types = ['executable', 'shared_library', 'loadable_module']
# A list of sections that contain links to other targets.
dependency_sections = ['dependencies', 'export_dependent_settings']
# base_path_sections is a list of sections defined by GYP that contain
# pathnames. The generators can provide more keys, the two lists are merged
# into path_sections, but you should call IsPathSection instead of using either
# list directly.
base_path_sections = [
  'destination',
  'files',
  'include_dirs',
  'inputs',
  'libraries',
  'outputs',
  'sources',
]
# Populated elsewhere from base_path_sections plus generator-supplied
# entries; queried via IsPathSection().
path_sections = set()
# These per-process dictionaries are used to cache build file data when loading
# in parallel mode.
per_process_data = {}
per_process_aux_data = {}
def IsPathSection(section):
  """Return True if `section` names a section whose values are paths.

  A section is a path section when, after stripping any trailing
  '=+?!' merge markers, it appears in the module-level `path_sections`
  set, or its name ends in _file(s), _path(s) or _dir(s).
  """
  # Trailing '=+?!' characters are merge operators applied to a section
  # name; drop them before classifying. '/' is deliberately not handled:
  # a regex section name cannot be treated as a path.
  stripped = section
  while stripped and stripped[-1:] in '=+?!':
    stripped = stripped[:-1]
  if stripped in path_sections:
    return True
  # Fast manual suffix matching (called very frequently, so no regexp):
  # equivalent to matching '_(dir|file|path)s?$'.
  if '_' not in stripped:
    return False
  tail = stripped[-6:]
  if tail[-1] == 's':
    tail = tail[:-1]
  if tail[-5:] in ('_file', '_path'):
    return True
  return tail[-4:] == '_dir'
# base_non_configuration_keys is a list of key names that belong in the target
# itself and should not be propagated into its configurations. It is merged
# with a list that can come from the generator to
# create non_configuration_keys.
base_non_configuration_keys = [
  # Sections that must exist inside targets and not configurations.
  'actions',
  'configurations',
  'copies',
  'default_configuration',
  'dependencies',
  'dependencies_original',
  'libraries',
  'postbuilds',
  'product_dir',
  'product_extension',
  'product_name',
  'product_prefix',
  'rules',
  'run_as',
  'sources',
  'standalone_static_library',
  'suppress_wildcard',
  'target_name',
  'toolset',
  'toolsets',
  'type',
  # Sections that can be found inside targets or configurations, but that
  # should not be propagated from targets into their configurations.
  'variables',
]
# Filled in elsewhere from base_non_configuration_keys plus
# generator-provided keys.
non_configuration_keys = []
# Keys that do not belong inside a configuration dictionary.
invalid_configuration_keys = [
  'actions',
  'all_dependent_settings',
  'configurations',
  'dependencies',
  'direct_dependent_settings',
  'libraries',
  'link_settings',
  'sources',
  'standalone_static_library',
  'target_name',
  'type',
]
# Controls whether or not the generator supports multiple toolsets.
multiple_toolsets = False
# Paths for converting filelist paths to output paths: {
#   toplevel,
#   qualified_output_dir,
# }
generator_filelist_paths = None
def GetIncludedBuildFiles(build_file_path, aux_data, included=None):
  """Return a list of all build files included into build_file_path.

  The returned list will contain build_file_path as well as all other files
  that it included, either directly or indirectly. Note that the list may
  contain files that were included into a conditional section that evaluated
  to false and was not merged into build_file_path's dict.

  aux_data is a dict containing a key for each build file or included build
  file. Those keys provide access to dicts whose "included" keys contain
  lists of all other files included by the build file.

  included should be left at its default None value by external callers. It
  is used for recursion.

  The returned list will not contain any duplicate entries. Each build file
  in the list will be relative to the current directory.
  """
  # Fix: use identity comparison with None (PEP 8); `== None` invokes
  # __eq__ and can misbehave for objects that override it.
  if included is None:
    included = []
  # A file already present has been visited; stop to avoid infinite
  # recursion on include cycles.
  if build_file_path in included:
    return included
  included.append(build_file_path)
  for included_build_file in aux_data[build_file_path].get('included', []):
    GetIncludedBuildFiles(included_build_file, aux_data, included)
  return included
def CheckedEval(file_contents):
  """Return the eval of a gyp file.

  The gyp file is restricted to dictionaries and lists only, and
  repeated keys are not allowed.
  Note that this is slower than eval() is.
  (Python 2 only: relies on the `compiler` module, removed in Python 3.)
  """
  ast = compiler.parse(file_contents)
  assert isinstance(ast, Module)
  c1 = ast.getChildren()
  assert c1[0] is None          # the module docstring slot must be empty
  assert isinstance(c1[1], Stmt)
  c2 = c1[1].getChildren()
  assert isinstance(c2[0], Discard)  # a single bare expression statement
  c3 = c2[0].getChildren()
  assert len(c3) == 1
  # Walk the expression, enforcing the dict/list/const-only restriction.
  return CheckNode(c3[0], [])
def CheckNode(node, keypath):
  """Recursively convert a restricted compiler AST node to Python data.

  Only Dict, List and Const nodes are permitted (the gyp file subset).
  `keypath` is the list of keys/indices leading to `node`, used for
  error reporting. Raises GypError on duplicate dictionary keys and
  TypeError on any other node type.
  (Python 2 only: uses the `compiler` module's AST classes.)
  """
  if isinstance(node, Dict):
    c = node.getChildren()
    # Renamed from `dict` to avoid shadowing the builtin of that name.
    result = {}
    for n in range(0, len(c), 2):
      assert isinstance(c[n], Const)
      key = c[n].getChildren()[0]
      if key in result:
        raise GypError("Key '" + key + "' repeated at level " +
                       repr(len(keypath) + 1) + " with key path '" +
                       '.'.join(keypath) + "'")
      kp = list(keypath)  # Make a copy of the list for descending this node.
      kp.append(key)
      result[key] = CheckNode(c[n + 1], kp)
    return result
  elif isinstance(node, List):
    c = node.getChildren()
    children = []
    for index, child in enumerate(c):
      kp = list(keypath)  # Copy of list.
      kp.append(repr(index))
      children.append(CheckNode(child, kp))
    return children
  elif isinstance(node, Const):
    return node.getChildren()[0]
  else:
    raise TypeError("Unknown AST node at key path '" + '.'.join(keypath) +
                    "': " + repr(node))
def LoadOneBuildFile(build_file_path, data, aux_data, includes,
                     is_target, check):
    """Load (and cache in `data`) a single gyp build file.

    The file body is a Python literal: evaluated either via CheckedEval
    (when `check` is set, enforcing the dict/list/const subset) or via a
    builtins-stripped eval. Included files are then merged in.
    `is_target` controls whether the caller-supplied `includes` list is
    applied. Raises GypError for a missing or non-dict file.
    (Python 2 only: uses the `except ExcType, name` syntax.)
    """
    # Cache hit: this file was already loaded and merged.
    if build_file_path in data:
        return data[build_file_path]
    if os.path.exists(build_file_path):
        build_file_contents = open(build_file_path).read()
    else:
        raise GypError("%s not found (cwd: %s)" % (build_file_path, os.getcwd()))
    build_file_data = None
    try:
        if check:
            build_file_data = CheckedEval(build_file_contents)
        else:
            # Strip builtins so the evaluated literal cannot call anything.
            build_file_data = eval(build_file_contents, {'__builtins__': None},
                                   None)
    except SyntaxError, e:
        # Attach the file name so the traceback points at the gyp file.
        e.filename = build_file_path
        raise
    except Exception, e:
        gyp.common.ExceptionAppend(e, 'while reading ' + build_file_path)
        raise
    if type(build_file_data) is not dict:
        raise GypError("%s does not evaluate to a dictionary." % build_file_path)
    data[build_file_path] = build_file_data
    aux_data[build_file_path] = {}
    # Scan for includes and merge them in.
    if ('skip_includes' not in build_file_data or
        not build_file_data['skip_includes']):
        try:
            # Caller-supplied includes only apply to target build files.
            if is_target:
                LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
                                              aux_data, includes, check)
            else:
                LoadBuildFileIncludesIntoDict(build_file_data, build_file_path, data,
                                              aux_data, None, check)
        except Exception, e:
            gyp.common.ExceptionAppend(e,
                                       'while reading includes of ' + build_file_path)
            raise
    return build_file_data
def LoadBuildFileIncludesIntoDict(subdict, subdict_path, data, aux_data,
                                  includes, check):
  """Merge every include referenced by |subdict| (plus any extra |includes|)
  into |subdict|, then recurse into nested dicts and lists to pick up
  'includes' keys at any depth."""
  pending = list(includes) if includes != None else []

  # Includes named inside the dict are resolved relative to the directory
  # that contains subdict_path.
  subdict_dir = os.path.dirname(subdict_path)
  for include in subdict.pop('includes', []):
    pending.append(os.path.normpath(os.path.join(subdict_dir, include)))

  for include in pending:
    # Record the include in aux_data before merging its contents.
    aux_data[subdict_path].setdefault('included', []).append(include)
    gyp.DebugOutput(gyp.DEBUG_INCLUDES, "Loading Included File: '%s'", include)
    MergeDicts(subdict,
               LoadOneBuildFile(include, data, aux_data, None, False, check),
               subdict_path, include)

  # Recurse into subdictionaries and lists.
  for key, value in subdict.iteritems():
    if type(value) is dict:
      LoadBuildFileIncludesIntoDict(value, subdict_path, data, aux_data,
                                    None, check)
    elif type(value) is list:
      LoadBuildFileIncludesIntoList(value, subdict_path, data, aux_data,
                                    check)
# Companion to LoadBuildFileIncludesIntoDict: lists can nest dicts (and more
# lists), so walk them looking for dicts that may carry 'includes'.
def LoadBuildFileIncludesIntoList(sublist, sublist_path, data, aux_data, check):
  for element in sublist:
    element_type = type(element)
    if element_type is dict:
      LoadBuildFileIncludesIntoDict(element, sublist_path, data, aux_data,
                                    None, check)
    elif element_type is list:
      LoadBuildFileIncludesIntoList(element, sublist_path, data, aux_data,
                                    check)
# Expand each target's 'toolsets' list into one target per toolset, each
# tagged with a single 'toolset' key.  Condition entries can carry targets
# too, so recurse into them.
def ProcessToolsetsInDict(data):
  if 'targets' in data:
    expanded = []
    for target in data['targets']:
      # A target that already carries an explicit 'toolset' and no
      # 'toolsets' list has been processed before; keep it untouched.
      if 'toolset' in target and 'toolsets' not in target:
        expanded.append(target)
        continue
      if multiple_toolsets:
        toolsets = target.get('toolsets', ['target'])
      else:
        toolsets = ['target']
      # Drop the 'toolsets' definition so it is only processed once.
      if 'toolsets' in target:
        del target['toolsets']
      if toolsets:
        # Reuse the original dict for the first toolset; deep copies are
        # made only for the additional ones.
        for extra_toolset in toolsets[1:]:
          duplicate = gyp.simple_copy.deepcopy(target)
          duplicate['toolset'] = extra_toolset
          expanded.append(duplicate)
        target['toolset'] = toolsets[0]
        expanded.append(target)
    data['targets'] = expanded

  if 'conditions' in data:
    for condition in data['conditions']:
      if type(condition) is list:
        for condition_dict in condition[1:]:
          if type(condition_dict) is dict:
            ProcessToolsetsInDict(condition_dict)
# TODO(mark): I don't love this name.  It just means that it's going to load
# a build file that contains targets and is expected to provide a targets dict
# that contains the targets...
def LoadTargetBuildFile(build_file_path, data, aux_data, variables, includes,
                        depth, check, load_dependencies):
  """Load the target build file at |build_file_path| and process it.

  Processing applies, in order: DEPTH computation, include merging (via
  LoadOneBuildFile), toolset expansion, "early" variable/condition
  processing, a second toolset expansion, target_defaults merging, and
  dependency resolution.

  Returns False if the file was already loaded, or a
  (build_file_path, dependencies) tuple when load_dependencies is false.
  When load_dependencies is true, dependencies are loaded recursively and
  nothing is returned.
  """
  # If depth is set, predefine the DEPTH variable to be a relative path from
  # this build file's directory to the directory identified by depth.
  if depth:
    # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
    # temporary measure. This should really be addressed by keeping all paths
    # in POSIX until actual project generation.
    d = gyp.common.RelativePath(depth, os.path.dirname(build_file_path))
    if d == '':
      variables['DEPTH'] = '.'
    else:
      variables['DEPTH'] = d.replace('\\', '/')

  # The 'target_build_files' key is only set when loading target build files in
  # the non-parallel code path, where LoadTargetBuildFile is called
  # recursively.  In the parallel code path, we don't need to check whether the
  # |build_file_path| has already been loaded, because the 'scheduled' set in
  # ParallelState guarantees that we never load the same |build_file_path|
  # twice.
  if 'target_build_files' in data:
    if build_file_path in data['target_build_files']:
      # Already loaded.
      return False
    data['target_build_files'].add(build_file_path)

  gyp.DebugOutput(gyp.DEBUG_INCLUDES,
                  "Loading Target Build File '%s'", build_file_path)

  build_file_data = LoadOneBuildFile(build_file_path, data, aux_data,
                                     includes, True, check)

  # Store DEPTH for later use in generators.
  build_file_data['_DEPTH'] = depth

  # Set up the included_files key indicating which .gyp files contributed to
  # this target dict.
  if 'included_files' in build_file_data:
    raise GypError(build_file_path + ' must not contain included_files key')

  included = GetIncludedBuildFiles(build_file_path, aux_data)
  build_file_data['included_files'] = []
  for included_file in included:
    # included_file is relative to the current directory, but it needs to
    # be made relative to build_file_path's directory.
    included_relative = \
        gyp.common.RelativePath(included_file,
                                os.path.dirname(build_file_path))
    build_file_data['included_files'].append(included_relative)

  # Do a first round of toolsets expansion so that conditions can be defined
  # per toolset.
  ProcessToolsetsInDict(build_file_data)

  # Apply "pre"/"early" variable expansions and condition evaluations.
  ProcessVariablesAndConditionsInDict(
      build_file_data, PHASE_EARLY, variables, build_file_path)

  # Since some toolsets might have been defined conditionally, perform
  # a second round of toolsets expansion now.
  ProcessToolsetsInDict(build_file_data)

  # Look at each project's target_defaults dict, and merge settings into
  # targets.
  if 'target_defaults' in build_file_data:
    if 'targets' not in build_file_data:
      raise GypError("Unable to find targets in build file %s" %
                     build_file_path)

    index = 0
    while index < len(build_file_data['targets']):
      # This procedure needs to give the impression that target_defaults is
      # used as defaults, and the individual targets inherit from that.
      # The individual targets need to be merged into the defaults.  Make
      # a deep copy of the defaults for each target, merge the target dict
      # as found in the input file into that copy, and then hook up the
      # copy with the target-specific data merged into it as the replacement
      # target dict.
      old_target_dict = build_file_data['targets'][index]
      new_target_dict = gyp.simple_copy.deepcopy(
        build_file_data['target_defaults'])
      MergeDicts(new_target_dict, old_target_dict,
                 build_file_path, build_file_path)
      build_file_data['targets'][index] = new_target_dict
      index += 1

    # No longer needed.
    del build_file_data['target_defaults']

  # Look for dependencies.  This means that dependency resolution occurs
  # after "pre" conditionals and variable expansion, but before "post" -
  # in other words, you can't put a "dependencies" section inside a "post"
  # conditional within a target.

  dependencies = []
  if 'targets' in build_file_data:
    for target_dict in build_file_data['targets']:
      if 'dependencies' not in target_dict:
        continue
      for dependency in target_dict['dependencies']:
        # Normalize each dependency to a fully-qualified build file path.
        dependencies.append(
            gyp.common.ResolveTarget(build_file_path, dependency, None)[0])

  if load_dependencies:
    for dependency in dependencies:
      try:
        LoadTargetBuildFile(dependency, data, aux_data, variables,
                            includes, depth, check, load_dependencies)
      except Exception, e:
        gyp.common.ExceptionAppend(
          e, 'while loading dependencies of %s' % build_file_path)
        raise
  else:
    return (build_file_path, dependencies)
def CallLoadTargetBuildFile(global_flags,
                            build_file_path, variables,
                            includes, depth, check,
                            generator_input_info):
  """Wrapper around LoadTargetBuildFile for parallel processing.

     This wrapper is used when LoadTargetBuildFile is executed in
     a worker process.  Returns (build_file_path, build_file_data,
     dependencies) on success and None on failure.
  """

  try:
    # Ignore SIGINT in the workers so a Ctrl+C is delivered to (and handled
    # by) the main process only.
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    # Apply globals so that the worker process behaves the same.
    for key, value in global_flags.iteritems():
      globals()[key] = value

    SetGeneratorGlobals(generator_input_info)
    result = LoadTargetBuildFile(build_file_path, per_process_data,
                                 per_process_aux_data, variables,
                                 includes, depth, check, False)
    if not result:
      return result

    (build_file_path, dependencies) = result

    # We can safely pop the build_file_data from per_process_data because it
    # will never be referenced by this process again, so we don't need to keep
    # it in the cache.
    build_file_data = per_process_data.pop(build_file_path)

    # This gets serialized and sent back to the main process via a pipe.
    # It's handled in LoadTargetBuildFileCallback.
    return (build_file_path,
            build_file_data,
            dependencies)
  except GypError, e:
    # GypErrors are user-facing; report them cleanly without a traceback.
    sys.stderr.write("gyp: %s\n" % e)
    return None
  except Exception, e:
    print >>sys.stderr, 'Exception:', e
    print >>sys.stderr, traceback.format_exc()
    return None
class ParallelProcessingError(Exception):
  """Raised when something goes wrong while processing build files in
  parallel."""
class ParallelState(object):
  """Class to keep track of state when processing input files in parallel.

  If build files are loaded in parallel, use this to keep track of
  state during farming out and processing parallel jobs. It's stored
  in a global so that the callback function can have access to it.
  """

  def __init__(self):
    # The multiprocessing pool.
    self.pool = None
    # The condition variable used to protect this object and notify
    # the main loop when there might be more data to process.
    self.condition = None
    # The "data" dict that was passed to LoadTargetBuildFileParallel
    self.data = None
    # The number of parallel calls outstanding; decremented when a response
    # was received.
    self.pending = 0
    # The set of all build files that have been scheduled, so we don't
    # schedule the same one twice.
    self.scheduled = set()
    # A list of dependency build file paths that haven't been scheduled yet.
    self.dependencies = []
    # Flag to indicate if there was an error in a child process.
    self.error = False

  def LoadTargetBuildFileCallback(self, result):
    """Handle the results of running LoadTargetBuildFile in another process.

    Invoked by multiprocessing.Pool.apply_async in the parent process with
    either None (the worker failed) or a
    (build_file_path, build_file_data, dependencies) tuple.
    """
    self.condition.acquire()
    if not result:
      # Worker signalled failure by returning None; wake the main loop so
      # it can notice self.error and stop scheduling new work.
      self.error = True
      self.condition.notify()
      self.condition.release()
      return
    (build_file_path0, build_file_data0, dependencies0) = result
    self.data[build_file_path0] = build_file_data0
    self.data['target_build_files'].add(build_file_path0)
    for new_dependency in dependencies0:
      # Schedule each dependency at most once across all workers.
      if new_dependency not in self.scheduled:
        self.scheduled.add(new_dependency)
        self.dependencies.append(new_dependency)
    self.pending -= 1
    self.condition.notify()
    self.condition.release()
def LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
                                 check, generator_input_info):
  """Load |build_files| and all of their dependencies using a process pool.

  Worker processes run CallLoadTargetBuildFile; results are folded back into
  |data| by ParallelState.LoadTargetBuildFileCallback.  Exits the program
  (sys.exit(1)) if any worker reported an error.
  """
  parallel_state = ParallelState()
  parallel_state.condition = threading.Condition()
  # Make copies of the build_files argument that we can modify while working.
  parallel_state.dependencies = list(build_files)
  parallel_state.scheduled = set(build_files)
  parallel_state.pending = 0
  parallel_state.data = data

  try:
    parallel_state.condition.acquire()
    while parallel_state.dependencies or parallel_state.pending:
      if parallel_state.error:
        break
      if not parallel_state.dependencies:
        # Nothing left to schedule, but responses are still outstanding;
        # wait for a callback to notify us.
        parallel_state.condition.wait()
        continue

      dependency = parallel_state.dependencies.pop()

      parallel_state.pending += 1
      # Pass these module globals explicitly; the worker re-applies them so
      # that it behaves the same as the parent process.
      global_flags = {
        'path_sections': globals()['path_sections'],
        'non_configuration_keys': globals()['non_configuration_keys'],
        'multiple_toolsets': globals()['multiple_toolsets']}

      if not parallel_state.pool:
        # Create the pool lazily, only once there is work to farm out.
        parallel_state.pool = multiprocessing.Pool(multiprocessing.cpu_count())
      parallel_state.pool.apply_async(
          CallLoadTargetBuildFile,
          args = (global_flags, dependency,
                  variables, includes, depth, check, generator_input_info),
          callback = parallel_state.LoadTargetBuildFileCallback)
  except KeyboardInterrupt, e:
    parallel_state.pool.terminate()
    raise e

  parallel_state.condition.release()

  parallel_state.pool.close()
  parallel_state.pool.join()
  parallel_state.pool = None

  if parallel_state.error:
    sys.exit(1)
# Look for the bracket that matches the first bracket seen in a
# string, and return the start and end as a tuple.  For example, if
# the input is something like "<(foo <(bar)) blah", then it would
# return (1, 13), indicating the entire string except for the leading
# "<" and trailing " blah".
LBRACKETS= set('{[(')
BRACKETS = {'}': '{', ']': '[', ')': '('}
def FindEnclosingBracketGroup(input_str):
  """Return (start, end) spanning the first balanced bracket group in
  |input_str|, or (-1, -1) when brackets are absent or mismatched.  The
  end index is exclusive: input_str[start:end] includes the closer."""
  open_stack = []
  group_start = -1
  for pos, ch in enumerate(input_str):
    if ch in LBRACKETS:
      if group_start == -1:
        group_start = pos
      open_stack.append(ch)
    elif ch in BRACKETS:
      # A closer with nothing open, or with the wrong opener on top of the
      # stack, means the string is not balanced.
      if not open_stack or open_stack.pop() != BRACKETS[ch]:
        return (-1, -1)
      if not open_stack:
        return (group_start, pos + 1)
  return (-1, -1)
def IsStrCanonicalInt(string):
  """Returns True if |string| is in its canonical integer form.

  The canonical form is such that str(int(string)) == string.
  Non-str inputs always yield False.
  """
  # Called very frequently; these hand-rolled checks are deliberately used
  # instead of a regexp, which would take roughly twice as long.
  if type(string) is not str or not string:
    return False
  if string == "0":
    return True
  digits = string[1:] if string[0] == "-" else string
  if not digits:
    # A bare "-" is not a number.
    return False
  # A canonical integer has no leading zeros or embedded non-digits.
  return '1' <= digits[0] <= '9' and digits.isdigit()
# This matches things like "<(asdf)", "<!(cmd)", "<!@(cmd)", "<|(list)",
# "<!interpreter(arguments)", "<([list])", and even "<([)" and "<(<())".
# In the last case, the inner "<()" is captured in match['content'].
# Named groups: 'replace' is the whole reference, 'type' is the sigil
# ('<', '<!', '<@', '<!@', '<|', ...), 'command_string' is an optional bare
# word such as 'pymod_do_main', 'is_array' contains a '[' when the content
# is a literal list, and 'content' is the variable name or command text.
early_variable_re = re.compile(
    r'(?P<replace>(?P<type><(?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')

# This matches the same as early_variable_re, but with '>' instead of '<'.
late_variable_re = re.compile(
    r'(?P<replace>(?P<type>>(?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')

# This matches the same as early_variable_re, but with '^' instead of '<'.
latelate_variable_re = re.compile(
    r'(?P<replace>(?P<type>[\^](?:(?:!?@?)|\|)?)'
    r'(?P<command_string>[-a-zA-Z0-9_.]+)?'
    r'\((?P<is_array>\s*\[?)'
    r'(?P<content>.*?)(\]?)\))')

# Global cache of results from running commands so they don't have to be run
# more than once.
cached_command_results = {}
def FixupPlatformCommand(cmd):
  """Rewrite a leading 'cat ' to 'type ' on Windows; return every other
  command (and all commands on non-Windows hosts) unchanged.  |cmd| may be
  a command string or an argv-style list."""
  if sys.platform != 'win32':
    return cmd
  if type(cmd) is list:
    # Only the program word (first element) can carry the 'cat ' prefix.
    return [re.sub('^cat ', 'type ', cmd[0])] + cmd[1:]
  return re.sub('^cat ', 'type ', cmd)
# Expansion phases, selecting which variable syntax ExpandVariables
# processes: '<' (early), '>' (late), and '^' (latelate) respectively.
PHASE_EARLY = 0
PHASE_LATE = 1
PHASE_LATELATE = 2
def ExpandVariables(input, phase, variables, build_file):
  """Expand GYP variable and command references in |input|.

  |phase| selects which syntax is recognized (PHASE_EARLY '<', PHASE_LATE
  '>', PHASE_LATELATE '^').  |variables| maps variable names to values, and
  |build_file| is the path of the file being processed, used for relative
  paths and error messages.  Returns the expanded value: an int for
  canonically-integer strings, a list for @-style expansions, otherwise a
  string.
  """
  # Look for the pattern that gets expanded into variables
  if phase == PHASE_EARLY:
    variable_re = early_variable_re
    expansion_symbol = '<'
  elif phase == PHASE_LATE:
    variable_re = late_variable_re
    expansion_symbol = '>'
  elif phase == PHASE_LATELATE:
    variable_re = latelate_variable_re
    expansion_symbol = '^'
  else:
    assert False

  input_str = str(input)
  if IsStrCanonicalInt(input_str):
    return int(input_str)

  # Do a quick scan to determine if an expensive regex search is warranted.
  if expansion_symbol not in input_str:
    return input_str

  # Get the entire list of matches as a list of MatchObject instances.
  # (using findall here would return strings instead of MatchObjects).
  matches = list(variable_re.finditer(input_str))
  if not matches:
    return input_str

  output = input_str
  # Reverse the list of matches so that replacements are done right-to-left.
  # That ensures that earlier replacements won't mess up the string in a
  # way that causes later calls to find the earlier substituted text instead
  # of what's intended for replacement.
  matches.reverse()
  for match_group in matches:
    match = match_group.groupdict()
    gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Matches: %r", match)
    # match['replace'] is the substring to look for, match['type']
    # is the character code for the replacement type (< > <! >! <| >| <@
    # >@ <!@ >!@), match['is_array'] contains a '[' for command
    # arrays, and match['content'] is the name of the variable (< >)
    # or command to run (<! >!). match['command_string'] is an optional
    # command string. Currently, only 'pymod_do_main' is supported.

    # run_command is true if a ! variant is used.
    run_command = '!' in match['type']
    command_string = match['command_string']

    # file_list is true if a | variant is used.
    file_list = '|' in match['type']

    # Capture these now so we can adjust them later.
    replace_start = match_group.start('replace')
    replace_end = match_group.end('replace')

    # Find the ending paren, and re-evaluate the contained string.
    (c_start, c_end) = FindEnclosingBracketGroup(input_str[replace_start:])

    # Adjust the replacement range to match the entire command
    # found by FindEnclosingBracketGroup (since the variable_re
    # probably doesn't match the entire command if it contained
    # nested variables).
    replace_end = replace_start + c_end

    # Find the "real" replacement, matching the appropriate closing
    # paren, and adjust the replacement start and end.
    replacement = input_str[replace_start:replace_end]

    # Figure out what the contents of the variable parens are.
    contents_start = replace_start + c_start + 1
    contents_end = replace_end - 1
    contents = input_str[contents_start:contents_end]

    # Do filter substitution now for <|().
    # Admittedly, this is different than the evaluation order in other
    # contexts. However, since filtration has no chance to run on <|(),
    # this seems like the only obvious way to give them access to filters.
    if file_list:
      processed_variables = gyp.simple_copy.deepcopy(variables)
      ProcessListFiltersInDict(contents, processed_variables)
      # Recurse to expand variables in the contents
      contents = ExpandVariables(contents, phase,
                                 processed_variables, build_file)
    else:
      # Recurse to expand variables in the contents
      contents = ExpandVariables(contents, phase, variables, build_file)

    # Strip off leading/trailing whitespace so that variable matches are
    # simpler below (and because they are rarely needed).
    contents = contents.strip()

    # expand_to_list is true if an @ variant is used.  In that case,
    # the expansion should result in a list.  Note that the caller
    # is to be expecting a list in return, and not all callers do
    # because not all are working in list context.  Also, for list
    # expansions, there can be no other text besides the variable
    # expansion in the input string.
    expand_to_list = '@' in match['type'] and input_str == replacement

    if run_command or file_list:
      # Find the build file's directory, so commands can be run or file lists
      # generated relative to it.
      build_file_dir = os.path.dirname(build_file)
      if build_file_dir == '' and not file_list:
        # If build_file is just a leaf filename indicating a file in the
        # current directory, build_file_dir might be an empty string.  Set
        # it to None to signal to subprocess.Popen that it should run the
        # command in the current directory.
        build_file_dir = None

    # Support <|(listfile.txt ...) which generates a file
    # containing items from a gyp list, generated at gyp time.
    # This works around actions/rules which have more inputs than will
    # fit on the command line.
    if file_list:
      if type(contents) is list:
        contents_list = contents
      else:
        contents_list = contents.split(' ')
      replacement = contents_list[0]
      if os.path.isabs(replacement):
        raise GypError('| cannot handle absolute paths, got "%s"' % replacement)

      if not generator_filelist_paths:
        path = os.path.join(build_file_dir, replacement)
      else:
        if os.path.isabs(build_file_dir):
          toplevel = generator_filelist_paths['toplevel']
          rel_build_file_dir = gyp.common.RelativePath(build_file_dir, toplevel)
        else:
          rel_build_file_dir = build_file_dir
        qualified_out_dir = generator_filelist_paths['qualified_out_dir']
        path = os.path.join(qualified_out_dir, rel_build_file_dir, replacement)
        gyp.common.EnsureDirExists(path)

      replacement = gyp.common.RelativePath(path, build_file_dir)
      f = gyp.common.WriteOnDiff(path)
      for i in contents_list[1:]:
        f.write('%s\n' % i)
      f.close()

    elif run_command:
      use_shell = True
      if match['is_array']:
        contents = eval(contents)
        use_shell = False

      # Check for a cached value to avoid executing commands, or generating
      # file lists more than once. The cache key contains the command to be
      # run as well as the directory to run it from, to account for commands
      # that depend on their current directory.
      # TODO(http://code.google.com/p/gyp/issues/detail?id=111): In theory,
      # someone could author a set of GYP files where each time the command
      # is invoked it produces different output by design. When the need
      # arises, the syntax should be extended to support no caching off a
      # command's output so it is run every time.
      cache_key = (str(contents), build_file_dir)
      cached_value = cached_command_results.get(cache_key, None)
      if cached_value is None:
        gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                        "Executing command '%s' in directory '%s'",
                        contents, build_file_dir)

        replacement = ''

        if command_string == 'pymod_do_main':
          # <!pymod_do_main(modulename param eters) loads |modulename| as a
          # python module and then calls that module's DoMain() function,
          # passing ["param", "eters"] as a single list argument. For modules
          # that don't load quickly, this can be faster than
          # <!(python modulename param eters). Do this in |build_file_dir|.
          oldwd = os.getcwd()  # Python doesn't like os.open('.'): no fchdir.
          if build_file_dir:  # build_file_dir may be None (see above).
            os.chdir(build_file_dir)
          try:
            parsed_contents = shlex.split(contents)
            try:
              py_module = __import__(parsed_contents[0])
            except ImportError as e:
              raise GypError("Error importing pymod_do_main"
                             "module (%s): %s" % (parsed_contents[0], e))
            replacement = str(py_module.DoMain(parsed_contents[1:])).rstrip()
          finally:
            os.chdir(oldwd)

          assert replacement != None
        elif command_string:
          raise GypError("Unknown command string '%s' in '%s'." %
                         (command_string, contents))
        else:
          # Fix up command with platform specific workarounds.
          contents = FixupPlatformCommand(contents)
          try:
            p = subprocess.Popen(contents, shell=use_shell,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 stdin=subprocess.PIPE,
                                 cwd=build_file_dir)
          except Exception, e:
            raise GypError("%s while executing command '%s' in %s" %
                           (e, contents, build_file))

          p_stdout, p_stderr = p.communicate('')

          if p.wait() != 0 or p_stderr:
            sys.stderr.write(p_stderr)
            # Simulate check_call behavior, since check_call only exists
            # in python 2.5 and later.
            raise GypError("Call to '%s' returned exit status %d while in %s." %
                           (contents, p.returncode, build_file))
          replacement = p_stdout.rstrip()

        cached_command_results[cache_key] = replacement
      else:
        gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                        "Had cache value for command '%s' in directory '%s'",
                        contents,build_file_dir)
        replacement = cached_value

    else:
      if not contents in variables:
        if contents[-1] in ['!', '/']:
          # In order to allow cross-compiles (nacl) to happen more naturally,
          # we will allow references to >(sources/) etc. to resolve to
          # and empty list if undefined. This allows actions to:
          # 'action!': [
          #   '>@(_sources!)',
          # ],
          # 'action/': [
          #   '>@(_sources/)',
          # ],
          replacement = []
        else:
          raise GypError('Undefined variable ' + contents +
                         ' in ' + build_file)
      else:
        replacement = variables[contents]

    if type(replacement) is list:
      for item in replacement:
        if not contents[-1] == '/' and type(item) not in (str, int):
          raise GypError('Variable ' + contents +
                         ' must expand to a string or list of strings; ' +
                         'list contains a ' +
                         item.__class__.__name__)
      # Run through the list and handle variable expansions in it.  Since
      # the list is guaranteed not to contain dicts, this won't do anything
      # with conditions sections.
      ProcessVariablesAndConditionsInList(replacement, phase, variables,
                                          build_file)
    elif type(replacement) not in (str, int):
          raise GypError('Variable ' + contents +
                         ' must expand to a string or list of strings; ' +
                         'found a ' + replacement.__class__.__name__)

    if expand_to_list:
      # Expanding in list context.  It's guaranteed that there's only one
      # replacement to do in |input_str| and that it's this replacement.  See
      # above.
      if type(replacement) is list:
        # If it's already a list, make a copy.
        output = replacement[:]
      else:
        # Split it the same way sh would split arguments.
        output = shlex.split(str(replacement))
    else:
      # Expanding in string context.
      encoded_replacement = ''
      if type(replacement) is list:
        # When expanding a list into string context, turn the list items
        # into a string in a way that will work with a subprocess call.
        #
        # TODO(mark): This isn't completely correct.  This should
        # call a generator-provided function that observes the
        # proper list-to-argument quoting rules on a specific
        # platform instead of just calling the POSIX encoding
        # routine.
        encoded_replacement = gyp.common.EncodePOSIXShellList(replacement)
      else:
        encoded_replacement = replacement

      output = output[:replace_start] + str(encoded_replacement) + \
               output[replace_end:]
    # Prepare for the next match iteration.
    input_str = output

  if output == input:
    gyp.DebugOutput(gyp.DEBUG_VARIABLES,
                    "Found only identity matches on %r, avoiding infinite "
                    "recursion.",
                    output)
  else:
    # Look for more matches now that we've replaced some, to deal with
    # expanding local variables (variables defined in the same
    # variables block as this one).
    gyp.DebugOutput(gyp.DEBUG_VARIABLES, "Found output %r, recursing.", output)
    if type(output) is list:
      if output and type(output[0]) is list:
        # Leave output alone if it's a list of lists.
        # We don't want such lists to be stringified.
        pass
      else:
        new_output = []
        for item in output:
          new_output.append(
              ExpandVariables(item, phase, variables, build_file))
        output = new_output
    else:
      output = ExpandVariables(output, phase, variables, build_file)

  # Convert all strings that are canonically-represented integers into integers.
  if type(output) is list:
    for index in xrange(0, len(output)):
      if IsStrCanonicalInt(output[index]):
        output[index] = int(output[index])
  elif IsStrCanonicalInt(output):
    output = int(output)

  return output
# The same condition is often evaluated over and over again so it
# makes sense to cache as much as possible between evaluations.
cached_conditions_asts = {}


def EvalCondition(condition, conditions_key, phase, variables, build_file):
  """Returns the dict that should be used or None if the result was
  that nothing should be used.

  |condition| is consumed pairwise as (cond_expr, true_dict) groups, where
  the final group may carry a trailing false_dict.  The first group that
  yields a dict wins, but every group is still validated.
  """
  if type(condition) is not list:
    raise GypError(conditions_key + ' must be a list')
  if len(condition) < 2:
    # It's possible that condition[0] won't work in which case this
    # attempt will raise its own IndexError.  That's probably fine.
    raise GypError(conditions_key + ' ' + condition[0] +
                   ' must be at least length 2, not ' + str(len(condition)))

  i = 0
  result = None
  while i < len(condition):
    cond_expr = condition[i]
    true_dict = condition[i + 1]
    if type(true_dict) is not dict:
      raise GypError('{} {} must be followed by a dictionary, not {}'.format(
        conditions_key, cond_expr, type(true_dict)))
    if len(condition) > i + 2 and type(condition[i + 2]) is dict:
      # An optional false_dict follows; it must be the last entry.
      false_dict = condition[i + 2]
      i = i + 3
      if i != len(condition):
        raise GypError('{} {} has {} unexpected trailing items'.format(
          conditions_key, cond_expr, len(condition) - i))
    else:
      false_dict = None
      i = i + 2
    # Only the first group that produces a dict counts, but keep looping so
    # every remaining group is validated too.
    if result == None:
      result = EvalSingleCondition(
          cond_expr, true_dict, false_dict, phase, variables, build_file)

  return result
def EvalSingleCondition(
    cond_expr, true_dict, false_dict, phase, variables, build_file):
  """Returns true_dict if cond_expr evaluates to true, and false_dict
  otherwise."""
  # Do expansions on the condition itself.  Since the condition can naturally
  # contain variable references without needing to resort to GYP expansion
  # syntax, this is of dubious value for variables, but someone might want to
  # use a command expansion directly inside a condition.
  cond_expr_expanded = ExpandVariables(cond_expr, phase, variables,
                                       build_file)
  if type(cond_expr_expanded) not in (str, int):
    raise ValueError(
          'Variable expansion in this context permits str and int ' + \
            'only, found ' + cond_expr_expanded.__class__.__name__)

  try:
    # Compile each distinct condition expression once and reuse the code
    # object; the same expressions are evaluated many times.
    if cond_expr_expanded in cached_conditions_asts:
      ast_code = cached_conditions_asts[cond_expr_expanded]
    else:
      ast_code = compile(cond_expr_expanded, '<string>', 'eval')
      cached_conditions_asts[cond_expr_expanded] = ast_code
    # Evaluate with no builtins and |variables| as the namespace, so the
    # expression can reference GYP variables by name.
    if eval(ast_code, {'__builtins__': None}, variables):
      return true_dict
    return false_dict
  except SyntaxError, e:
    syntax_error = SyntaxError('%s while evaluating condition \'%s\' in %s '
                               'at character %d.' %
                               (str(e.args[0]), e.text, build_file, e.offset),
                               e.filename, e.lineno, e.offset, e.text)
    raise syntax_error
  except NameError, e:
    # An unknown name in the expression; report which condition and file.
    gyp.common.ExceptionAppend(e, 'while evaluating condition \'%s\' in %s' %
                               (cond_expr_expanded, build_file))
    raise GypError(e)
def ProcessConditionsInDict(the_dict, phase, variables, build_file):
  """Evaluate the phase-appropriate conditions section of |the_dict| and
  merge the selected dicts back into it.

  PHASE_EARLY processes 'conditions', PHASE_LATE processes
  'target_conditions', and PHASE_LATELATE processes nothing.  Each entry in
  a conditions list is [cond_expr, true_dict] optionally followed by a
  false_dict; the dict chosen by evaluating cond_expr is recursively
  expanded (in the same phase) and merged into the_dict.
  """
  if phase == PHASE_LATELATE:
    # No conditions section applies in the latelate phase.
    return
  assert phase in (PHASE_EARLY, PHASE_LATE)
  conditions_key = ('conditions' if phase == PHASE_EARLY
                    else 'target_conditions')

  if conditions_key not in the_dict:
    return
  # Detach the conditions list; it must not survive in the processed dict.
  conditions_list = the_dict.pop(conditions_key)

  for condition in conditions_list:
    merge_dict = EvalCondition(condition, conditions_key, phase, variables,
                               build_file)
    if merge_dict is not None:
      # Expand variables and nested conditionals in the chosen dict before
      # folding it into the_dict.
      ProcessVariablesAndConditionsInDict(merge_dict, phase,
                                          variables, build_file)
      MergeDicts(the_dict, merge_dict, build_file, build_file)
def LoadAutomaticVariablesFromDict(variables, the_dict):
  """Create an automatic variable ('_' prepended to the key name) in
  |variables| for every plain str/int/list value in |the_dict|."""
  for name, value in the_dict.iteritems():
    if type(value) not in (str, int, list):
      # Nested dicts and other values never become automatic variables.
      continue
    variables['_' + name] = value
def LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key):
  """Load entries from the_dict's "variables" dict into |variables|.

  Any keys in the_dict's "variables" dict, if it has one, becomes a
  variable.  The variable name is the key name in the "variables" dict.
  Variables that end with the % character are set only if they are unset in
  the variables dict.  the_dict_key is the name of the key accessing
  the_dict in the_dict's parent dict.  If the_dict's parent is not a dict
  (it could be a list or it could be parentless because it is a root dict),
  the_dict_key will be None.
  """
  # .items() rather than the py2-only .iteritems(): behaviorally identical
  # here (the dict is not mutated during iteration) and portable.
  for key, value in the_dict.get('variables', {}).items():
    # Only plain values can become variables; nested dicts are handled by
    # recursive processing elsewhere.
    if type(value) not in (str, int, list):
      continue

    if key.endswith('%'):
      variable_name = key[:-1]
      if variable_name in variables:
        # If the variable is already set, don't set it.
        continue
      # Use == here, not is: comparing str contents by identity only works
      # by accident of CPython literal interning.
      if the_dict_key == 'variables' and variable_name in the_dict:
        # If the variable is set without a % in the_dict, and the_dict is a
        # variables dict (making |variables| a variables sub-dict of a
        # variables dict), use the_dict's definition.
        value = the_dict[variable_name]
    else:
      variable_name = key

    variables[variable_name] = value
def ProcessVariablesAndConditionsInDict(the_dict, phase, variables_in,
                                        build_file, the_dict_key=None):
  """Handle all variable and command expansion and conditional evaluation.

  This function is the public entry point for all variable expansions and
  conditional evaluations.  The variables_in dictionary will not be modified
  by this function; a copy is taken and reloaded at each processing step.

  Args:
    the_dict: the dict to process.  It is modified in place.
    phase: expansion-phase constant, forwarded to ExpandVariables and
        ProcessConditionsInDict.
    variables_in: base variable bindings visible to this scope; never
        mutated.
    build_file: path of the build file the_dict came from, used for error
        reporting and expansion.
    the_dict_key: key under which the_dict lives in its parent dict, or
        None if parentless or inside a list (see
        LoadVariablesFromVariablesDict for how this affects %-variables).
  """

  # Make a copy of the variables_in dict that can be modified during the
  # loading of automatics and the loading of the variables dict.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)

  if 'variables' in the_dict:
    # Make sure all the local variables are added to the variables
    # list before we process them so that you can reference one
    # variable from another.  They will be fully expanded by recursion
    # in ExpandVariables.
    for key, value in the_dict['variables'].iteritems():
      variables[key] = value

    # Handle the associated variables dict first, so that any variable
    # references within can be resolved prior to using them as variables.
    # Pass a copy of the variables dict to avoid having it be tainted.
    # Otherwise, it would have extra automatics added for everything that
    # should just be an ordinary variable in this scope.
    ProcessVariablesAndConditionsInDict(the_dict['variables'], phase,
                                        variables, build_file, 'variables')

  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Expand every top-level string value in the_dict.
  for key, value in the_dict.iteritems():
    # Skip "variables", which was already processed if present.
    if key != 'variables' and type(value) is str:
      expanded = ExpandVariables(value, phase, variables, build_file)
      if type(expanded) not in (str, int):
        raise ValueError(
            'Variable expansion in this context permits str and int ' + \
            'only, found ' + expanded.__class__.__name__ + ' for ' + key)
      # Reassigning an existing key is safe while iterating the dict.
      the_dict[key] = expanded

  # Variable expansion may have resulted in changes to automatics.  Reload.
  # TODO(mark): Optimization: only reload if no changes were made.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Process conditions in this dict.  This is done after variable expansion
  # so that conditions may take advantage of expanded variables.  For example,
  # if the_dict contains:
  #   {'type':       '<(library_type)',
  #    'conditions': [['_type=="static_library"', { ... }]]},
  # _type, as used in the condition, will only be set to the value of
  # library_type if variable expansion is performed before condition
  # processing.  However, condition processing should occur prior to recursion
  # so that variables (both automatic and "variables" dict type) may be
  # adjusted by conditions sections, merged into the_dict, and have the
  # intended impact on contained dicts.
  #
  # This arrangement means that a "conditions" section containing a
  # "variables" section will only have those variables effective in subdicts,
  # not in the_dict.  The workaround is to put a "conditions" section within
  # a "variables" section.  For example:
  #   {'conditions': [['os=="mac"', {'variables': {'define': 'IS_MAC'}}]],
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will not result in "IS_MAC" being appended to the "defines" list in the
  # current scope but would result in it being appended to the "defines" list
  # within "my_subdict".  By comparison:
  #   {'variables': {'conditions': [['os=="mac"', {'define': 'IS_MAC'}]]},
  #    'defines':    ['<(define)'],
  #    'my_subdict': {'defines': ['<(define)']}},
  # will append "IS_MAC" to both "defines" lists.

  # Evaluate conditions sections, allowing variable expansions within them
  # as well as nested conditionals.  This will process a 'conditions' or
  # 'target_conditions' section, perform appropriate merging and recursive
  # conditional and variable processing, and then remove the conditions
  # section from the_dict if it is present.
  ProcessConditionsInDict(the_dict, phase, variables, build_file)

  # Conditional processing may have resulted in changes to automatics or the
  # variables dict.  Reload.
  variables = variables_in.copy()
  LoadAutomaticVariablesFromDict(variables, the_dict)
  LoadVariablesFromVariablesDict(variables, the_dict, the_dict_key)

  # Recurse into child dicts, or process child lists which may result in
  # further recursion into descendant dicts.
  for key, value in the_dict.iteritems():
    # Skip "variables" and string values, which were already processed if
    # present.
    if key == 'variables' or type(value) is str:
      continue
    if type(value) is dict:
      # Pass a copy of the variables dict so that subdicts can't influence
      # parents.
      ProcessVariablesAndConditionsInDict(value, phase, variables,
                                          build_file, key)
    elif type(value) is list:
      # The list itself can't influence the variables dict, and
      # ProcessVariablesAndConditionsInList will make copies of the variables
      # dict if it needs to pass it to something that can influence it.  No
      # copy is necessary here.
      ProcessVariablesAndConditionsInList(value, phase, variables,
                                          build_file)
    elif type(value) is not int:
      raise TypeError('Unknown type ' + value.__class__.__name__ + \
                      ' for ' + key)
def ProcessVariablesAndConditionsInList(the_list, phase, variables,
                                        build_file):
  """Recursively expands variables and evaluates conditions in the_list.

  the_list is processed in place: dicts and sublists are recursed into, and
  string items are expanded with ExpandVariables.  A string item that expands
  to a list is spliced into the_list at its position.  int items are left
  untouched; any other item type is an error.
  """
  # Iterate using an index so that new values can be assigned into the_list.
  index = 0
  while index < len(the_list):
    item = the_list[index]
    if type(item) is dict:
      # Make a copy of the variables dict so that it won't influence anything
      # outside of its own scope.
      ProcessVariablesAndConditionsInDict(item, phase, variables, build_file)
    elif type(item) is list:
      ProcessVariablesAndConditionsInList(item, phase, variables, build_file)
    elif type(item) is str:
      expanded = ExpandVariables(item, phase, variables, build_file)
      if type(expanded) in (str, int):
        the_list[index] = expanded
      elif type(expanded) is list:
        # Splice the expansion into the list in place of the original item.
        the_list[index:index+1] = expanded
        index += len(expanded)

        # index now identifies the next item to examine.  Continue right now
        # without falling into the index increment below.
        continue
      else:
        # Bug fix: index is an int, so it must be converted with str();
        # otherwise building the error message itself raises a TypeError
        # and masks the intended ValueError.
        raise ValueError(
            'Variable expansion in this context permits strings and ' + \
            'lists only, found ' + expanded.__class__.__name__ + ' at ' + \
            str(index))
    elif type(item) is not int:
      # Bug fix: same str() conversion as above.
      raise TypeError('Unknown type ' + item.__class__.__name__ + \
                      ' at index ' + str(index))
    index = index + 1
def BuildTargetsDict(data):
  """Builds a dict mapping fully-qualified target names to their target dicts.

  |data| is a dict mapping loaded build files by pathname relative to the
  current directory.  Values in |data| are build file contents.  For each
  |data| value with a "targets" key, the value of the "targets" key is taken
  as a list containing target dicts.  Each target's fully-qualified name is
  constructed from the pathname of the build file (|data| key) and its
  "target_name" property.  These fully-qualified names are used as the keys
  in the returned dict.  These keys provide access to the target dicts,
  the dicts in the "targets" lists.
  """
  targets = {}
  for build_file in data['target_build_files']:
    build_file_dict = data[build_file]
    for target_dict in build_file_dict.get('targets', []):
      qualified_name = gyp.common.QualifiedTarget(build_file,
                                                  target_dict['target_name'],
                                                  target_dict['toolset'])
      if qualified_name in targets:
        raise GypError('Duplicate target definitions for ' + qualified_name)
      targets[qualified_name] = target_dict

  return targets
def QualifyDependencies(targets):
  """Make dependency links fully-qualified relative to the current directory.

  |targets| is a dict mapping fully-qualified target names to their target
  dicts.  For each target in this dict, keys known to contain dependency
  links are examined, and any dependencies referenced will be rewritten
  so that they are fully-qualified and relative to the current directory.
  All rewritten dependencies are suitable for use as keys to |targets| or a
  similar dict.

  Raises:
    GypError: if a dependency appears in a section such as
        "dependencies!" without also appearing in "dependencies".
  """
  # Each dependency section may also appear with a "!" (exclusion) or "/"
  # (regex) suffix; qualify the entries of all such variants.
  all_dependency_sections = [dep + op
                             for dep in dependency_sections
                             for op in ('', '!', '/')]

  for target, target_dict in targets.iteritems():
    target_build_file = gyp.common.BuildFile(target)
    toolset = target_dict['toolset']
    for dependency_key in all_dependency_sections:
      dependencies = target_dict.get(dependency_key, [])
      # Rewrite each entry in place; the list object stays attached to
      # target_dict.
      for index in xrange(0, len(dependencies)):
        dep_file, dep_target, dep_toolset = gyp.common.ResolveTarget(
            target_build_file, dependencies[index], toolset)
        if not multiple_toolsets:
          # Ignore toolset specification in the dependency if it is specified.
          dep_toolset = toolset
        dependency = gyp.common.QualifiedTarget(dep_file,
                                                dep_target,
                                                dep_toolset)
        dependencies[index] = dependency

        # Make sure anything appearing in a list other than "dependencies" also
        # appears in the "dependencies" list.
        if dependency_key != 'dependencies' and \
           dependency not in target_dict['dependencies']:
          raise GypError('Found ' + dependency + ' in ' + dependency_key +
                         ' of ' + target + ', but not in dependencies')
def ExpandWildcardDependencies(targets, data):
  """Expands dependencies specified as build_file:*.

  For each target in |targets|, examines sections containing links to other
  targets.  If any such section contains a link of the form build_file:*, it
  is taken as a wildcard link, and is expanded to list each target in
  build_file.  The |data| dict provides access to build file dicts.

  Any target that does not wish to be included by wildcard can provide an
  optional "suppress_wildcard" key in its target dict.  When present and
  true, a wildcard dependency link will not include such targets.

  All dependency names, including the keys to |targets| and the values in each
  dependency list, must be qualified when this function is called.

  Raises:
    GypError: if a wildcard refers to the same build file as the target
        containing it (a target cannot depend on itself).
  """
  for target, target_dict in targets.iteritems():
    toolset = target_dict['toolset']
    target_build_file = gyp.common.BuildFile(target)
    for dependency_key in dependency_sections:
      dependencies = target_dict.get(dependency_key, [])

      # Loop this way instead of "for dependency in" or "for index in xrange"
      # because the dependencies list will be modified within the loop body.
      index = 0
      while index < len(dependencies):
        (dependency_build_file, dependency_target, dependency_toolset) = \
            gyp.common.ParseQualifiedTarget(dependencies[index])
        if dependency_target != '*' and dependency_toolset != '*':
          # Not a wildcard.  Keep it moving.
          index = index + 1
          continue

        if dependency_build_file == target_build_file:
          # It's an error for a target to depend on all other targets in
          # the same file, because a target cannot depend on itself.
          raise GypError('Found wildcard in ' + dependency_key + ' of ' +
                         target + ' referring to same build file')

        # Take the wildcard out and adjust the index so that the next
        # dependency in the list will be processed the next time through the
        # loop.
        del dependencies[index]
        index = index - 1

        # Loop through the targets in the other build file, adding them to
        # this target's list of dependencies in place of the removed
        # wildcard.
        dependency_target_dicts = data[dependency_build_file]['targets']
        for dependency_target_dict in dependency_target_dicts:
          # int() accepts both boolean and 0/1-style values for
          # suppress_wildcard.
          if int(dependency_target_dict.get('suppress_wildcard', False)):
            continue
          dependency_target_name = dependency_target_dict['target_name']
          # A target-name wildcard matches every name; a concrete name must
          # match exactly.  The same logic applies to the toolset below.
          if (dependency_target != '*' and
              dependency_target != dependency_target_name):
            continue
          dependency_target_toolset = dependency_target_dict['toolset']
          if (dependency_toolset != '*' and
              dependency_toolset != dependency_target_toolset):
            continue
          dependency = gyp.common.QualifiedTarget(dependency_build_file,
                                                  dependency_target_name,
                                                  dependency_target_toolset)
          # Insert each expansion after the previous one, preserving the
          # other build file's target order.
          index = index + 1
          dependencies.insert(index, dependency)

        index = index + 1
def Unify(l):
  """Returns a copy of l with duplicates removed, preserving the order of
  each element's first occurrence."""
  seen = set()
  unique = []
  for element in l:
    if element not in seen:
      seen.add(element)
      unique.append(element)
  return unique
def RemoveDuplicateDependencies(targets):
  """Makes sure every dependency appears only once in all targets's dependency
  lists."""
  for target_name, target_dict in targets.iteritems():
    for dependency_key in dependency_sections:
      deps = target_dict.get(dependency_key)
      if not deps:
        continue
      target_dict[dependency_key] = Unify(deps)
def Filter(l, item):
  """Returns a copy of l with every element equal to item removed.

  Mirrors Unify's setdefault trick: if the same value occurs more than once,
  the retained entries all reference the first-seen object.
  """
  first_seen = {}
  kept = []
  for element in l:
    if element != item:
      kept.append(first_seen.setdefault(element, element))
  return kept
def RemoveSelfDependencies(targets):
  """Remove self dependencies from targets that have the prune_self_dependency
  variable set."""
  for target_name, target_dict in targets.iteritems():
    for dependency_key in dependency_sections:
      deps = target_dict.get(dependency_key, [])
      for dep in deps:
        if dep != target_name:
          continue
        # Only prune when the target opted in via its variables dict.
        if targets[dep].get('variables', {}).get('prune_self_dependency', 0):
          target_dict[dependency_key] = Filter(deps, target_name)
def RemoveLinkDependenciesFromNoneTargets(targets):
  """Remove dependencies having the 'link_dependency' attribute from the 'none'
  targets."""
  for target_name, target_dict in targets.iteritems():
    for dependency_key in dependency_sections:
      deps = target_dict.get(dependency_key, [])
      for dep in deps:
        # Only 'none'-type targets have their link dependencies stripped.
        if target_dict.get('type', None) != 'none':
          continue
        if targets[dep].get('variables', {}).get('link_dependency', 0):
          # Filter the current (possibly already-filtered) list so earlier
          # removals in this section are preserved.
          target_dict[dependency_key] = \
              Filter(target_dict[dependency_key], dep)
class DependencyGraphNode(object):
  """A node in the target (or .gyp file) dependency graph.

  Attributes:
    ref: A reference to an object that this DependencyGraphNode represents.
    dependencies: List of DependencyGraphNodes on which this one depends.
    dependents: List of DependencyGraphNodes that depend on this one.
  """

  class CircularException(GypError):
    """Raised when a cycle is detected in the dependency graph."""
    pass

  def __init__(self, ref):
    self.ref = ref
    self.dependencies = []
    self.dependents = []

  def __repr__(self):
    return '<DependencyGraphNode: %r>' % self.ref

  def FlattenToList(self):
    """Returns a topologically sorted list of the refs reachable from this
    (root) node, dependencies before dependents."""
    # flat_list is the sorted list of dependencies - actually, the list items
    # are the "ref" attributes of DependencyGraphNodes.  Every target will
    # appear in flat_list after all of its dependencies, and before all of its
    # dependents.
    flat_list = OrderedSet()

    # in_degree_zeros is the list of DependencyGraphNodes that have no
    # dependencies not in flat_list.  Initially, it is a copy of the children
    # of this node, because when the graph was built, nodes with no
    # dependencies were made implicit dependents of the root node.
    in_degree_zeros = set(self.dependents[:])

    while in_degree_zeros:
      # Nodes in in_degree_zeros have no dependencies not in flat_list, so they
      # can be appended to flat_list.  Take these nodes out of in_degree_zeros
      # as work progresses, so that the next node to process from the list can
      # always be accessed at a consistent position.
      node = in_degree_zeros.pop()
      flat_list.add(node.ref)

      # Look at dependents of the node just added to flat_list.  Some of them
      # may now belong in in_degree_zeros.
      for node_dependent in node.dependents:
        is_in_degree_zero = True
        # TODO: We want to check through the
        # node_dependent.dependencies list but if it's long and we
        # always start at the beginning, then we get O(n^2) behaviour.
        for node_dependent_dependency in node_dependent.dependencies:
          # Idiom fix: "x not in y" instead of "not x in y".
          if node_dependent_dependency.ref not in flat_list:
            # The dependent has one or more dependencies not in flat_list.
            # There will be more chances to add it to flat_list when examining
            # it again as a dependent of those other dependencies, provided
            # that there are no cycles.
            is_in_degree_zero = False
            break

        if is_in_degree_zero:
          # All of the dependent's dependencies are already in flat_list.  Add
          # it to in_degree_zeros where it will be processed in a future
          # iteration of the outer loop.
          in_degree_zeros.add(node_dependent)

    return list(flat_list)

  def FindCycles(self):
    """
    Returns a list of cycles in the graph, where each cycle is its own list.
    """
    results = []
    visited = set()

    def Visit(node, path):
      # Depth-first walk over dependents; a child already on the current path
      # closes a cycle.
      for child in node.dependents:
        if child in path:
          results.append([child] + path[:path.index(child) + 1])
        elif child not in visited:
          visited.add(child)
          Visit(child, [child] + path)

    visited.add(self)
    Visit(self, [self])

    return results

  def DirectDependencies(self, dependencies=None):
    """Returns a list of just direct dependencies."""
    # Idiom fix: compare to the None singleton with "is", per PEP 8.
    if dependencies is None:
      dependencies = []

    for dependency in self.dependencies:
      # Check for None, corresponding to the root node.
      if dependency.ref is not None and dependency.ref not in dependencies:
        dependencies.append(dependency.ref)

    return dependencies

  def _AddImportedDependencies(self, targets, dependencies=None):
    """Given a list of direct dependencies, adds indirect dependencies that
    other dependencies have declared to export their settings.

    This method does not operate on self.  Rather, it operates on the list
    of dependencies in the |dependencies| argument.  For each dependency in
    that list, if any declares that it exports the settings of one of its
    own dependencies, those dependencies whose settings are "passed through"
    are added to the list.  As new items are added to the list, they too will
    be processed, so it is possible to import settings through multiple levels
    of dependencies.

    This method is not terribly useful on its own, it depends on being
    "primed" with a list of direct dependencies such as one provided by
    DirectDependencies.  DirectAndImportedDependencies is intended to be the
    public entry point.
    """
    # Idiom fix: compare to the None singleton with "is", per PEP 8.
    if dependencies is None:
      dependencies = []

    index = 0
    while index < len(dependencies):
      dependency = dependencies[index]
      dependency_dict = targets[dependency]
      # Add any dependencies whose settings should be imported to the list
      # if not already present.  Newly-added items will be checked for
      # their own imports when the list iteration reaches them.
      # Rather than simply appending new items, insert them after the
      # dependency that exported them.  This is done to more closely match
      # the depth-first method used by DeepDependencies.
      add_index = 1
      for imported_dependency in \
          dependency_dict.get('export_dependent_settings', []):
        if imported_dependency not in dependencies:
          dependencies.insert(index + add_index, imported_dependency)
          add_index = add_index + 1
      index = index + 1

    return dependencies

  def DirectAndImportedDependencies(self, targets, dependencies=None):
    """Returns a list of a target's direct dependencies and all indirect
    dependencies that a dependency has advertised settings should be exported
    through the dependency for.
    """
    dependencies = self.DirectDependencies(dependencies)
    return self._AddImportedDependencies(targets, dependencies)

  def DeepDependencies(self, dependencies=None):
    """Returns an OrderedSet of all of a target's dependencies, recursively."""
    if dependencies is None:
      # Using a list to get ordered output and a set to do fast "is it
      # already added" checks.
      dependencies = OrderedSet()

    for dependency in self.dependencies:
      # Check for None, corresponding to the root node.
      if dependency.ref is None:
        continue
      if dependency.ref not in dependencies:
        # Recurse before adding so that a target's dependencies precede it
        # in the resulting order.
        dependency.DeepDependencies(dependencies)
        dependencies.add(dependency.ref)

    return dependencies

  def _LinkDependenciesInternal(self, targets, include_shared_libraries,
                                dependencies=None, initial=True):
    """Returns an OrderedSet of dependency targets that are linked
    into this target.

    This function has a split personality, depending on the setting of
    |initial|.  Outside callers should always leave |initial| at its default
    setting.

    When adding a target to the list of dependencies, this function will
    recurse into itself with |initial| set to False, to collect dependencies
    that are linked into the linkable target for which the list is being built.

    If |include_shared_libraries| is False, the resulting dependencies will not
    include shared_library targets that are linked into this target.
    """
    if dependencies is None:
      # Using a list to get ordered output and a set to do fast "is it
      # already added" checks.
      dependencies = OrderedSet()

    # Check for None, corresponding to the root node.
    if self.ref is None:
      return dependencies

    # It's kind of sucky that |targets| has to be passed into this function,
    # but that's presently the easiest way to access the target dicts so that
    # this function can find target types.

    if 'target_name' not in targets[self.ref]:
      raise GypError("Missing 'target_name' field in target.")

    if 'type' not in targets[self.ref]:
      raise GypError("Missing 'type' field in target %s" %
                     targets[self.ref]['target_name'])

    target_type = targets[self.ref]['type']

    is_linkable = target_type in linkable_types

    if initial and not is_linkable:
      # If this is the first target being examined and it's not linkable,
      # return an empty list of link dependencies, because the link
      # dependencies are intended to apply to the target itself (initial is
      # True) and this target won't be linked.
      return dependencies

    # Don't traverse 'none' targets if explicitly excluded.
    if (target_type == 'none' and
        not targets[self.ref].get('dependencies_traverse', True)):
      dependencies.add(self.ref)
      return dependencies

    # Executables and loadable modules are already fully and finally linked.
    # Nothing else can be a link dependency of them, there can only be
    # dependencies in the sense that a dependent target might run an
    # executable or load the loadable_module.
    if not initial and target_type in ('executable', 'loadable_module'):
      return dependencies

    # Shared libraries are already fully linked.  They should only be included
    # in |dependencies| when adjusting static library dependencies (in order to
    # link against the shared_library's import lib), but should not be included
    # in |dependencies| when propagating link_settings.
    # The |include_shared_libraries| flag controls which of these two cases we
    # are handling.
    if (not initial and target_type == 'shared_library' and
        not include_shared_libraries):
      return dependencies

    # The target is linkable, add it to the list of link dependencies.
    if self.ref not in dependencies:
      dependencies.add(self.ref)
      if initial or not is_linkable:
        # If this is a subsequent target and it's linkable, don't look any
        # further for linkable dependencies, as they'll already be linked into
        # this target linkable.  Always look at dependencies of the initial
        # target, and always look at dependencies of non-linkables.
        for dependency in self.dependencies:
          dependency._LinkDependenciesInternal(targets,
                                               include_shared_libraries,
                                               dependencies, False)

    return dependencies

  def DependenciesForLinkSettings(self, targets):
    """
    Returns a list of dependency targets whose link_settings should be merged
    into this target.
    """

    # TODO(sbaig) Currently, chrome depends on the bug that shared libraries'
    # link_settings are propagated.  So for now, we will allow it, unless the
    # 'allow_sharedlib_linksettings_propagation' flag is explicitly set to
    # False.  Once chrome is fixed, we can remove this flag.
    include_shared_libraries = \
        targets[self.ref].get('allow_sharedlib_linksettings_propagation', True)
    return self._LinkDependenciesInternal(targets, include_shared_libraries)

  def DependenciesToLinkAgainst(self, targets):
    """
    Returns a list of dependency targets that are linked into this target.
    """
    return self._LinkDependenciesInternal(targets, True)
def BuildDependencyList(targets):
  """Builds the target dependency graph and a topologically sorted flat list.

  Returns [dependency_nodes, flat_list], where dependency_nodes maps each
  fully-qualified target name to its DependencyGraphNode and flat_list is
  ordered from dependencies to dependents.  Raises
  DependencyGraphNode.CircularException when the graph contains a cycle.
  """
  # Create a DependencyGraphNode for each target.  Put it into a dict for easy
  # access.
  dependency_nodes = {}
  for target in targets:
    if target not in dependency_nodes:
      dependency_nodes[target] = DependencyGraphNode(target)

  # Set up the dependency links.  Targets that have no dependencies are treated
  # as dependent on root_node.
  root_node = DependencyGraphNode(None)
  for target, spec in targets.iteritems():
    target_node = dependency_nodes[target]
    target_build_file = gyp.common.BuildFile(target)
    dependencies = spec.get('dependencies')
    if not dependencies:
      target_node.dependencies = [root_node]
      root_node.dependents.append(target_node)
      continue
    for dependency in dependencies:
      dependency_node = dependency_nodes.get(dependency)
      if not dependency_node:
        raise GypError("Dependency '%s' not found while "
                       "trying to load target %s" % (dependency, target))
      target_node.dependencies.append(dependency_node)
      dependency_node.dependents.append(target_node)

  flat_list = root_node.FlattenToList()

  # If there's anything left unvisited, there must be a circular dependency
  # (cycle).
  if len(flat_list) != len(targets):
    if not root_node.dependents:
      # If all targets have dependencies, add the first target as a dependent
      # of root_node so that the cycle can be discovered from root_node.
      target = targets.keys()[0]
      target_node = dependency_nodes[target]
      target_node.dependencies.append(root_node)
      root_node.dependents.append(target_node)

    cycles = []
    for cycle in root_node.FindCycles():
      paths = [node.ref for node in cycle]
      cycles.append('Cycle: %s' % ' -> '.join(paths))
    raise DependencyGraphNode.CircularException(
        'Cycles in dependency graph detected:\n' + '\n'.join(cycles))

  return [dependency_nodes, flat_list]
def VerifyNoGYPFileCircularDependencies(targets):
  """Verifies that the .gyp file dependency graph (as opposed to the target
  dependency graph) is acyclic.

  Raises DependencyGraphNode.CircularException listing the cycles if any
  .gyp files depend on one another circularly.
  """
  # Create a DependencyGraphNode for each gyp file containing a target.  Put
  # it into a dict for easy access.
  dependency_nodes = {}
  for target in targets.iterkeys():
    build_file = gyp.common.BuildFile(target)
    if not build_file in dependency_nodes:
      dependency_nodes[build_file] = DependencyGraphNode(build_file)

  # Set up the dependency links.
  for target, spec in targets.iteritems():
    build_file = gyp.common.BuildFile(target)
    build_file_node = dependency_nodes[build_file]
    target_dependencies = spec.get('dependencies', [])
    for dependency in target_dependencies:
      try:
        dependency_build_file = gyp.common.BuildFile(dependency)
      # Fix: "except X as e" instead of the Python-2-only "except X, e"
      # syntax; "as" is accepted by Python 2.6+ and required by Python 3.
      except GypError as e:
        gyp.common.ExceptionAppend(
            e, 'while computing dependencies of .gyp file %s' % build_file)
        raise

      if dependency_build_file == build_file:
        # A .gyp file is allowed to refer back to itself.
        continue
      dependency_node = dependency_nodes.get(dependency_build_file)
      if not dependency_node:
        # Fix: corrected "Dependancy" misspelling in the error message.
        raise GypError("Dependency '%s' not found" % dependency_build_file)
      if dependency_node not in build_file_node.dependencies:
        build_file_node.dependencies.append(dependency_node)
        dependency_node.dependents.append(build_file_node)

  # Files that have no dependencies are treated as dependent on root_node.
  root_node = DependencyGraphNode(None)
  for build_file_node in dependency_nodes.itervalues():
    if len(build_file_node.dependencies) == 0:
      build_file_node.dependencies.append(root_node)
      root_node.dependents.append(build_file_node)

  flat_list = root_node.FlattenToList()

  # If there's anything left unvisited, there must be a circular dependency
  # (cycle).
  if len(flat_list) != len(dependency_nodes):
    if not root_node.dependents:
      # If all files have dependencies, add the first file as a dependent
      # of root_node so that the cycle can be discovered from root_node.
      file_node = dependency_nodes.values()[0]
      file_node.dependencies.append(root_node)
      root_node.dependents.append(file_node)
    cycles = []
    for cycle in root_node.FindCycles():
      paths = [node.ref for node in cycle]
      cycles.append('Cycle: %s' % ' -> '.join(paths))
    raise DependencyGraphNode.CircularException(
        'Cycles in .gyp file dependency graph detected:\n' + '\n'.join(cycles))
def DoDependentSettings(key, flat_list, targets, dependency_nodes):
  """Merges each dependency's |key| settings dict into its dependent targets.

  key should be one of all_dependent_settings, direct_dependent_settings,
  or link_settings; it selects both which settings dict is merged and which
  set of dependencies supplies it.
  """
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)

    # Pick the dependency set appropriate for this settings flavor.
    if key == 'all_dependent_settings':
      dep_list = dependency_nodes[target].DeepDependencies()
    elif key == 'direct_dependent_settings':
      dep_list = \
          dependency_nodes[target].DirectAndImportedDependencies(targets)
    elif key == 'link_settings':
      dep_list = \
          dependency_nodes[target].DependenciesForLinkSettings(targets)
    else:
      raise GypError("DoDependentSettings doesn't know how to determine "
                     'dependencies for ' + key)

    for dep in dep_list:
      dep_dict = targets[dep]
      if key not in dep_dict:
        continue
      dep_build_file = gyp.common.BuildFile(dep)
      MergeDicts(target_dict, dep_dict[key],
                 build_file, dep_build_file)
def AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    sort_dependencies):
  """Rewrites each target's "dependencies" list for linking purposes.

  Recompute target "dependencies" properties.  For each static library
  target, remove "dependencies" entries referring to other static libraries,
  unless the dependency has the "hard_dependency" attribute set.  For each
  linkable target, add a "dependencies" entry referring to all of the
  target's computed list of link dependencies (including static libraries)
  if no such entry is already present.  The original list is preserved under
  "dependencies_original" for static libraries.
  """
  for target in flat_list:
    target_dict = targets[target]
    target_type = target_dict['type']

    if target_type == 'static_library':
      if not 'dependencies' in target_dict:
        continue

      # Keep a pristine copy before pruning; generators may need it.
      target_dict['dependencies_original'] = target_dict.get(
          'dependencies', [])[:]

      # A static library should not depend on another static library unless
      # the dependency relationship is "hard," which should only be done when
      # a dependent relies on some side effect other than just the build
      # product, like a rule or action output.  Further, if a target has a
      # non-hard dependency, but that dependency exports a hard dependency,
      # the non-hard dependency can safely be removed, but the exported hard
      # dependency must be added to the target to keep the same dependency
      # ordering.
      dependencies = \
          dependency_nodes[target].DirectAndImportedDependencies(targets)
      # Iterate by index because entries are deleted in place below.
      index = 0
      while index < len(dependencies):
        dependency = dependencies[index]
        dependency_dict = targets[dependency]

        # Remove every non-hard static library dependency and remove every
        # non-static library dependency that isn't a direct dependency.
        if (dependency_dict['type'] == 'static_library' and \
            not dependency_dict.get('hard_dependency', False)) or \
           (dependency_dict['type'] != 'static_library' and \
            not dependency in target_dict['dependencies']):
          # Take the dependency out of the list, and don't increment index
          # because the next dependency to analyze will shift into the index
          # formerly occupied by the one being removed.
          del dependencies[index]
        else:
          index = index + 1

      # Update the dependencies.  If the dependencies list is empty, it's not
      # needed, so unhook it.
      if len(dependencies) > 0:
        target_dict['dependencies'] = dependencies
      else:
        del target_dict['dependencies']

    elif target_type in linkable_types:
      # Get a list of dependency targets that should be linked into this
      # target.  Add them to the dependencies list if they're not already
      # present.

      link_dependencies = \
          dependency_nodes[target].DependenciesToLinkAgainst(targets)
      for dependency in link_dependencies:
        # A target never links against itself.
        if dependency == target:
          continue
        if not 'dependencies' in target_dict:
          target_dict['dependencies'] = []
        if not dependency in target_dict['dependencies']:
          target_dict['dependencies'].append(dependency)
      # Sort the dependencies list in the order from dependents to
      # dependencies.  e.g. If A and B depend on C and C depends on D,
      # sort them in A, B, C, D.
      # Note: flat_list is already sorted in the order from dependencies to
      # dependents.
      if sort_dependencies and 'dependencies' in target_dict:
        target_dict['dependencies'] = [dep for dep in reversed(flat_list)
                                       if dep in target_dict['dependencies']]
# Initialize this here to speed up MakePathRelative.
# Matches items that must never be rewritten as relative paths: those whose
# first (or, if quoted, second) character is one of "-/$<>^".
exception_re = re.compile(r'''["']?[-/$<>^]''')


def MakePathRelative(to_file, fro_file, item):
  """Rebases a relative path from fro_file's directory to to_file's.

  If item is a relative path, it's relative to the build file dict that it's
  coming from.  Fix it up to make it relative to the build file dict that
  it's going into.

  Exception: any |item| that begins with these special characters is
  returned without modification.
    /   Used when a path is already absolute (shortcut optimization;
        such paths would be returned as absolute anyway)
    $   Used for build environment variables
    -   Used for some build environment flags (such as -lapr-1 in a
        "libraries" section)
    <   Used for our own variable and command expansions (see ExpandVariables)
    >   Used for our own variable and command expansions (see ExpandVariables)
    ^   Used for our own variable and command expansions (see ExpandVariables)

    "/' Used when a value is quoted.  If these are present, then we
        check the second character instead.
  """
  if to_file == fro_file or exception_re.match(item):
    return item
  else:
    # TODO(dglazkov) The backslash/forward-slash replacement at the end is a
    # temporary measure. This should really be addressed by keeping all paths
    # in POSIX until actual project generation.
    ret = os.path.normpath(os.path.join(
        gyp.common.RelativePath(os.path.dirname(fro_file),
                                os.path.dirname(to_file)),
        item)).replace('\\', '/')
    # normpath strips any trailing slash; restore it if the caller had one.
    # Robustness fix: use endswith so an empty item can't raise IndexError
    # the way the previous item[-1] subscript did.
    if item.endswith('/'):
      ret += '/'
    return ret
def MergeLists(to, fro, to_file, fro_file, is_paths=False, append=True):
  """Merges list |fro| into list |to|, in place.

  |to_file| and |fro_file| are the build files the lists belong to; when
  |is_paths| is True, string items from |fro| are rebased with
  MakePathRelative so they stay correct relative to |to_file|.  When
  |append| is True new items go at the end of |to|, otherwise they are
  prepended in order.  "Singleton" items (strings not starting with '-',
  and ints) appear at most once in the result.

  Raises TypeError for items that are not str/int/dict/list.
  """
  # Python documentation recommends objects which do not support hash
  # set this value to None. Python library objects follow this rule.
  is_hashable = lambda val: val.__hash__

  # If x is hashable, returns whether x is in s. Else returns whether x is in l.
  def is_in_set_or_list(x, s, l):
    if is_hashable(x):
      return x in s
    return x in l

  prepend_index = 0

  # Make membership testing of hashables in |to| (in particular, strings)
  # faster.
  hashable_to_set = set(x for x in to if is_hashable(x))
  for item in fro:
    singleton = False
    if type(item) in (str, int):
      # The cheap and easy case.
      if is_paths:
        to_item = MakePathRelative(to_file, fro_file, item)
      else:
        to_item = item

      if not (type(item) is str and item.startswith('-')):
        # Any string that doesn't begin with a "-" is a singleton - it can
        # only appear once in a list, to be enforced by the list merge append
        # or prepend.
        singleton = True
    elif type(item) is dict:
      # Make a copy of the dictionary, continuing to look for paths to fix.
      # The other intelligent aspects of merge processing won't apply because
      # item is being merged into an empty dict.
      to_item = {}
      MergeDicts(to_item, item, to_file, fro_file)
    elif type(item) is list:
      # Recurse, making a copy of the list.  If the list contains any
      # descendant dicts, path fixing will occur.  Note that here, custom
      # values for is_paths and append are dropped; those are only to be
      # applied to |to| and |fro|, not sublists of |fro|.  append shouldn't
      # matter anyway because the new |to_item| list is empty.
      to_item = []
      MergeLists(to_item, item, to_file, fro_file)
    else:
      raise TypeError(
          'Attempt to merge list item of unsupported type ' + \
          item.__class__.__name__)

    if append:
      # If appending a singleton that's already in the list, don't append.
      # This ensures that the earliest occurrence of the item will stay put.
      if not singleton or not is_in_set_or_list(to_item, hashable_to_set, to):
        to.append(to_item)
        if is_hashable(to_item):
          hashable_to_set.add(to_item)
    else:
      # If prepending a singleton that's already in the list, remove the
      # existing instance and proceed with the prepend.  This ensures that the
      # item appears at the earliest possible position in the list.
      while singleton and to_item in to:
        to.remove(to_item)

      # Don't just insert everything at index 0.  That would prepend the new
      # items to the list in reverse order, which would be an unwelcome
      # surprise.
      to.insert(prepend_index, to_item)
      if is_hashable(to_item):
        hashable_to_set.add(to_item)
      prepend_index = prepend_index + 1
def MergeDicts(to, fro, to_file, fro_file):
  """Merges dict |fro| into dict |to|, in place, copying all merged values.

  |to_file| and |fro_file| identify the build files the dicts came from;
  path-valued entries (per IsPathSection) are rebased with MakePathRelative.
  List values honor a merge policy chosen by a suffix on the from-key:
  '=' replace, '+' prepend, '?' set only if absent, no suffix append.

  Raises TypeError when a key's values have incompatible types and GypError
  when incompatible list policies appear together in |fro|.
  """
  # I wanted to name the parameter "from" but it's a Python keyword...
  for k, v in fro.iteritems():
    # It would be nice to do "if not k in to: to[k] = v" but that wouldn't give
    # copy semantics.  Something else may want to merge from the |fro| dict
    # later, and having the same dict ref pointed to twice in the tree isn't
    # what anyone wants considering that the dicts may subsequently be
    # modified.
    if k in to:
      bad_merge = False
      if type(v) in (str, int):
        if type(to[k]) not in (str, int):
          bad_merge = True
      elif type(v) is not type(to[k]):
        bad_merge = True

      if bad_merge:
        raise TypeError(
            'Attempt to merge dict value of type ' + v.__class__.__name__ + \
            ' into incompatible type ' + to[k].__class__.__name__ + \
            ' for key ' + k)
    if type(v) in (str, int):
      # Overwrite the existing value, if any.  Cheap and easy.
      is_path = IsPathSection(k)
      if is_path:
        to[k] = MakePathRelative(to_file, fro_file, v)
      else:
        to[k] = v
    elif type(v) is dict:
      # Recurse, guaranteeing copies will be made of objects that require it.
      if not k in to:
        to[k] = {}
      MergeDicts(to[k], v, to_file, fro_file)
    elif type(v) is list:
      # Lists in dicts can be merged with different policies, depending on
      # how the key in the "from" dict (k, the from-key) is written.
      #
      # If the from-key has          ...the to-list will have this action
      # this character appended:...  applied when receiving the from-list:
      #                           =  replace
      #                           +  prepend
      #                           ?  set, only if to-list does not yet exist
      #                      (none)  append
      #
      # This logic is list-specific, but since it relies on the associated
      # dict key, it's checked in this dict-oriented function.
      ext = k[-1]
      append = True
      if ext == '=':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '?']
        to[list_base] = []
      elif ext == '+':
        list_base = k[:-1]
        lists_incompatible = [list_base + '=', list_base + '?']
        append = False
      elif ext == '?':
        list_base = k[:-1]
        lists_incompatible = [list_base, list_base + '=', list_base + '+']
      else:
        list_base = k
        lists_incompatible = [list_base + '=', list_base + '?']

      # Some combinations of merge policies appearing together are meaningless.
      # It's stupid to replace and append simultaneously, for example.  Append
      # and prepend are the only policies that can coexist.
      for list_incompatible in lists_incompatible:
        if list_incompatible in fro:
          raise GypError('Incompatible list policies ' + k + ' and ' +
                         list_incompatible)

      if list_base in to:
        if ext == '?':
          # If the key ends in "?", the list will only be merged if it doesn't
          # already exist.
          continue
        elif type(to[list_base]) is not list:
          # This may not have been checked above if merging in a list with an
          # extension character.
          raise TypeError(
              'Attempt to merge dict value of type ' + v.__class__.__name__ + \
              ' into incompatible type ' + to[list_base].__class__.__name__ + \
              ' for key ' + list_base + '(' + k + ')')
      else:
        to[list_base] = []

      # Call MergeLists, which will make copies of objects that require it.
      # MergeLists can recurse back into MergeDicts, although this will be
      # to make copies of dicts (with paths fixed), there will be no
      # subsequent dict "merging" once entering a list because lists are
      # always replaced, appended to, or prepended to.
      is_paths = IsPathSection(list_base)
      MergeLists(to[list_base], v, to_file, fro_file, is_paths, append)
    else:
      raise TypeError(
          'Attempt to merge dict value of unsupported type ' + \
          v.__class__.__name__ + ' for key ' + k)
def MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, visited):
  """Recursively folds |configuration| and its 'inherit_from' ancestors
  into |new_configuration_dict|, parents first.

  |visited| lists configurations already merged on this inheritance path,
  so repeated or cyclic 'inherit_from' references are merged only once.
  """
  if configuration in visited:
    # Already handled on this path; nothing more to do.
    return

  config = target_dict['configurations'][configuration]

  # Merge ancestors before the configuration itself so that its own
  # settings land last.
  for parent in config.get('inherit_from', []):
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, parent, visited + [configuration])

  # Fold this configuration's own settings into the result.
  MergeDicts(new_configuration_dict, config, build_file, build_file)

  # The 'abstract' marker only matters while resolving inheritance; it must
  # never survive in the merged configuration.
  new_configuration_dict.pop('abstract', None)
def SetUpConfigurations(target, target_dict):
  """Builds full per-configuration dicts for |target|, in place.

  Each concrete (non-abstract) configuration inherits the target-scope
  settings (everything not in non_configuration_keys) plus its
  'inherit_from' chain; afterwards, keys that were moved into
  configurations are removed from the target scope, abstract
  configurations are dropped, and GypError is raised if any merged
  configuration contains a key from invalid_configuration_keys.
  """
  # key_suffixes is a list of key suffixes that might appear on key names.
  # These suffixes are handled in conditional evaluations (for =, +, and ?)
  # and rules/exclude processing (for ! and /).  Keys with these suffixes
  # should be treated the same as keys without.
  key_suffixes = ['=', '+', '?', '!', '/']

  build_file = gyp.common.BuildFile(target)

  # Provide a single configuration by default if none exists.
  # TODO(mark): Signal an error if default_configurations exists but
  # configurations does not.
  if not 'configurations' in target_dict:
    target_dict['configurations'] = {'Default': {}}
  if not 'default_configuration' in target_dict:
    # Pick the alphabetically-first concrete configuration as the default.
    concrete = [i for (i, config) in target_dict['configurations'].iteritems()
                if not config.get('abstract')]
    target_dict['default_configuration'] = sorted(concrete)[0]

  merged_configurations = {}
  configs = target_dict['configurations']
  for (configuration, old_configuration_dict) in configs.iteritems():
    # Skip abstract configurations (saves work only).
    if old_configuration_dict.get('abstract'):
      continue
    # Configurations inherit (most) settings from the enclosing target scope.
    # Get the inheritance relationship right by making a copy of the target
    # dict.
    new_configuration_dict = {}
    for (key, target_val) in target_dict.iteritems():
      key_ext = key[-1:]
      if key_ext in key_suffixes:
        key_base = key[:-1]
      else:
        key_base = key
      if not key_base in non_configuration_keys:
        new_configuration_dict[key] = gyp.simple_copy.deepcopy(target_val)

    # Merge in configuration (with all its parents first).
    MergeConfigWithInheritance(new_configuration_dict, build_file,
                               target_dict, configuration, [])

    merged_configurations[configuration] = new_configuration_dict

  # Put the new configurations back into the target dict as a configuration.
  for configuration in merged_configurations.keys():
    target_dict['configurations'][configuration] = (
      merged_configurations[configuration])

  # Now drop all the abstract ones.
  for configuration in target_dict['configurations'].keys():
    old_configuration_dict = target_dict['configurations'][configuration]
    if old_configuration_dict.get('abstract'):
      del target_dict['configurations'][configuration]

  # Now that all of the target's configurations have been built, go through
  # the target dict's keys and remove everything that's been moved into a
  # "configurations" section.
  delete_keys = []
  for key in target_dict:
    key_ext = key[-1:]
    if key_ext in key_suffixes:
      key_base = key[:-1]
    else:
      key_base = key
    if not key_base in non_configuration_keys:
      delete_keys.append(key)
  for key in delete_keys:
    del target_dict[key]

  # Check the configurations to see if they contain invalid keys.
  for configuration in target_dict['configurations'].keys():
    configuration_dict = target_dict['configurations'][configuration]
    for key in configuration_dict.keys():
      if key in invalid_configuration_keys:
        raise GypError('%s not allowed in the %s configuration, found in '
                       'target %s' % (key, configuration, target))
def ProcessListFiltersInDict(name, the_dict):
  """Process regular expression and exclusion-based filters on lists.

  Modifies |the_dict| in place; |name| is used only in error messages.

  An exclusion list is in a dict key named with a trailing "!", like
  "sources!".  Every item in such a list is removed from the associated
  main list, which in this example, would be "sources".  Removed items are
  placed into a "sources_excluded" list in the dict.

  Regular expression (regex) filters are contained in dict keys named with a
  trailing "/", such as "sources/" to operate on the "sources" list.  Regex
  filters in a dict take the form:
    'sources/': [ ['exclude', '_(linux|mac|win)\\.cc$'],
                  ['include', '_mac\\.cc$'] ],
  The first filter says to exclude all files ending in _linux.cc, _mac.cc, and
  _win.cc.  The second filter then includes all files ending in _mac.cc that
  are now or were once in the "sources" list.  Items matching an "exclude"
  filter are subject to the same processing as would occur if they were listed
  by name in an exclusion list (ending in "!").  Items matching an "include"
  filter are brought back into the main list if previously excluded by an
  exclusion list or exclusion regex filter.  Subsequent matching "exclude"
  patterns can still cause items to be excluded after matching an "include".
  """
  # Look through the dictionary for any lists whose keys end in "!" or "/".
  # These are lists that will be treated as exclude lists and regular
  # expression-based exclude/include lists.  Collect the lists that are
  # needed first, looking for the lists that they operate on, and assemble
  # then into |lists|.  This is done in a separate loop up front, because
  # the _included and _excluded keys need to be added to the_dict, and that
  # can't be done while iterating through it.
  lists = []
  del_lists = []
  for key, value in the_dict.iteritems():
    operation = key[-1]
    if operation != '!' and operation != '/':
      continue

    if type(value) is not list:
      raise ValueError(name + ' key ' + key + ' must be list, not ' + \
                       value.__class__.__name__)

    list_key = key[:-1]
    if list_key not in the_dict:
      # This happens when there's a list like "sources!" but no corresponding
      # "sources" list.  Since there's nothing for it to operate on, queue up
      # the "sources!" list for deletion now.
      del_lists.append(key)
      continue

    if type(the_dict[list_key]) is not list:
      value = the_dict[list_key]
      raise ValueError(name + ' key ' + list_key + \
                       ' must be list, not ' + \
                       value.__class__.__name__ + ' when applying ' + \
                       {'!': 'exclusion', '/': 'regex'}[operation])

    if not list_key in lists:
      lists.append(list_key)

  # Delete the lists that are known to be unneeded at this point.
  for del_list in del_lists:
    del the_dict[del_list]

  for list_key in lists:
    the_list = the_dict[list_key]

    # Initialize the list_actions list, which is parallel to the_list.  Each
    # item in list_actions identifies whether the corresponding item in
    # the_list should be excluded, unconditionally preserved (included), or
    # whether no exclusion or inclusion has been applied.  Items for which
    # no exclusion or inclusion has been applied (yet) have value -1, items
    # excluded have value 0, and items included have value 1.  Includes and
    # excludes override previous actions.  All items in list_actions are
    # initialized to -1 because no excludes or includes have been processed
    # yet.
    list_actions = list((-1,) * len(the_list))

    exclude_key = list_key + '!'
    if exclude_key in the_dict:
      for exclude_item in the_dict[exclude_key]:
        for index in xrange(0, len(the_list)):
          if exclude_item == the_list[index]:
            # This item matches the exclude_item, so set its action to 0
            # (exclude).
            list_actions[index] = 0

      # The "whatever!" list is no longer needed, dump it.
      del the_dict[exclude_key]

    regex_key = list_key + '/'
    if regex_key in the_dict:
      for regex_item in the_dict[regex_key]:
        [action, pattern] = regex_item
        pattern_re = re.compile(pattern)

        if action == 'exclude':
          # This item matches an exclude regex, so set its value to 0 (exclude).
          action_value = 0
        elif action == 'include':
          # This item matches an include regex, so set its value to 1 (include).
          action_value = 1
        else:
          # This is an action that doesn't make any sense.
          raise ValueError('Unrecognized action ' + action + ' in ' + name + \
                           ' key ' + regex_key)

        for index in xrange(0, len(the_list)):
          list_item = the_list[index]
          if list_actions[index] == action_value:
            # Even if the regex matches, nothing will change so continue (regex
            # searches are expensive).
            continue
          if pattern_re.search(list_item):
            # Regular expression match.
            list_actions[index] = action_value

      # The "whatever/" list is no longer needed, dump it.
      del the_dict[regex_key]

    # Add excluded items to the excluded list.
    #
    # Note that exclude_key ("sources!") is different from excluded_key
    # ("sources_excluded").  The exclude_key list is input and it was already
    # processed and deleted; the excluded_key list is output and it's about
    # to be created.
    excluded_key = list_key + '_excluded'
    if excluded_key in the_dict:
      raise GypError(name + ' key ' + excluded_key +
                     ' must not be present prior '
                     ' to applying exclusion/regex filters for ' + list_key)

    excluded_list = []

    # Go backwards through the list_actions list so that as items are deleted,
    # the indices of items that haven't been seen yet don't shift.  That means
    # that things need to be prepended to excluded_list to maintain them in the
    # same order that they existed in the_list.
    for index in xrange(len(list_actions) - 1, -1, -1):
      if list_actions[index] == 0:
        # Dump anything with action 0 (exclude).  Keep anything with action 1
        # (include) or -1 (no include or exclude seen for the item).
        excluded_list.insert(0, the_list[index])
        del the_list[index]

    # If anything was excluded, put the excluded list into the_dict at
    # excluded_key.
    if len(excluded_list) > 0:
      the_dict[excluded_key] = excluded_list

  # Now recurse into subdicts and lists that may contain dicts.
  for key, value in the_dict.iteritems():
    if type(value) is dict:
      ProcessListFiltersInDict(key, value)
    elif type(value) is list:
      ProcessListFiltersInList(key, value)
def ProcessListFiltersInList(name, the_list):
  """Applies exclusion/regex list filters to every dict found inside
  |the_list|, recursing through arbitrarily nested sublists."""
  for element in the_list:
    element_type = type(element)
    if element_type is dict:
      ProcessListFiltersInDict(name, element)
    elif element_type is list:
      ProcessListFiltersInList(name, element)
def ValidateTargetType(target, target_dict):
  """Checks that target_dict['type'] names a supported target type.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec.

  Raises GypError when the type is unknown, or when the
  'standalone_static_library' flag appears on a non-static_library target.
  """
  VALID_TARGET_TYPES = ('executable', 'loadable_module',
                        'static_library', 'shared_library',
                        'none')
  target_type = target_dict.get('type', None)
  if target_type not in VALID_TARGET_TYPES:
    raise GypError("Target %s has an invalid target type '%s'. "
                   "Must be one of %s." %
                   (target, target_type, '/'.join(VALID_TARGET_TYPES)))
  standalone = target_dict.get('standalone_static_library', 0)
  if standalone and target_type != 'static_library':
    raise GypError('Target %s has type %s but standalone_static_library flag is'
                   ' only valid for static_library type.' % (target,
                                                             target_type))
def ValidateSourcesInTarget(target, target_dict, build_file,
                            duplicate_basename_check):
  """Flags static_library targets whose compiled sources share a basename.

  Mac libtool cannot archive two object files with the same basename, so
  when |duplicate_basename_check| is set and the target is a
  'static_library', two compiled sources that collide raise GypError.
  """
  if not duplicate_basename_check:
    return
  if target_dict.get('type', None) != 'static_library':
    return

  # Group compiled sources by their extensionless basename.
  basenames = {}
  for source in target_dict.get('sources', []):
    name, ext = os.path.splitext(source)
    if ext not in ['.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']:
      # Headers and other non-compiled files can't collide in the archive.
      continue
    basenames.setdefault(os.path.basename(name), []).append(source)

  error = ''
  for basename, files in basenames.iteritems():
    if len(files) > 1:
      error += ' %s: %s\n' % (basename, ' '.join(files))

  if error:
    print('static library %s has several files with the same basename:\n' %
          target + error + 'libtool on Mac cannot handle that. Use '
          '--no-duplicate-basename-check to disable this validation.')
    raise GypError('Duplicate basenames in sources section, see list above')
def ValidateRulesInTarget(target, target_dict, extra_sources_for_rules):
  """Ensures that the rules sections in target_dict are valid and consistent,
  and determines which sources they apply to.

  Arguments:
    target: string, name of target.
    target_dict: dict, target spec containing "rules" and "sources" lists.
    extra_sources_for_rules: a list of keys to scan for rule matches in
        addition to 'sources'.
  """
  # Rule dicts seen so far, keyed by 'rule_name' and by dotless 'extension',
  # so duplicates can be rejected.
  rule_names = {}
  rule_extensions = {}

  for rule in target_dict.get('rules', []):
    rule_name = rule['rule_name']
    if rule_name in rule_names:
      raise GypError('rule %s exists in duplicate, target %s' %
                     (rule_name, target))
    rule_names[rule_name] = rule

    # Extensions are compared without a leading dot.
    rule_extension = rule['extension']
    if rule_extension.startswith('.'):
      rule_extension = rule_extension[1:]
    if rule_extension in rule_extensions:
      raise GypError(('extension %s associated with multiple rules, ' +
                      'target %s rules %s and %s') %
                     (rule_extension, target,
                      rule_extensions[rule_extension]['rule_name'],
                      rule_name))
    rule_extensions[rule_extension] = rule

    # rule_sources is an output of this function; reject inputs that already
    # carry one.
    if 'rule_sources' in rule:
      raise GypError(
          'rule_sources must not exist in input, target %s rule %s' %
          (target, rule_name))

    # Collect every source (from 'sources' plus any generator-specified
    # extra keys) whose extension matches this rule.
    matching_sources = []
    for source_key in ['sources'] + list(extra_sources_for_rules):
      for source in target_dict.get(source_key, []):
        (source_root, source_extension) = os.path.splitext(source)
        if source_extension.startswith('.'):
          source_extension = source_extension[1:]
        if source_extension == rule_extension:
          matching_sources.append(source)

    if len(matching_sources) > 0:
      rule['rule_sources'] = matching_sources
def ValidateRunAsInTarget(target, target_dict, build_file):
  """Checks the optional 'run_as' section of a target for well-formedness.

  'run_as' must be a dict with a non-empty 'action' list; the optional
  'working_directory' must be a string and the optional 'environment' a
  dict.  Raises GypError on any violation; absent 'run_as' is fine.
  """
  target_name = target_dict.get('target_name')
  run_as = target_dict.get('run_as')
  if not run_as:
    # 'run_as' is optional; nothing to validate.
    return
  if type(run_as) is not dict:
    raise GypError("The 'run_as' in target %s from file %s should be a "
                   "dictionary." %
                   (target_name, build_file))
  action = run_as.get('action')
  if not action:
    raise GypError("The 'run_as' in target %s from file %s must have an "
                   "'action' section." %
                   (target_name, build_file))
  if type(action) is not list:
    raise GypError("The 'action' for 'run_as' in target %s from file %s "
                   "must be a list." %
                   (target_name, build_file))
  working_directory = run_as.get('working_directory')
  if working_directory and type(working_directory) is not str:
    raise GypError("The 'working_directory' for 'run_as' in target %s "
                   "in file %s should be a string." %
                   (target_name, build_file))
  environment = run_as.get('environment')
  if environment and type(environment) is not dict:
    raise GypError("The 'environment' for 'run_as' in target %s "
                   "in file %s should be a dictionary." %
                   (target_name, build_file))
def ValidateActionsInTarget(target, target_dict, build_file):
  """Validates the inputs to the actions in a target.

  Every action must carry an 'action_name' and an 'inputs' list; when an
  'action' command is present its first element must be non-empty.
  Raises GypError on any violation.
  """
  target_name = target_dict.get('target_name')
  for action in target_dict.get('actions', []):
    if not action.get('action_name'):
      raise GypError("Anonymous action in target %s.  "
                     "An action must have an 'action_name' field." %
                     target_name)
    if action.get('inputs', None) is None:
      raise GypError('Action in target %s has no inputs.' % target_name)
    command = action.get('action')
    if command and not command[0]:
      raise GypError("Empty action as command in target %s." % target_name)
def TurnIntIntoStrInDict(the_dict):
  """Given dict the_dict, recursively converts all integers into strings.
  """
  # Snapshot the items up front: int keys may be deleted and re-inserted as
  # strings below, and there's no need to revisit the re-inserted entries.
  for key, value in list(the_dict.items()):
    value_type = type(value)
    if value_type is int:
      value = str(value)
      the_dict[key] = value
    elif value_type is dict:
      TurnIntIntoStrInDict(value)
    elif value_type is list:
      TurnIntIntoStrInList(value)

    if type(key) is int:
      del the_dict[key]
      the_dict[str(key)] = value
def TurnIntIntoStrInList(the_list):
  """Given list the_list, recursively converts all integers into strings.
  """
  # enumerate (rather than Python 2's xrange index loop) keeps this helper
  # portable across Python versions while still allowing in-place
  # replacement by index; the iteration semantics are identical.
  for index, item in enumerate(the_list):
    if type(item) is int:
      the_list[index] = str(item)
    elif type(item) is dict:
      TurnIntIntoStrInDict(item)
    elif type(item) is list:
      TurnIntIntoStrInList(item)
def PruneUnwantedTargets(targets, flat_list, dependency_nodes, root_targets,
                         data):
  """Return only the targets that are deep dependencies of |root_targets|."""
  # Resolve each requested root into fully-qualified target names.
  qualified_root_targets = []
  for root in root_targets:
    root = root.strip()
    matches = gyp.common.FindQualifiedTargets(root, flat_list)
    if not matches:
      raise GypError("Could not find target %s" % root)
    qualified_root_targets.extend(matches)

  # Keep every root plus everything any root transitively depends on.
  wanted_targets = {}
  for root in qualified_root_targets:
    wanted_targets[root] = targets[root]
    for dependency in dependency_nodes[root].DeepDependencies():
      wanted_targets[dependency] = targets[dependency]

  wanted_flat_list = [t for t in flat_list if t in wanted_targets]

  # Prune unwanted targets from each build_file's data dict.
  for build_file in data['target_build_files']:
    if not 'targets' in data[build_file]:
      continue
    kept_targets = []
    for target in data[build_file]['targets']:
      qualified_name = gyp.common.QualifiedTarget(build_file,
                                                  target['target_name'],
                                                  target['toolset'])
      if qualified_name in wanted_targets:
        kept_targets.append(target)
    data[build_file]['targets'] = kept_targets

  return wanted_targets, wanted_flat_list
def VerifyNoCollidingTargets(targets):
  """Verify that no two targets in the same directory share the same name.

  Arguments:
    targets: A list of targets in the form 'path/to/file.gyp:target_name'.

  Raises GypError on the first collision found.
  """
  # Keep a dict going from 'subdirectory:target_name' to 'foo.gyp'.
  used = {}
  for target in targets:
    # Separate out 'path/to/file.gyp, 'target_name' from
    # 'path/to/file.gyp:target_name'.
    path, name = target.rsplit(':', 1)
    # Separate out 'path/to', 'file.gyp' from 'path/to/file.gyp'.
    # Named gyp_file (not 'gyp') so the local doesn't shadow the imported
    # gyp module.
    subdir, gyp_file = os.path.split(path)
    # Use '.' for the current directory '', so that the error messages make
    # more sense.
    if not subdir:
      subdir = '.'
    # Prepare a key like 'path/to:target_name'.
    key = subdir + ':' + name
    if key in used:
      # Complain if this target is already used.
      raise GypError('Duplicate target name "%s" in directory "%s" used both '
                     'in "%s" and "%s".' % (name, subdir, gyp_file, used[key]))
    used[key] = gyp_file
def SetGeneratorGlobals(generator_input_info):
  """Populates this module's generator-dependent globals, layering the
  generator's data from |generator_input_info| over the built-in defaults."""
  global path_sections, non_configuration_keys
  global multiple_toolsets, generator_filelist_paths

  # Sections whose values are paths: defaults plus generator additions.
  path_sections = set(base_path_sections)
  path_sections.update(generator_input_info['path_sections'])

  # Keys that never belong inside a 'configurations' sub-dict.
  non_configuration_keys = base_non_configuration_keys[:]
  non_configuration_keys.extend(generator_input_info['non_configuration_keys'])

  multiple_toolsets = generator_input_info[
      'generator_supports_multiple_toolsets']
  generator_filelist_paths = generator_input_info['generator_filelist_paths']
def Load(build_files, variables, includes, depth, generator_input_info, check,
         circular_check, duplicate_basename_check, parallel, root_targets):
  """Loads |build_files| and runs the full GYP input-processing pipeline.

  Returns [flat_list, targets, data]: the dependency-ordered list of
  qualified target names, the dict of fully-processed target dicts keyed
  by qualified name, and the per-build-file data dict.
  """
  SetGeneratorGlobals(generator_input_info)
  # A generator can have other lists (in addition to sources) be processed
  # for rules.
  extra_sources_for_rules = generator_input_info['extra_sources_for_rules']

  # Load build files.  This loads every target-containing build file into
  # the |data| dictionary such that the keys to |data| are build file names,
  # and the values are the entire build file contents after "early" or "pre"
  # processing has been done and includes have been resolved.
  # NOTE: data contains both "target" files (.gyp) and "includes" (.gypi), as
  # well as meta-data (e.g. 'included_files' key). 'target_build_files' keeps
  # track of the keys corresponding to "target" files.
  data = {'target_build_files': set()}
  # Normalize paths everywhere.  This is important because paths will be
  # used as keys to the data dict and for references between input files.
  build_files = set(map(os.path.normpath, build_files))
  if parallel:
    LoadTargetBuildFilesParallel(build_files, data, variables, includes, depth,
                                 check, generator_input_info)
  else:
    aux_data = {}
    for build_file in build_files:
      try:
        LoadTargetBuildFile(build_file, data, aux_data,
                            variables, includes, depth, check, True)
      except Exception, e:
        # Annotate the exception with the offending file before re-raising.
        gyp.common.ExceptionAppend(e, 'while trying to load %s' % build_file)
        raise

  # Build a dict to access each target's subdict by qualified name.
  targets = BuildTargetsDict(data)

  # Fully qualify all dependency links.
  QualifyDependencies(targets)

  # Remove self-dependencies from targets that have 'prune_self_dependencies'
  # set to 1.
  RemoveSelfDependencies(targets)

  # Expand dependencies specified as build_file:*.
  ExpandWildcardDependencies(targets, data)

  # Remove all dependencies marked as 'link_dependency' from the targets of
  # type 'none'.
  RemoveLinkDependenciesFromNoneTargets(targets)

  # Apply exclude (!) and regex (/) list filters only for dependency_sections.
  for target_name, target_dict in targets.iteritems():
    # Filter on a temporary dict holding just the dependency-section keys so
    # other sections are untouched at this stage.
    tmp_dict = {}
    for key_base in dependency_sections:
      for op in ('', '!', '/'):
        key = key_base + op
        if key in target_dict:
          tmp_dict[key] = target_dict[key]
          del target_dict[key]
    ProcessListFiltersInDict(target_name, tmp_dict)
    # Write the results back to |target_dict|.
    for key in tmp_dict:
      target_dict[key] = tmp_dict[key]

  # Make sure every dependency appears at most once.
  RemoveDuplicateDependencies(targets)

  if circular_check:
    # Make sure that any targets in a.gyp don't contain dependencies in other
    # .gyp files that further depend on a.gyp.
    VerifyNoGYPFileCircularDependencies(targets)

  [dependency_nodes, flat_list] = BuildDependencyList(targets)

  if root_targets:
    # Remove, from |targets| and |flat_list|, the targets that are not deep
    # dependencies of the targets specified in |root_targets|.
    targets, flat_list = PruneUnwantedTargets(
        targets, flat_list, dependency_nodes, root_targets, data)

  # Check that no two targets in the same directory have the same name.
  VerifyNoCollidingTargets(flat_list)

  # Handle dependent settings of various types.
  for settings_type in ['all_dependent_settings',
                        'direct_dependent_settings',
                        'link_settings']:
    DoDependentSettings(settings_type, flat_list, targets, dependency_nodes)

    # Take out the dependent settings now that they've been published to all
    # of the targets that require them.
    for target in flat_list:
      if settings_type in targets[target]:
        del targets[target][settings_type]

  # Make sure static libraries don't declare dependencies on other static
  # libraries, but that linkables depend on all unlinked static libraries
  # that they need so that their link steps will be correct.
  gii = generator_input_info
  if gii['generator_wants_static_library_dependencies_adjusted']:
    AdjustStaticLibraryDependencies(flat_list, targets, dependency_nodes,
                                    gii['generator_wants_sorted_dependencies'])

  # Apply "post"/"late"/"target" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(
        target_dict, PHASE_LATE, variables, build_file)

  # Move everything that can go into a "configurations" section into one.
  for target in flat_list:
    target_dict = targets[target]
    SetUpConfigurations(target, target_dict)

  # Apply exclude (!) and regex (/) list filters.
  for target in flat_list:
    target_dict = targets[target]
    ProcessListFiltersInDict(target, target_dict)

  # Apply "latelate" variable expansions and condition evaluations.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ProcessVariablesAndConditionsInDict(
        target_dict, PHASE_LATELATE, variables, build_file)

  # Make sure that the rules make sense, and build up rule_sources lists as
  # needed.  Not all generators will need to use the rule_sources lists, but
  # some may, and it seems best to build the list in a common spot.
  # Also validate actions and run_as elements in targets.
  for target in flat_list:
    target_dict = targets[target]
    build_file = gyp.common.BuildFile(target)
    ValidateTargetType(target, target_dict)
    ValidateSourcesInTarget(target, target_dict, build_file,
                            duplicate_basename_check)
    ValidateRulesInTarget(target, target_dict, extra_sources_for_rules)
    ValidateRunAsInTarget(target, target_dict, build_file)
    ValidateActionsInTarget(target, target_dict, build_file)

  # Generators might not expect ints.  Turn them into strs.
  TurnIntIntoStrInDict(data)

  # TODO(mark): Return |data| for now because the generator needs a list of
  # build files that came in.  In the future, maybe it should just accept
  # a list, and not the whole data dict.
  return [flat_list, targets, data]
|
mit
|
asadziach/tensorflow
|
tensorflow/contrib/bayesflow/python/ops/entropy.py
|
85
|
1246
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for Entropy Ops. See ${python/contrib.bayesflow.entropy}."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.contrib.bayesflow.python.ops.entropy_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
# The public names re-exported from entropy_impl; remove_undocumented strips
# every other symbol from this module's namespace.
_allowed_symbols = [
    'ELBOForms', 'elbo_ratio', 'entropy_shannon', 'renyi_ratio', 'renyi_alpha'
]

remove_undocumented(__name__, _allowed_symbols)
|
apache-2.0
|
ymcagodme/Norwalk-Judo
|
django/contrib/gis/gdal/srs.py
|
291
|
11717
|
"""
The Spatial Reference class represents OGR Spatial Reference objects.
Example:
>>> from django.contrib.gis.gdal import SpatialReference
>>> srs = SpatialReference('WGS84')
>>> print srs
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
>>> print srs.proj
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> print srs.ellipsoid
(6378137.0, 6356752.3142451793, 298.25722356300003)
>>> print srs.projected, srs.geographic
False True
>>> srs.import_epsg(32140)
>>> print srs.name
NAD83 / Texas South Central
"""
import re
from ctypes import byref, c_char_p, c_int, c_void_p
# Getting the error checking routine and exceptions
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import OGRException, SRSException
from django.contrib.gis.gdal.prototypes import srs as capi
#### Spatial Reference class. ####
class SpatialReference(GDALBase):
    """
    A wrapper for the OGRSpatialReference object. According to the GDAL Web site,
    the SpatialReference object "provide[s] services to represent coordinate
    systems (projections and datums) and to transform between them."
    """
    #### Python 'magic' routines ####
    def __init__(self, srs_input=''):
        """
        Creates a GDAL OSR Spatial Reference object from the given input.
        The input may be string of OGC Well Known Text (WKT), an integer
        EPSG code, a PROJ.4 string, and/or a projection "well known" shorthand
        string (one of 'WGS84', 'WGS72', 'NAD27', 'NAD83').
        """
        # Empty C string buffer handed to capi.new_srs when a fresh SRS
        # pointer must be created (i.e. for 'user' and 'epsg' input).
        buf = c_char_p('')
        srs_type = 'user'
        if isinstance(srs_input, basestring):
            # Encoding to ASCII if unicode passed in.
            if isinstance(srs_input, unicode):
                srs_input = srs_input.encode('ascii')
            try:
                # If SRID is a string, e.g., '4326', then make acceptable
                # as user input.
                srid = int(srs_input)
                srs_input = 'EPSG:%d' % srid
            except ValueError:
                pass
        elif isinstance(srs_input, (int, long)):
            # EPSG integer code was input.
            srs_type = 'epsg'
        elif isinstance(srs_input, self.ptr_type):
            # Already an OGR SRS pointer (e.g. from clone()).
            srs = srs_input
            srs_type = 'ogr'
        else:
            raise TypeError('Invalid SRS type "%s"' % srs_type)
        if srs_type == 'ogr':
            # Input is already an SRS pointer.
            srs = srs_input
        else:
            # Creating a new SRS pointer, using the string buffer.
            srs = capi.new_srs(buf)
        # If the pointer is NULL, throw an exception.
        if not srs:
            raise SRSException('Could not create spatial reference from: %s' % srs_input)
        else:
            self.ptr = srs
        # Importing from either the user input string or an integer SRID.
        if srs_type == 'user':
            self.import_user_input(srs_input)
        elif srs_type == 'epsg':
            self.import_epsg(srs_input)
    def __del__(self):
        "Destroys this spatial reference."
        # Release the underlying OGR SRS pointer, if it was ever set.
        if self._ptr: capi.release_srs(self._ptr)
    def __getitem__(self, target):
        """
        Returns the value of the given string attribute node, None if the node
        doesn't exist. Can also take a tuple as a parameter, (target, child),
        where child is the index of the attribute in the WKT. For example:
        >>> wkt = 'GEOGCS["WGS 84", DATUM["WGS_1984, ... AUTHORITY["EPSG","4326"]]'
        >>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326
        >>> print srs['GEOGCS']
        WGS 84
        >>> print srs['DATUM']
        WGS_1984
        >>> print srs['AUTHORITY']
        EPSG
        >>> print srs['AUTHORITY', 1] # The authority value
        4326
        >>> print srs['TOWGS84', 4] # the fourth value in this wkt
        0
        >>> print srs['UNIT|AUTHORITY'] # For the units authority, have to use the pipe symbol.
        EPSG
        >>> print srs['UNIT|AUTHORITY', 1] # The authority value for the units
        9122
        """
        # A tuple argument is (target, child-index); a bare string uses index 0.
        if isinstance(target, tuple):
            return self.attr_value(*target)
        else:
            return self.attr_value(target)
    def __str__(self):
        "The string representation uses 'pretty' WKT."
        return self.pretty_wkt
    #### SpatialReference Methods ####
    def attr_value(self, target, index=0):
        """
        The attribute value for the given target node (e.g. 'PROJCS'). The index
        keyword specifies an index of the child node to return.
        """
        if not isinstance(target, basestring) or not isinstance(index, int):
            raise TypeError
        return capi.get_attr_value(self.ptr, target, index)
    def auth_name(self, target):
        "Returns the authority name for the given string target node."
        return capi.get_auth_name(self.ptr, target)
    def auth_code(self, target):
        "Returns the authority code for the given string target node."
        return capi.get_auth_code(self.ptr, target)
    def clone(self):
        "Returns a clone of this SpatialReference object."
        # capi.clone_srs returns a new OGR pointer; __init__ accepts it directly.
        return SpatialReference(capi.clone_srs(self.ptr))
    def from_esri(self):
        "Morphs this SpatialReference from ESRI's format to EPSG."
        capi.morph_from_esri(self.ptr)
    def identify_epsg(self):
        """
        This method inspects the WKT of this SpatialReference, and will
        add EPSG authority nodes where an EPSG identifier is applicable.
        """
        capi.identify_epsg(self.ptr)
    def to_esri(self):
        "Morphs this SpatialReference to ESRI's format."
        capi.morph_to_esri(self.ptr)
    def validate(self):
        "Checks to see if the given spatial reference is valid."
        capi.srs_validate(self.ptr)
    #### Name & SRID properties ####
    @property
    def name(self):
        "Returns the name of this Spatial Reference."
        # The relevant root node depends on the kind of coordinate system.
        if self.projected: return self.attr_value('PROJCS')
        elif self.geographic: return self.attr_value('GEOGCS')
        elif self.local: return self.attr_value('LOCAL_CS')
        else: return None
    @property
    def srid(self):
        "Returns the SRID of top-level authority, or None if undefined."
        try:
            return int(self.attr_value('AUTHORITY', 1))
        except (TypeError, ValueError):
            return None
    #### Unit Properties ####
    @property
    def linear_name(self):
        "Returns the name of the linear units."
        units, name = capi.linear_units(self.ptr, byref(c_char_p()))
        return name
    @property
    def linear_units(self):
        "Returns the value of the linear units."
        units, name = capi.linear_units(self.ptr, byref(c_char_p()))
        return units
    @property
    def angular_name(self):
        "Returns the name of the angular units."
        units, name = capi.angular_units(self.ptr, byref(c_char_p()))
        return name
    @property
    def angular_units(self):
        "Returns the value of the angular units."
        units, name = capi.angular_units(self.ptr, byref(c_char_p()))
        return units
    @property
    def units(self):
        """
        Returns a 2-tuple of the units value and the units name,
        and will automatically determine whether to return the linear
        or angular units.
        """
        if self.projected or self.local:
            return capi.linear_units(self.ptr, byref(c_char_p()))
        elif self.geographic:
            return capi.angular_units(self.ptr, byref(c_char_p()))
        else:
            return (None, None)
    #### Spheroid/Ellipsoid Properties ####
    @property
    def ellipsoid(self):
        """
        Returns a tuple of the ellipsoid parameters:
        (semimajor axis, semiminor axis, and inverse flattening)
        """
        return (self.semi_major, self.semi_minor, self.inverse_flattening)
    @property
    def semi_major(self):
        "Returns the Semi Major Axis for this Spatial Reference."
        # The c_int by-reference argument receives the OGR error code.
        return capi.semi_major(self.ptr, byref(c_int()))
    @property
    def semi_minor(self):
        "Returns the Semi Minor Axis for this Spatial Reference."
        return capi.semi_minor(self.ptr, byref(c_int()))
    @property
    def inverse_flattening(self):
        "Returns the Inverse Flattening for this Spatial Reference."
        return capi.invflattening(self.ptr, byref(c_int()))
    #### Boolean Properties ####
    @property
    def geographic(self):
        """
        Returns True if this SpatialReference is geographic
        (root node is GEOGCS).
        """
        return bool(capi.isgeographic(self.ptr))
    @property
    def local(self):
        "Returns True if this SpatialReference is local (root node is LOCAL_CS)."
        return bool(capi.islocal(self.ptr))
    @property
    def projected(self):
        """
        Returns True if this SpatialReference is a projected coordinate system
        (root node is PROJCS).
        """
        return bool(capi.isprojected(self.ptr))
    #### Import Routines #####
    def import_epsg(self, epsg):
        "Imports the Spatial Reference from the EPSG code (an integer)."
        capi.from_epsg(self.ptr, epsg)
    def import_proj(self, proj):
        "Imports the Spatial Reference from a PROJ.4 string."
        capi.from_proj(self.ptr, proj)
    def import_user_input(self, user_input):
        "Imports the Spatial Reference from the given user input string."
        capi.from_user_input(self.ptr, user_input)
    def import_wkt(self, wkt):
        "Imports the Spatial Reference from OGC WKT (string)"
        capi.from_wkt(self.ptr, byref(c_char_p(wkt)))
    def import_xml(self, xml):
        "Imports the Spatial Reference from an XML string."
        capi.from_xml(self.ptr, xml)
    #### Export Properties ####
    @property
    def wkt(self):
        "Returns the WKT representation of this Spatial Reference."
        return capi.to_wkt(self.ptr, byref(c_char_p()))
    @property
    def pretty_wkt(self, simplify=0):
        # NOTE(review): as a property this can never receive `simplify`
        # from callers; the default 0 is always used.
        "Returns the 'pretty' representation of the WKT."
        return capi.to_pretty_wkt(self.ptr, byref(c_char_p()), simplify)
    @property
    def proj(self):
        "Returns the PROJ.4 representation for this Spatial Reference."
        return capi.to_proj(self.ptr, byref(c_char_p()))
    @property
    def proj4(self):
        "Alias for proj()."
        return self.proj
    @property
    def xml(self, dialect=''):
        # NOTE(review): as a property this can never receive `dialect`
        # from callers; the default '' is always used.
        "Returns the XML representation of this Spatial Reference."
        return capi.to_xml(self.ptr, byref(c_char_p()), dialect)
class CoordTransform(GDALBase):
    "The coordinate system transformation object."
    def __init__(self, source, target):
        "Initializes on a source and target SpatialReference objects."
        # Guard clause: both endpoints must be SpatialReference instances.
        valid = isinstance(source, SpatialReference) and isinstance(target, SpatialReference)
        if not valid:
            raise TypeError('source and target must be of type SpatialReference')
        self.ptr = capi.new_ct(source._ptr, target._ptr)
        # Cache the endpoint names so __str__ does not depend on the
        # SpatialReference objects staying alive.
        self._srs1_name = source.name
        self._srs2_name = target.name
    def __del__(self):
        "Deletes this Coordinate Transformation object."
        if self._ptr:
            capi.destroy_ct(self._ptr)
    def __str__(self):
        return 'Transform from "%s" to "%s"' % (self._srs1_name, self._srs2_name)
|
bsd-3-clause
|
samuelchong/libcloud
|
libcloud/test/common/test_cloudstack.py
|
4
|
7293
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
try:
import simplejson as json
except ImportError:
import json
from libcloud.utils.py3 import httplib
from libcloud.utils.py3 import urlparse
from libcloud.utils.py3 import b
from libcloud.utils.py3 import parse_qsl
from libcloud.common.cloudstack import CloudStackConnection
from libcloud.common.types import MalformedResponseError
from libcloud.test import MockHttpTestCase
# Module-level countdown used by the delayed-async mock handler: each poll
# with a non-zero value reports the job as still running and decrements it.
async_delay = 0
class CloudStackMockDriver(object):
    """Minimal stand-in driver exposing only the attributes that
    CloudStackConnection reads while the tests run."""
    name = 'fake'
    host = 'nonexistent.'
    path = '/path'
    async_poll_frequency = 0
    async_delay = 0
class CloudStackCommonTest(unittest.TestCase):
    """Tests for CloudStackConnection request/response handling, driven
    entirely through the CloudStackMockHttp mock (no network)."""
    def setUp(self):
        # Route all connection traffic through the mock HTTP class.
        CloudStackConnection.conn_class = CloudStackMockHttp
        self.connection = CloudStackConnection('apikey', 'secret',
                                               host=CloudStackMockDriver.host)
        # Zero poll interval so async polling loops don't sleep in tests.
        self.connection.poll_interval = 0.0
        self.driver = self.connection.driver = CloudStackMockDriver()
    def test_sync_request_bad_response(self):
        # The '/bad/response' mock path returns JSON without the expected
        # '<command>response' wrapper, which must be rejected.
        self.driver.path = '/bad/response'
        try:
            self.connection._sync_request('fake')
        except Exception:
            # sys.exc_info()[1] is the py2/py3-portable way to get the
            # active exception instance.
            e = sys.exc_info()[1]
            self.assertTrue(isinstance(e, MalformedResponseError))
            return
        # Reached only if no exception was raised — force a failure.
        self.assertTrue(False)
    def test_sync_request(self):
        self.driver.path = '/sync'
        self.connection._sync_request('fake')
    def test_async_request_successful(self):
        self.driver.path = '/async/success'
        result = self.connection._async_request('fake')
        # The mock reports jobstatus 1 with this jobresult payload.
        self.assertEqual(result, {'fake': 'result'})
    def test_async_request_unsuccessful(self):
        # jobstatus 2 from the mock must surface as an exception carrying
        # the mock's errortext.
        self.driver.path = '/async/fail'
        try:
            self.connection._async_request('fake')
        except Exception:
            e = sys.exc_info()[1]
            self.assertEqual(CloudStackMockHttp.ERROR_TEXT, str(e))
            return
        # Reached only if no exception was raised — force a failure.
        self.assertFalse(True)
    def test_async_request_delayed(self):
        # The mock decrements the module-level async_delay once per poll;
        # after completion it must have counted down to zero.
        global async_delay
        self.driver.path = '/async/delayed'
        async_delay = 2
        self.connection._async_request('fake')
        self.assertEqual(async_delay, 0)
    def test_signature_algorithm(self):
        # Known request parameter sets and their expected HMAC-SHA1
        # signatures (base64); the third case exercises '+' encoding.
        cases = [
            (
                {
                    'command': 'listVirtualMachines'
                }, 'z/a9Y7J52u48VpqIgiwaGUMCso0='
            ), (
                {
                    'command': 'deployVirtualMachine',
                    'name': 'fred',
                    'displayname': 'George',
                    'serviceofferingid': 5,
                    'templateid': 17,
                    'zoneid': 23,
                    'networkids': 42
                }, 'gHTo7mYmadZ+zluKHzlEKb1i/QU='
            ), (
                {
                    'command': 'deployVirtualMachine',
                    'name': 'fred',
                    'displayname': 'George+Ringo',
                    'serviceofferingid': 5,
                    'templateid': 17,
                    'zoneid': 23,
                    'networkids': 42
                }, 'tAgfrreI1ZvWlWLClD3gu4+aKv4='
            )
        ]
        connection = CloudStackConnection('fnord', 'abracadabra')
        for case in cases:
            params = connection.add_default_params(case[0])
            self.assertEqual(connection._make_signature(params), b(case[1]))
class CloudStackMockHttp(MockHttpTestCase):
    """Mock HTTP endpoints for the CloudStack API; the URL path selects
    which handler method below serves the request."""
    # Error text returned by the failing async job mock.
    ERROR_TEXT = 'ERROR TEXT'
    def _response(self, status, result, response):
        # Shape expected by the mock framework:
        # (status, body, parsed-object, reason).
        return (status, json.dumps(result), result, response)
    def _check_request(self, url):
        # Every CloudStack request must carry these query parameters and
        # ask for JSON responses.
        url = urlparse.urlparse(url)
        query = dict(parse_qsl(url.query))
        self.assertTrue('apiKey' in query)
        self.assertTrue('command' in query)
        self.assertTrue('response' in query)
        self.assertTrue('signature' in query)
        self.assertTrue(query['response'] == 'json')
        return query
    def _bad_response(self, method, url, body, headers):
        # Deliberately omits the '<command>response' wrapper to trigger
        # MalformedResponseError in the connection.
        self._check_request(url)
        result = {'success': True}
        return self._response(httplib.OK, result, httplib.responses[httplib.OK])
    def _sync(self, method, url, body, headers):
        # Well-formed but empty synchronous response.
        query = self._check_request(url)
        result = {query['command'].lower() + 'response': {}}
        return self._response(httplib.OK, result, httplib.responses[httplib.OK])
    def _async_success(self, method, url, body, headers):
        # First call starts job '42'; subsequent queryAsyncJobResult polls
        # report immediate success (jobstatus 1).
        query = self._check_request(url)
        if query['command'].lower() == 'queryasyncjobresult':
            self.assertEqual(query['jobid'], '42')
            result = {
                query['command'].lower() + 'response': {
                    'jobstatus': 1,
                    'jobresult': {'fake': 'result'}
                }
            }
        else:
            result = {query['command'].lower() + 'response': {'jobid': '42'}}
        return self._response(httplib.OK, result, httplib.responses[httplib.OK])
    def _async_fail(self, method, url, body, headers):
        # Same shape as _async_success but polls report failure
        # (jobstatus 2) with ERROR_TEXT.
        query = self._check_request(url)
        if query['command'].lower() == 'queryasyncjobresult':
            self.assertEqual(query['jobid'], '42')
            result = {
                query['command'].lower() + 'response': {
                    'jobstatus': 2,
                    'jobresult': {'errortext': self.ERROR_TEXT}
                }
            }
        else:
            result = {query['command'].lower() + 'response': {'jobid': '42'}}
        return self._response(httplib.OK, result, httplib.responses[httplib.OK])
    def _async_delayed(self, method, url, body, headers):
        # Reports "still running" (jobstatus 0) while the module-level
        # async_delay counter is non-zero, decrementing it per poll.
        global async_delay
        query = self._check_request(url)
        if query['command'].lower() == 'queryasyncjobresult':
            self.assertEqual(query['jobid'], '42')
            if async_delay == 0:
                result = {
                    query['command'].lower() + 'response': {
                        'jobstatus': 1,
                        'jobresult': {'fake': 'result'}
                    }
                }
            else:
                result = {
                    query['command'].lower() + 'response': {
                        'jobstatus': 0,
                    }
                }
                async_delay -= 1
        else:
            result = {query['command'].lower() + 'response': {'jobid': '42'}}
        return self._response(httplib.OK, result, httplib.responses[httplib.OK])
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    sys.exit(unittest.main())
|
apache-2.0
|
fldc/CouchPotatoServer
|
libs/tornado/auth.py
|
102
|
61853
|
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""This module contains implementations of various third-party
authentication schemes.
All the classes in this file are class mixins designed to be used with
the `tornado.web.RequestHandler` class. They are used in two ways:
* On a login handler, use methods such as ``authenticate_redirect()``,
``authorize_redirect()``, and ``get_authenticated_user()`` to
establish the user's identity and store authentication tokens to your
database and/or cookies.
* In non-login handlers, use methods such as ``facebook_request()``
or ``twitter_request()`` to use the authentication tokens to make
requests to the respective services.
They all take slightly different arguments due to the fact all these
services implement authentication and authorization slightly differently.
See the individual service classes below for complete documentation.
Example usage for Google OpenID::
class GoogleOAuth2LoginHandler(tornado.web.RequestHandler,
tornado.auth.GoogleOAuth2Mixin):
@tornado.gen.coroutine
def get(self):
if self.get_argument('code', False):
user = yield self.get_authenticated_user(
redirect_uri='http://your.site.com/auth/google',
code=self.get_argument('code'))
# Save the user with e.g. set_secure_cookie
else:
yield self.authorize_redirect(
redirect_uri='http://your.site.com/auth/google',
client_id=self.settings['google_oauth']['key'],
scope=['profile', 'email'],
response_type='code',
extra_params={'approval_prompt': 'auto'})
.. versionchanged:: 4.0
All of the callback interfaces in this module are now guaranteed
to run their callback with an argument of ``None`` on error.
Previously some functions would do this while others would simply
terminate the request on their own. This change also ensures that
errors are more consistently reported through the ``Future`` interfaces.
"""
from __future__ import absolute_import, division, print_function, with_statement
import base64
import binascii
import functools
import hashlib
import hmac
import time
import uuid
from tornado.concurrent import TracebackFuture, chain_future, return_future
from tornado import gen
from tornado import httpclient
from tornado import escape
from tornado.httputil import url_concat
from tornado.log import gen_log
from tornado.stack_context import ExceptionStackContext
from tornado.util import u, unicode_type, ArgReplacer
try:
import urlparse # py2
except ImportError:
import urllib.parse as urlparse # py3
try:
import urllib.parse as urllib_parse # py3
except ImportError:
import urllib as urllib_parse # py2
try:
long # py2
except NameError:
long = int # py3
class AuthError(Exception):
    """Exception raised on third-party authentication failures in this module."""
    pass
def _auth_future_to_callback(callback, future):
try:
result = future.result()
except AuthError as e:
gen_log.warning(str(e))
result = None
callback(result)
def _auth_return_future(f):
    """Similar to tornado.concurrent.return_future, but uses the auth
    module's legacy callback interface.
    Note that when using this decorator the ``callback`` parameter
    inside the function will actually be a future.
    """
    # ArgReplacer locates the 'callback' argument position so it can be
    # swapped for the future at call time.
    replacer = ArgReplacer(f, 'callback')
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        future = TracebackFuture()
        # Replace the caller-supplied callback (if any) with the future;
        # the original callback is invoked when the future resolves.
        callback, args, kwargs = replacer.replace(future, args, kwargs)
        if callback is not None:
            future.add_done_callback(
                functools.partial(_auth_future_to_callback, callback))
        def handle_exception(typ, value, tb):
            # Route exceptions escaping f into the future, unless it has
            # already completed (then let them propagate).
            if future.done():
                return False
            else:
                future.set_exc_info((typ, value, tb))
                return True
        with ExceptionStackContext(handle_exception):
            f(*args, **kwargs)
        return future
    return wrapper
class OpenIdMixin(object):
    """Abstract implementation of OpenID and Attribute Exchange.
    See `GoogleMixin` below for a customized example (which also
    includes OAuth support).
    Class attributes:
    * ``_OPENID_ENDPOINT``: the identity provider's URI.
    """
    @return_future
    def authenticate_redirect(self, callback_uri=None,
                              ax_attrs=["name", "email", "language", "username"],
                              callback=None):
        """Redirects to the authentication URL for this service.
        After authentication, the service will redirect back to the given
        callback URI with additional parameters including ``openid.mode``.
        We request the given attributes for the authenticated user by
        default (name, email, language, and username). If you don't need
        all those attributes for your app, you can request fewer with
        the ax_attrs keyword argument.
        .. versionchanged:: 3.1
           Returns a `.Future` and takes an optional callback.  These are
           not strictly necessary as this method is synchronous,
           but they are supplied for consistency with
           `OAuthMixin.authorize_redirect`.
        """
        callback_uri = callback_uri or self.request.uri
        args = self._openid_args(callback_uri, ax_attrs=ax_attrs)
        self.redirect(self._OPENID_ENDPOINT + "?" + urllib_parse.urlencode(args))
        # Synchronous: resolve the @return_future immediately.
        callback()
    @_auth_return_future
    def get_authenticated_user(self, callback, http_client=None):
        """Fetches the authenticated user data upon redirect.
        This method should be called by the handler that receives the
        redirect from the `authenticate_redirect()` method (which is
        often the same as the one that calls it; in that case you would
        call `get_authenticated_user` if the ``openid.mode`` parameter
        is present and `authenticate_redirect` if it is not).
        The result of this method will generally be used to set a cookie.
        """
        # Verify the OpenID response via direct request to the OP
        args = dict((k, v[-1]) for k, v in self.request.arguments.items())
        args["openid.mode"] = u("check_authentication")
        url = self._OPENID_ENDPOINT
        if http_client is None:
            http_client = self.get_auth_http_client()
        http_client.fetch(url, functools.partial(
            self._on_authentication_verified, callback),
            method="POST", body=urllib_parse.urlencode(args))
    def _openid_args(self, callback_uri, ax_attrs=[], oauth_scope=None):
        """Builds the openid.* query parameters for the checkid_setup request,
        including Attribute Exchange (ax) and optional OAuth-hybrid args."""
        url = urlparse.urljoin(self.request.full_url(), callback_uri)
        args = {
            "openid.ns": "http://specs.openid.net/auth/2.0",
            "openid.claimed_id":
            "http://specs.openid.net/auth/2.0/identifier_select",
            "openid.identity":
            "http://specs.openid.net/auth/2.0/identifier_select",
            "openid.return_to": url,
            "openid.realm": urlparse.urljoin(url, '/'),
            "openid.mode": "checkid_setup",
        }
        if ax_attrs:
            args.update({
                "openid.ns.ax": "http://openid.net/srv/ax/1.0",
                "openid.ax.mode": "fetch_request",
            })
            ax_attrs = set(ax_attrs)
            required = []
            if "name" in ax_attrs:
                # "name" expands into the three axschema name parts.
                ax_attrs -= set(["name", "firstname", "fullname", "lastname"])
                required += ["firstname", "fullname", "lastname"]
                args.update({
                    "openid.ax.type.firstname":
                    "http://axschema.org/namePerson/first",
                    "openid.ax.type.fullname":
                    "http://axschema.org/namePerson",
                    "openid.ax.type.lastname":
                    "http://axschema.org/namePerson/last",
                })
            known_attrs = {
                "email": "http://axschema.org/contact/email",
                "language": "http://axschema.org/pref/language",
                "username": "http://axschema.org/namePerson/friendly",
            }
            for name in ax_attrs:
                args["openid.ax.type." + name] = known_attrs[name]
                required.append(name)
            args["openid.ax.required"] = ",".join(required)
        if oauth_scope:
            args.update({
                "openid.ns.oauth":
                "http://specs.openid.net/extensions/oauth/1.0",
                "openid.oauth.consumer": self.request.host.split(":")[0],
                "openid.oauth.scope": oauth_scope,
            })
        return args
    def _on_authentication_verified(self, future, response):
        # Response handler for the check_authentication POST; builds the
        # user dict from the Attribute Exchange values in the redirect.
        if response.error or b"is_valid:true" not in response.body:
            future.set_exception(AuthError(
                "Invalid OpenID response: %s" % (response.error or
                                                 response.body)))
            return
        # Make sure we got back at least an email from attribute exchange
        ax_ns = None
        for name in self.request.arguments:
            if name.startswith("openid.ns.") and \
                    self.get_argument(name) == u("http://openid.net/srv/ax/1.0"):
                ax_ns = name[10:]
                break
        def get_ax_arg(uri):
            # Resolve an axschema type URI to its value argument, or "".
            if not ax_ns:
                return u("")
            prefix = "openid." + ax_ns + ".type."
            ax_name = None
            for name in self.request.arguments.keys():
                if self.get_argument(name) == uri and name.startswith(prefix):
                    part = name[len(prefix):]
                    ax_name = "openid." + ax_ns + ".value." + part
                    break
            if not ax_name:
                return u("")
            return self.get_argument(ax_name, u(""))
        email = get_ax_arg("http://axschema.org/contact/email")
        name = get_ax_arg("http://axschema.org/namePerson")
        first_name = get_ax_arg("http://axschema.org/namePerson/first")
        last_name = get_ax_arg("http://axschema.org/namePerson/last")
        username = get_ax_arg("http://axschema.org/namePerson/friendly")
        locale = get_ax_arg("http://axschema.org/pref/language").lower()
        user = dict()
        name_parts = []
        if first_name:
            user["first_name"] = first_name
            name_parts.append(first_name)
        if last_name:
            user["last_name"] = last_name
            name_parts.append(last_name)
        if name:
            user["name"] = name
        elif name_parts:
            user["name"] = u(" ").join(name_parts)
        elif email:
            # Fall back to the local part of the email address.
            user["name"] = email.split("@")[0]
        if email:
            user["email"] = email
        if locale:
            user["locale"] = locale
        if username:
            user["username"] = username
        claimed_id = self.get_argument("openid.claimed_id", None)
        if claimed_id:
            user["claimed_id"] = claimed_id
        future.set_result(user)
    def get_auth_http_client(self):
        """Returns the `.AsyncHTTPClient` instance to be used for auth requests.
        May be overridden by subclasses to use an HTTP client other than
        the default.
        """
        return httpclient.AsyncHTTPClient()
class OAuthMixin(object):
"""Abstract implementation of OAuth 1.0 and 1.0a.
See `TwitterMixin` and `FriendFeedMixin` below for example implementations,
or `GoogleMixin` for an OAuth/OpenID hybrid.
Class attributes:
* ``_OAUTH_AUTHORIZE_URL``: The service's OAuth authorization url.
* ``_OAUTH_ACCESS_TOKEN_URL``: The service's OAuth access token url.
* ``_OAUTH_VERSION``: May be either "1.0" or "1.0a".
* ``_OAUTH_NO_CALLBACKS``: Set this to True if the service requires
advance registration of callbacks.
Subclasses must also override the `_oauth_get_user_future` and
`_oauth_consumer_token` methods.
"""
@return_future
def authorize_redirect(self, callback_uri=None, extra_params=None,
http_client=None, callback=None):
"""Redirects the user to obtain OAuth authorization for this service.
The ``callback_uri`` may be omitted if you have previously
registered a callback URI with the third-party service. For
some services (including Friendfeed), you must use a
previously-registered callback URI and cannot specify a
callback via this method.
This method sets a cookie called ``_oauth_request_token`` which is
subsequently used (and cleared) in `get_authenticated_user` for
security purposes.
Note that this method is asynchronous, although it calls
`.RequestHandler.finish` for you so it may not be necessary
to pass a callback or use the `.Future` it returns. However,
if this method is called from a function decorated with
`.gen.coroutine`, you must call it with ``yield`` to keep the
response from being closed prematurely.
.. versionchanged:: 3.1
Now returns a `.Future` and takes an optional callback, for
compatibility with `.gen.coroutine`.
"""
if callback_uri and getattr(self, "_OAUTH_NO_CALLBACKS", False):
raise Exception("This service does not support oauth_callback")
if http_client is None:
http_client = self.get_auth_http_client()
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
http_client.fetch(
self._oauth_request_token_url(callback_uri=callback_uri,
extra_params=extra_params),
functools.partial(
self._on_request_token,
self._OAUTH_AUTHORIZE_URL,
callback_uri,
callback))
else:
http_client.fetch(
self._oauth_request_token_url(),
functools.partial(
self._on_request_token, self._OAUTH_AUTHORIZE_URL,
callback_uri,
callback))
@_auth_return_future
def get_authenticated_user(self, callback, http_client=None):
"""Gets the OAuth authorized user and access token.
This method should be called from the handler for your
OAuth callback URL to complete the registration process. We run the
callback with the authenticated user dictionary. This dictionary
will contain an ``access_key`` which can be used to make authorized
requests to this service on behalf of the user. The dictionary will
also contain other fields such as ``name``, depending on the service
used.
"""
future = callback
request_key = escape.utf8(self.get_argument("oauth_token"))
oauth_verifier = self.get_argument("oauth_verifier", None)
request_cookie = self.get_cookie("_oauth_request_token")
if not request_cookie:
future.set_exception(AuthError(
"Missing OAuth request token cookie"))
return
self.clear_cookie("_oauth_request_token")
cookie_key, cookie_secret = [base64.b64decode(escape.utf8(i)) for i in request_cookie.split("|")]
if cookie_key != request_key:
future.set_exception(AuthError(
"Request token does not match cookie"))
return
token = dict(key=cookie_key, secret=cookie_secret)
if oauth_verifier:
token["verifier"] = oauth_verifier
if http_client is None:
http_client = self.get_auth_http_client()
http_client.fetch(self._oauth_access_token_url(token),
functools.partial(self._on_access_token, callback))
def _oauth_request_token_url(self, callback_uri=None, extra_params=None):
consumer_token = self._oauth_consumer_token()
url = self._OAUTH_REQUEST_TOKEN_URL
args = dict(
oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
oauth_version="1.0",
)
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
if callback_uri == "oob":
args["oauth_callback"] = "oob"
elif callback_uri:
args["oauth_callback"] = urlparse.urljoin(
self.request.full_url(), callback_uri)
if extra_params:
args.update(extra_params)
signature = _oauth10a_signature(consumer_token, "GET", url, args)
else:
signature = _oauth_signature(consumer_token, "GET", url, args)
args["oauth_signature"] = signature
return url + "?" + urllib_parse.urlencode(args)
def _on_request_token(self, authorize_url, callback_uri, callback,
response):
if response.error:
raise Exception("Could not get request token: %s" % response.error)
request_token = _oauth_parse_response(response.body)
data = (base64.b64encode(escape.utf8(request_token["key"])) + b"|" +
base64.b64encode(escape.utf8(request_token["secret"])))
self.set_cookie("_oauth_request_token", data)
args = dict(oauth_token=request_token["key"])
if callback_uri == "oob":
self.finish(authorize_url + "?" + urllib_parse.urlencode(args))
callback()
return
elif callback_uri:
args["oauth_callback"] = urlparse.urljoin(
self.request.full_url(), callback_uri)
self.redirect(authorize_url + "?" + urllib_parse.urlencode(args))
callback()
def _oauth_access_token_url(self, request_token):
consumer_token = self._oauth_consumer_token()
url = self._OAUTH_ACCESS_TOKEN_URL
args = dict(
oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
oauth_token=escape.to_basestring(request_token["key"]),
oauth_signature_method="HMAC-SHA1",
oauth_timestamp=str(int(time.time())),
oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
oauth_version="1.0",
)
if "verifier" in request_token:
args["oauth_verifier"] = request_token["verifier"]
if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
signature = _oauth10a_signature(consumer_token, "GET", url, args,
request_token)
else:
signature = _oauth_signature(consumer_token, "GET", url, args,
request_token)
args["oauth_signature"] = signature
return url + "?" + urllib_parse.urlencode(args)
def _on_access_token(self, future, response):
if response.error:
future.set_exception(AuthError("Could not fetch access token"))
return
access_token = _oauth_parse_response(response.body)
self._oauth_get_user_future(access_token).add_done_callback(
functools.partial(self._on_oauth_get_user, access_token, future))
def _oauth_consumer_token(self):
"""Subclasses must override this to return their OAuth consumer keys.
The return value should be a `dict` with keys ``key`` and ``secret``.
"""
raise NotImplementedError()
@return_future
def _oauth_get_user_future(self, access_token, callback):
"""Subclasses must override this to get basic information about the
user.
Should return a `.Future` whose result is a dictionary
containing information about the user, which may have been
retrieved by using ``access_token`` to make a request to the
service.
The access token will be added to the returned dictionary to make
the result of `get_authenticated_user`.
For backwards compatibility, the callback-based ``_oauth_get_user``
method is also supported.
"""
# By default, call the old-style _oauth_get_user, but new code
# should override this method instead.
self._oauth_get_user(access_token, callback)
def _oauth_get_user(self, access_token, callback):
raise NotImplementedError()
def _on_oauth_get_user(self, access_token, future, user_future):
if user_future.exception() is not None:
future.set_exception(user_future.exception())
return
user = user_future.result()
if not user:
future.set_exception(AuthError("Error getting user"))
return
user["access_token"] = access_token
future.set_result(user)
def _oauth_request_parameters(self, url, access_token, parameters=None,
                              method="GET"):
    """Returns the OAuth parameters as a dict for the given request.

    ``parameters`` should include all POST arguments and query string
    arguments that will be sent with the request.
    """
    # BUG FIX: `parameters` previously defaulted to a shared mutable {};
    # use None as the sentinel to avoid accidental cross-call state.
    consumer_token = self._oauth_consumer_token()
    base_args = dict(
        oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
        oauth_token=escape.to_basestring(access_token["key"]),
        oauth_signature_method="HMAC-SHA1",
        oauth_timestamp=str(int(time.time())),
        oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
        oauth_version="1.0",
    )
    # The signature covers both the protocol args and the request args,
    # but only the protocol args (plus the signature) are returned.
    args = {}
    args.update(base_args)
    args.update(parameters or {})
    if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
        signature = _oauth10a_signature(consumer_token, method, url, args,
                                        access_token)
    else:
        signature = _oauth_signature(consumer_token, method, url, args,
                                     access_token)
    base_args["oauth_signature"] = escape.to_basestring(signature)
    return base_args
def get_auth_http_client(self):
    """Returns the `.AsyncHTTPClient` instance to be used for auth requests.

    May be overridden by subclasses to use an HTTP client other than
    the default (e.g. one with a custom CA bundle or proxy settings).
    """
    return httpclient.AsyncHTTPClient()
class OAuth2Mixin(object):
    """Abstract implementation of OAuth 2.0.

    See `FacebookGraphMixin` below for an example implementation.

    Class attributes:

    * ``_OAUTH_AUTHORIZE_URL``: The service's authorization url.
    * ``_OAUTH_ACCESS_TOKEN_URL``: The service's access token url.
    """
    @return_future
    def authorize_redirect(self, redirect_uri=None, client_id=None,
                           client_secret=None, extra_params=None,
                           callback=None, scope=None, response_type="code"):
        """Redirects the user to obtain OAuth authorization for this service.

        Some providers require that you register a redirect URL with
        your application instead of passing one via this method. You
        should call this method to log the user in, and then call
        ``get_authenticated_user`` in the handler for your
        redirect URL to complete the authorization process.

        .. versionchanged:: 3.1
           Returns a `.Future` and takes an optional callback. These are
           not strictly necessary as this method is synchronous,
           but they are supplied for consistency with
           `OAuthMixin.authorize_redirect`.
        """
        args = {
            "redirect_uri": redirect_uri,
            "client_id": client_id,
            "response_type": response_type
        }
        if extra_params:
            args.update(extra_params)
        if scope:
            # OAuth 2 transmits multiple scopes as one space-separated string.
            args['scope'] = ' '.join(scope)
        self.redirect(
            url_concat(self._OAUTH_AUTHORIZE_URL, args))
        callback()  # synchronous method: resolve the @return_future at once

    def _oauth_request_token_url(self, redirect_uri=None, client_id=None,
                                 client_secret=None, code=None,
                                 extra_params=None):
        # Builds the access-token exchange URL from the auth code and
        # client credentials.
        url = self._OAUTH_ACCESS_TOKEN_URL
        args = dict(
            redirect_uri=redirect_uri,
            code=code,
            client_id=client_id,
            client_secret=client_secret,
        )
        if extra_params:
            args.update(extra_params)
        return url_concat(url, args)
class TwitterMixin(OAuthMixin):
    """Twitter OAuth authentication.

    To authenticate with Twitter, register your application with
    Twitter at http://twitter.com/apps. Then copy your Consumer Key
    and Consumer Secret to the application
    `~tornado.web.Application.settings` ``twitter_consumer_key`` and
    ``twitter_consumer_secret``. Use this mixin on the handler for the
    URL you registered as your application's callback URL.

    When your application is set up, you can use this mixin like this
    to authenticate the user with Twitter and get access to their stream::

        class TwitterLoginHandler(tornado.web.RequestHandler,
                                  tornado.auth.TwitterMixin):
            @tornado.gen.coroutine
            def get(self):
                if self.get_argument("oauth_token", None):
                    user = yield self.get_authenticated_user()
                    # Save the user using e.g. set_secure_cookie()
                else:
                    yield self.authorize_redirect()

    The user object returned by `~OAuthMixin.get_authenticated_user`
    includes the attributes ``username``, ``name``, ``access_token``,
    and all of the custom Twitter user attributes described at
    https://dev.twitter.com/docs/api/1.1/get/users/show
    """
    # OAuth 1.0a endpoints/settings consumed by the OAuthMixin machinery.
    _OAUTH_REQUEST_TOKEN_URL = "https://api.twitter.com/oauth/request_token"
    _OAUTH_ACCESS_TOKEN_URL = "https://api.twitter.com/oauth/access_token"
    _OAUTH_AUTHORIZE_URL = "https://api.twitter.com/oauth/authorize"
    _OAUTH_AUTHENTICATE_URL = "https://api.twitter.com/oauth/authenticate"
    _OAUTH_NO_CALLBACKS = False
    _TWITTER_BASE_URL = "https://api.twitter.com/1.1"

    @return_future
    def authenticate_redirect(self, callback_uri=None, callback=None):
        """Just like `~OAuthMixin.authorize_redirect`, but
        auto-redirects if authorized.

        This is generally the right interface to use if you are using
        Twitter for single-sign on.

        .. versionchanged:: 3.1
           Now returns a `.Future` and takes an optional callback, for
           compatibility with `.gen.coroutine`.
        """
        http = self.get_auth_http_client()
        # _on_request_token (inherited from OAuthMixin) performs the
        # redirect to _OAUTH_AUTHENTICATE_URL and then invokes `callback`.
        http.fetch(self._oauth_request_token_url(callback_uri=callback_uri),
                   functools.partial(
                       self._on_request_token, self._OAUTH_AUTHENTICATE_URL,
                       None, callback))

    @_auth_return_future
    def twitter_request(self, path, callback=None, access_token=None,
                        post_args=None, **args):
        """Fetches the given API path, e.g., ``statuses/user_timeline/btaylor``

        The path should not include the format or API version number.
        (we automatically use JSON format and API version 1).

        If the request is a POST, ``post_args`` should be provided. Query
        string arguments should be given as keyword arguments.

        All the Twitter methods are documented at http://dev.twitter.com/

        Many methods require an OAuth access token which you can
        obtain through `~OAuthMixin.authorize_redirect` and
        `~OAuthMixin.get_authenticated_user`. The user returned through that
        process includes an 'access_token' attribute that can be used
        to make authenticated requests via this method. Example
        usage::

            class MainHandler(tornado.web.RequestHandler,
                              tornado.auth.TwitterMixin):
                @tornado.web.authenticated
                @tornado.gen.coroutine
                def get(self):
                    new_entry = yield self.twitter_request(
                        "/statuses/update",
                        post_args={"status": "Testing Tornado Web Server"},
                        access_token=self.current_user["access_token"])
                    if not new_entry:
                        # Call failed; perhaps missing permission?
                        yield self.authorize_redirect()
                        return
                    self.finish("Posted a message!")
        """
        if path.startswith('http:') or path.startswith('https:'):
            # Raw urls are useful for e.g. search which doesn't follow the
            # usual pattern: http://search.twitter.com/search.json
            url = path
        else:
            url = self._TWITTER_BASE_URL + path + ".json"
        # Add the OAuth resource request signature if we have credentials
        if access_token:
            all_args = {}
            all_args.update(args)
            all_args.update(post_args or {})
            method = "POST" if post_args is not None else "GET"
            oauth = self._oauth_request_parameters(
                url, access_token, all_args, method=method)
            args.update(oauth)
        if args:
            url += "?" + urllib_parse.urlencode(args)
        http = self.get_auth_http_client()
        http_callback = functools.partial(self._on_twitter_request, callback)
        if post_args is not None:
            http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
                       callback=http_callback)
        else:
            http.fetch(url, callback=http_callback)

    def _on_twitter_request(self, future, response):
        # Resolve the request future with decoded JSON, or an AuthError
        # on any HTTP failure.
        if response.error:
            future.set_exception(AuthError(
                "Error response %s fetching %s" % (response.error,
                                                   response.request.url)))
            return
        future.set_result(escape.json_decode(response.body))

    def _oauth_consumer_token(self):
        # Consumer key/secret come from the application settings.
        self.require_setting("twitter_consumer_key", "Twitter OAuth")
        self.require_setting("twitter_consumer_secret", "Twitter OAuth")
        return dict(
            key=self.settings["twitter_consumer_key"],
            secret=self.settings["twitter_consumer_secret"])

    @gen.coroutine
    def _oauth_get_user_future(self, access_token):
        # Fetch the authenticated user's profile and normalize "username"
        # from Twitter's "screen_name".
        user = yield self.twitter_request(
            "/account/verify_credentials",
            access_token=access_token)
        if user:
            user["username"] = user["screen_name"]
        raise gen.Return(user)
class FriendFeedMixin(OAuthMixin):
    """FriendFeed OAuth authentication.

    To authenticate with FriendFeed, register your application with
    FriendFeed at http://friendfeed.com/api/applications. Then copy
    your Consumer Key and Consumer Secret to the application
    `~tornado.web.Application.settings` ``friendfeed_consumer_key``
    and ``friendfeed_consumer_secret``. Use this mixin on the handler
    for the URL you registered as your application's Callback URL.

    When your application is set up, you can use this mixin like this
    to authenticate the user with FriendFeed and get access to their feed::

        class FriendFeedLoginHandler(tornado.web.RequestHandler,
                                     tornado.auth.FriendFeedMixin):
            @tornado.gen.coroutine
            def get(self):
                if self.get_argument("oauth_token", None):
                    user = yield self.get_authenticated_user()
                    # Save the user using e.g. set_secure_cookie()
                else:
                    yield self.authorize_redirect()

    The user object returned by `~OAuthMixin.get_authenticated_user()` includes the
    attributes ``username``, ``name``, and ``description`` in addition to
    ``access_token``. You should save the access token with the user;
    it is required to make requests on behalf of the user later with
    `friendfeed_request()`.
    """
    # OAuth 1.0 endpoints/settings consumed by the OAuthMixin machinery.
    # FIX: the original defined _OAUTH_VERSION twice with the same value;
    # the redundant first assignment has been removed.
    _OAUTH_REQUEST_TOKEN_URL = "https://friendfeed.com/account/oauth/request_token"
    _OAUTH_ACCESS_TOKEN_URL = "https://friendfeed.com/account/oauth/access_token"
    _OAUTH_AUTHORIZE_URL = "https://friendfeed.com/account/oauth/authorize"
    _OAUTH_NO_CALLBACKS = True
    _OAUTH_VERSION = "1.0"

    @_auth_return_future
    def friendfeed_request(self, path, callback, access_token=None,
                           post_args=None, **args):
        """Fetches the given relative API path, e.g., "/bret/friends"

        If the request is a POST, ``post_args`` should be provided. Query
        string arguments should be given as keyword arguments.

        All the FriendFeed methods are documented at
        http://friendfeed.com/api/documentation.

        Many methods require an OAuth access token which you can
        obtain through `~OAuthMixin.authorize_redirect` and
        `~OAuthMixin.get_authenticated_user`. The user returned
        through that process includes an ``access_token`` attribute that
        can be used to make authenticated requests via this
        method.

        Example usage::

            class MainHandler(tornado.web.RequestHandler,
                              tornado.auth.FriendFeedMixin):
                @tornado.web.authenticated
                @tornado.gen.coroutine
                def get(self):
                    new_entry = yield self.friendfeed_request(
                        "/entry",
                        post_args={"body": "Testing Tornado Web Server"},
                        access_token=self.current_user["access_token"])
                    if not new_entry:
                        # Call failed; perhaps missing permission?
                        yield self.authorize_redirect()
                        return
                    self.finish("Posted a message!")
        """
        # Add the OAuth resource request signature if we have credentials
        url = "http://friendfeed-api.com/v2" + path
        if access_token:
            all_args = {}
            all_args.update(args)
            all_args.update(post_args or {})
            method = "POST" if post_args is not None else "GET"
            oauth = self._oauth_request_parameters(
                url, access_token, all_args, method=method)
            args.update(oauth)
        if args:
            url += "?" + urllib_parse.urlencode(args)
        callback = functools.partial(self._on_friendfeed_request, callback)
        http = self.get_auth_http_client()
        if post_args is not None:
            http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
                       callback=callback)
        else:
            http.fetch(url, callback=callback)

    def _on_friendfeed_request(self, future, response):
        # Resolve the request future with decoded JSON, or an AuthError
        # on any HTTP failure.
        if response.error:
            future.set_exception(AuthError(
                "Error response %s fetching %s" % (response.error,
                                                   response.request.url)))
            return
        future.set_result(escape.json_decode(response.body))

    def _oauth_consumer_token(self):
        # Consumer key/secret come from the application settings.
        self.require_setting("friendfeed_consumer_key", "FriendFeed OAuth")
        self.require_setting("friendfeed_consumer_secret", "FriendFeed OAuth")
        return dict(
            key=self.settings["friendfeed_consumer_key"],
            secret=self.settings["friendfeed_consumer_secret"])

    @gen.coroutine
    def _oauth_get_user_future(self, access_token, callback):
        # Fetch the user's feed info and normalize "username" from the id.
        user = yield self.friendfeed_request(
            "/feedinfo/" + access_token["username"],
            include="id,name,description", access_token=access_token)
        if user:
            user["username"] = user["id"]
        callback(user)

    def _parse_user_response(self, callback, user):
        # NOTE(review): appears unreferenced within this class; kept for
        # backwards compatibility with the old callback-style hook --
        # confirm no external callers before removing.
        if user:
            user["username"] = user["id"]
        callback(user)
class GoogleMixin(OpenIdMixin, OAuthMixin):
    """Google Open ID / OAuth authentication.

    .. deprecated:: 4.0
       New applications should use `GoogleOAuth2Mixin`
       below instead of this class. As of May 19, 2014, Google has stopped
       supporting registration-free authentication.

    No application registration is necessary to use Google for
    authentication or to access Google resources on behalf of a user.

    Google implements both OpenID and OAuth in a hybrid mode. If you
    just need the user's identity, use
    `~OpenIdMixin.authenticate_redirect`. If you need to make
    requests to Google on behalf of the user, use
    `authorize_redirect`. On return, parse the response with
    `~OpenIdMixin.get_authenticated_user`. We send a dict containing
    the values for the user, including ``email``, ``name``, and
    ``locale``.

    Example usage::

        class GoogleLoginHandler(tornado.web.RequestHandler,
                                 tornado.auth.GoogleMixin):
            @tornado.gen.coroutine
            def get(self):
                if self.get_argument("openid.mode", None):
                    user = yield self.get_authenticated_user()
                    # Save the user with e.g. set_secure_cookie()
                else:
                    yield self.authenticate_redirect()
    """
    _OPENID_ENDPOINT = "https://www.google.com/accounts/o8/ud"
    _OAUTH_ACCESS_TOKEN_URL = "https://www.google.com/accounts/OAuthGetAccessToken"

    @return_future
    def authorize_redirect(self, oauth_scope, callback_uri=None,
                           ax_attrs=["name", "email", "language", "username"],
                           callback=None):
        # NOTE(review): ax_attrs uses a mutable default list -- safe only
        # as long as _openid_args does not mutate it; confirm.
        """Authenticates and authorizes for the given Google resource.

        Some of the available resources which can be used in the ``oauth_scope``
        argument are:

        * Gmail Contacts - http://www.google.com/m8/feeds/
        * Calendar - http://www.google.com/calendar/feeds/
        * Finance - http://finance.google.com/finance/feeds/

        You can authorize multiple resources by separating the resource
        URLs with a space.

        .. versionchanged:: 3.1
           Returns a `.Future` and takes an optional callback. These are
           not strictly necessary as this method is synchronous,
           but they are supplied for consistency with
           `OAuthMixin.authorize_redirect`.
        """
        callback_uri = callback_uri or self.request.uri
        args = self._openid_args(callback_uri, ax_attrs=ax_attrs,
                                 oauth_scope=oauth_scope)
        self.redirect(self._OPENID_ENDPOINT + "?" + urllib_parse.urlencode(args))
        callback()  # synchronous method: resolve the @return_future at once

    @_auth_return_future
    def get_authenticated_user(self, callback):
        """Fetches the authenticated user data upon redirect."""
        # Look to see if we are doing combined OpenID/OAuth
        oauth_ns = ""
        for name, values in self.request.arguments.items():
            if name.startswith("openid.ns.") and \
                    values[-1] == b"http://specs.openid.net/extensions/oauth/1.0":
                oauth_ns = name[10:]
                break
        token = self.get_argument("openid." + oauth_ns + ".request_token", "")
        if token:
            http = self.get_auth_http_client()
            token = dict(key=token, secret="")
            # _on_access_token is inherited from OAuthMixin.
            http.fetch(self._oauth_access_token_url(token),
                       functools.partial(self._on_access_token, callback))
        else:
            # Pure OpenID flow: delegate to OpenIdMixin.
            chain_future(OpenIdMixin.get_authenticated_user(self),
                         callback)

    def _oauth_consumer_token(self):
        # Consumer key/secret come from the application settings.
        self.require_setting("google_consumer_key", "Google OAuth")
        self.require_setting("google_consumer_secret", "Google OAuth")
        return dict(
            key=self.settings["google_consumer_key"],
            secret=self.settings["google_consumer_secret"])

    def _oauth_get_user_future(self, access_token):
        # User info comes from the OpenID half of the hybrid protocol.
        return OpenIdMixin.get_authenticated_user(self)
class GoogleOAuth2Mixin(OAuth2Mixin):
    """Google authentication using OAuth2.

    In order to use, register your application with Google and copy the
    relevant parameters to your application settings.

    * Go to the Google Dev Console at http://console.developers.google.com
    * Select a project, or create a new one.
    * In the sidebar on the left, select APIs & Auth.
    * In the list of APIs, find the Google+ API service and set it to ON.
    * In the sidebar on the left, select Credentials.
    * In the OAuth section of the page, select Create New Client ID.
    * Set the Redirect URI to point to your auth handler
    * Copy the "Client secret" and "Client ID" to the application settings as
      {"google_oauth": {"key": CLIENT_ID, "secret": CLIENT_SECRET}}

    .. versionadded:: 3.2
    """
    _OAUTH_AUTHORIZE_URL = "https://accounts.google.com/o/oauth2/auth"
    _OAUTH_ACCESS_TOKEN_URL = "https://accounts.google.com/o/oauth2/token"
    _OAUTH_NO_CALLBACKS = False
    # Application-settings key holding the client id/secret dict.
    _OAUTH_SETTINGS_KEY = 'google_oauth'

    @_auth_return_future
    def get_authenticated_user(self, redirect_uri, code, callback):
        """Handles the login for the Google user, returning a user object.

        Example usage::

            class GoogleOAuth2LoginHandler(tornado.web.RequestHandler,
                                           tornado.auth.GoogleOAuth2Mixin):
                @tornado.gen.coroutine
                def get(self):
                    if self.get_argument('code', False):
                        user = yield self.get_authenticated_user(
                            redirect_uri='http://your.site.com/auth/google',
                            code=self.get_argument('code'))
                        # Save the user with e.g. set_secure_cookie
                    else:
                        yield self.authorize_redirect(
                            redirect_uri='http://your.site.com/auth/google',
                            client_id=self.settings['google_oauth']['key'],
                            scope=['profile', 'email'],
                            response_type='code',
                            extra_params={'approval_prompt': 'auto'})
        """
        # Exchange the authorization code for an access token via POST.
        http = self.get_auth_http_client()
        body = urllib_parse.urlencode({
            "redirect_uri": redirect_uri,
            "code": code,
            "client_id": self.settings[self._OAUTH_SETTINGS_KEY]['key'],
            "client_secret": self.settings[self._OAUTH_SETTINGS_KEY]['secret'],
            "grant_type": "authorization_code",
        })
        http.fetch(self._OAUTH_ACCESS_TOKEN_URL,
                   functools.partial(self._on_access_token, callback),
                   method="POST", headers={'Content-Type': 'application/x-www-form-urlencoded'}, body=body)

    def _on_access_token(self, future, response):
        """Callback function for the exchange to the access token."""
        if response.error:
            future.set_exception(AuthError('Google auth error: %s' % str(response)))
            return
        args = escape.json_decode(response.body)
        future.set_result(args)

    def get_auth_http_client(self):
        """Returns the `.AsyncHTTPClient` instance to be used for auth requests.

        May be overridden by subclasses to use an HTTP client other than
        the default.
        """
        return httpclient.AsyncHTTPClient()
class FacebookMixin(object):
    """Facebook Connect authentication.

    .. deprecated:: 1.1
       New applications should use `FacebookGraphMixin`
       below instead of this class. This class does not support the
       Future-based interface seen on other classes in this module.

    To authenticate with Facebook, register your application with
    Facebook at http://www.facebook.com/developers/apps.php. Then
    copy your API Key and Application Secret to the application settings
    ``facebook_api_key`` and ``facebook_secret``.

    When your application is set up, you can use this mixin like this
    to authenticate the user with Facebook::

        class FacebookHandler(tornado.web.RequestHandler,
                              tornado.auth.FacebookMixin):
            @tornado.web.asynchronous
            def get(self):
                if self.get_argument("session", None):
                    self.get_authenticated_user(self._on_auth)
                    return
                yield self.authenticate_redirect()

            def _on_auth(self, user):
                if not user:
                    raise tornado.web.HTTPError(500, "Facebook auth failed")
                # Save the user using, e.g., set_secure_cookie()

    The user object returned by `get_authenticated_user` includes the
    attributes ``facebook_uid`` and ``name`` in addition to session attributes
    like ``session_key``. You should save the session key with the user; it is
    required to make requests on behalf of the user later with
    `facebook_request`.
    """
    @return_future
    def authenticate_redirect(self, callback_uri=None, cancel_uri=None,
                              extended_permissions=None, callback=None):
        """Authenticates/installs this app for the current user.

        .. versionchanged:: 3.1
           Returns a `.Future` and takes an optional callback. These are
           not strictly necessary as this method is synchronous,
           but they are supplied for consistency with
           `OAuthMixin.authorize_redirect`.
        """
        self.require_setting("facebook_api_key", "Facebook Connect")
        callback_uri = callback_uri or self.request.uri
        args = {
            "api_key": self.settings["facebook_api_key"],
            "v": "1.0",
            "fbconnect": "true",
            "display": "page",
            "next": urlparse.urljoin(self.request.full_url(), callback_uri),
            "return_session": "true",
        }
        if cancel_uri:
            args["cancel_url"] = urlparse.urljoin(
                self.request.full_url(), cancel_uri)
        if extended_permissions:
            # Accept a single permission name or a list of names.
            if isinstance(extended_permissions, (unicode_type, bytes)):
                extended_permissions = [extended_permissions]
            args["req_perms"] = ",".join(extended_permissions)
        self.redirect("http://www.facebook.com/login.php?" +
                      urllib_parse.urlencode(args))
        callback()  # synchronous method: resolve the @return_future at once

    def authorize_redirect(self, extended_permissions, callback_uri=None,
                           cancel_uri=None, callback=None):
        """Redirects to an authorization request for the given FB resource.

        The available resource names are listed at
        http://wiki.developers.facebook.com/index.php/Extended_permission.
        The most common resource types include:

        * publish_stream
        * read_stream
        * email
        * sms

        extended_permissions can be a single permission name or a list of
        names. To get the session secret and session key, call
        get_authenticated_user() just as you would with
        authenticate_redirect().

        .. versionchanged:: 3.1
           Returns a `.Future` and takes an optional callback. These are
           not strictly necessary as this method is synchronous,
           but they are supplied for consistency with
           `OAuthMixin.authorize_redirect`.
        """
        return self.authenticate_redirect(callback_uri, cancel_uri,
                                          extended_permissions,
                                          callback=callback)

    def get_authenticated_user(self, callback):
        """Fetches the authenticated Facebook user.

        The authenticated user includes the special Facebook attributes
        'session_key' and 'facebook_uid' in addition to the standard
        user attributes like 'name'.
        """
        self.require_setting("facebook_api_key", "Facebook Connect")
        # The "session" argument is a JSON blob set by Facebook's redirect.
        session = escape.json_decode(self.get_argument("session"))
        self.facebook_request(
            method="facebook.users.getInfo",
            callback=functools.partial(
                self._on_get_user_info, callback, session),
            session_key=session["session_key"],
            uids=session["uid"],
            fields="uid,first_name,last_name,name,locale,pic_square,"
                   "profile_url,username")

    def facebook_request(self, method, callback, **args):
        """Makes a Facebook API REST request.

        We automatically include the Facebook API key and signature, but
        it is the callers responsibility to include 'session_key' and any
        other required arguments to the method.

        The available Facebook methods are documented here:
        http://wiki.developers.facebook.com/index.php/API

        Here is an example for the stream.get() method::

            class MainHandler(tornado.web.RequestHandler,
                              tornado.auth.FacebookMixin):
                @tornado.web.authenticated
                @tornado.web.asynchronous
                def get(self):
                    self.facebook_request(
                        method="stream.get",
                        callback=self._on_stream,
                        session_key=self.current_user["session_key"])

                def _on_stream(self, stream):
                    if stream is None:
                        # Not authorized to read the stream yet?
                        self.redirect(self.authorize_redirect("read_stream"))
                        return
                    self.render("stream.html", stream=stream)
        """
        self.require_setting("facebook_api_key", "Facebook Connect")
        self.require_setting("facebook_secret", "Facebook Connect")
        if not method.startswith("facebook."):
            method = "facebook." + method
        args["api_key"] = self.settings["facebook_api_key"]
        args["v"] = "1.0"
        args["method"] = method
        # NOTE(review): `long` is a Python 2 builtin; presumably provided by
        # a py2/py3 compat shim imported at the top of this file -- confirm.
        args["call_id"] = str(long(time.time() * 1e6))
        args["format"] = "json"
        args["sig"] = self._signature(args)
        url = "http://api.facebook.com/restserver.php?" + \
            urllib_parse.urlencode(args)
        http = self.get_auth_http_client()
        http.fetch(url, callback=functools.partial(
            self._parse_response, callback))

    def _on_get_user_info(self, callback, session, users):
        # Merge the first user record from users.getInfo with the session.
        if users is None:
            callback(None)
            return
        callback({
            "name": users[0]["name"],
            "first_name": users[0]["first_name"],
            "last_name": users[0]["last_name"],
            "uid": users[0]["uid"],
            "locale": users[0]["locale"],
            "pic_square": users[0]["pic_square"],
            "profile_url": users[0]["profile_url"],
            "username": users[0].get("username"),
            "session_key": session["session_key"],
            "session_expires": session.get("expires"),
        })

    def _parse_response(self, callback, response):
        # Best-effort parse: any HTTP, JSON, or API-level error results in
        # callback(None) after logging a warning.
        if response.error:
            gen_log.warning("HTTP error from Facebook: %s", response.error)
            callback(None)
            return
        try:
            json = escape.json_decode(response.body)
        except Exception:
            gen_log.warning("Invalid JSON from Facebook: %r", response.body)
            callback(None)
            return
        if isinstance(json, dict) and json.get("error_code"):
            gen_log.warning("Facebook error: %d: %r", json["error_code"],
                            json.get("error_msg"))
            callback(None)
            return
        callback(json)

    def _signature(self, args):
        # Legacy Facebook REST signature: md5 of sorted "k=v" pairs plus
        # the application secret.
        parts = ["%s=%s" % (n, args[n]) for n in sorted(args.keys())]
        body = "".join(parts) + self.settings["facebook_secret"]
        if isinstance(body, unicode_type):
            body = body.encode("utf-8")
        return hashlib.md5(body).hexdigest()

    def get_auth_http_client(self):
        """Returns the `.AsyncHTTPClient` instance to be used for auth requests.

        May be overridden by subclasses to use an HTTP client other than
        the default.
        """
        return httpclient.AsyncHTTPClient()
class FacebookGraphMixin(OAuth2Mixin):
    """Facebook authentication using the new Graph API and OAuth2."""
    _OAUTH_ACCESS_TOKEN_URL = "https://graph.facebook.com/oauth/access_token?"
    _OAUTH_AUTHORIZE_URL = "https://www.facebook.com/dialog/oauth?"
    _OAUTH_NO_CALLBACKS = False
    _FACEBOOK_BASE_URL = "https://graph.facebook.com"

    @_auth_return_future
    def get_authenticated_user(self, redirect_uri, client_id, client_secret,
                               code, callback, extra_fields=None):
        """Handles the login for the Facebook user, returning a user object.

        Example usage::

            class FacebookGraphLoginHandler(LoginHandler, tornado.auth.FacebookGraphMixin):
                @tornado.gen.coroutine
                def get(self):
                    if self.get_argument("code", False):
                        user = yield self.get_authenticated_user(
                            redirect_uri='/auth/facebookgraph/',
                            client_id=self.settings["facebook_api_key"],
                            client_secret=self.settings["facebook_secret"],
                            code=self.get_argument("code"))
                        # Save the user with e.g. set_secure_cookie
                    else:
                        yield self.authorize_redirect(
                            redirect_uri='/auth/facebookgraph/',
                            client_id=self.settings["facebook_api_key"],
                            extra_params={"scope": "read_stream,offline_access"})
        """
        http = self.get_auth_http_client()
        args = {
            "redirect_uri": redirect_uri,
            "code": code,
            "client_id": client_id,
            "client_secret": client_secret,
        }
        # Fields always copied into the returned user dict.
        fields = set(['id', 'name', 'first_name', 'last_name',
                      'locale', 'picture', 'link'])
        if extra_fields:
            fields.update(extra_fields)
        http.fetch(self._oauth_request_token_url(**args),
                   functools.partial(self._on_access_token, redirect_uri, client_id,
                                     client_secret, callback, fields))

    def _on_access_token(self, redirect_uri, client_id, client_secret,
                         future, fields, response):
        # Token exchange response arrived; on success, fetch "/me".
        if response.error:
            future.set_exception(AuthError('Facebook auth error: %s' % str(response)))
            return
        args = escape.parse_qs_bytes(escape.native_str(response.body))
        session = {
            "access_token": args["access_token"][-1],
            # NOTE(review): parse_qs values are lists, so "expires" is
            # stored as a list (or None) here -- confirm callers expect that.
            "expires": args.get("expires")
        }
        self.facebook_request(
            path="/me",
            callback=functools.partial(
                self._on_get_user_info, future, session, fields),
            access_token=session["access_token"],
            fields=",".join(fields)
        )

    def _on_get_user_info(self, future, session, fields, user):
        # Project the requested fields out of the "/me" response and
        # attach the token/expiry before resolving the future.
        if user is None:
            future.set_result(None)
            return
        fieldmap = {}
        for field in fields:
            fieldmap[field] = user.get(field)
        fieldmap.update({"access_token": session["access_token"], "session_expires": session.get("expires")})
        future.set_result(fieldmap)

    @_auth_return_future
    def facebook_request(self, path, callback, access_token=None,
                         post_args=None, **args):
        """Fetches the given relative API path, e.g., "/btaylor/picture"

        If the request is a POST, ``post_args`` should be provided. Query
        string arguments should be given as keyword arguments.

        An introduction to the Facebook Graph API can be found at
        http://developers.facebook.com/docs/api

        Many methods require an OAuth access token which you can
        obtain through `~OAuth2Mixin.authorize_redirect` and
        `get_authenticated_user`. The user returned through that
        process includes an ``access_token`` attribute that can be
        used to make authenticated requests via this method.

        Example usage::

            class MainHandler(tornado.web.RequestHandler,
                              tornado.auth.FacebookGraphMixin):
                @tornado.web.authenticated
                @tornado.gen.coroutine
                def get(self):
                    new_entry = yield self.facebook_request(
                        "/me/feed",
                        post_args={"message": "I am posting from my Tornado application!"},
                        access_token=self.current_user["access_token"])
                    if not new_entry:
                        # Call failed; perhaps missing permission?
                        yield self.authorize_redirect()
                        return
                    self.finish("Posted a message!")

        The given path is relative to ``self._FACEBOOK_BASE_URL``,
        by default "https://graph.facebook.com".

        .. versionchanged:: 3.1
           Added the ability to override ``self._FACEBOOK_BASE_URL``.
        """
        url = self._FACEBOOK_BASE_URL + path
        all_args = {}
        if access_token:
            all_args["access_token"] = access_token
        all_args.update(args)
        if all_args:
            url += "?" + urllib_parse.urlencode(all_args)
        callback = functools.partial(self._on_facebook_request, callback)
        http = self.get_auth_http_client()
        if post_args is not None:
            http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
                       callback=callback)
        else:
            http.fetch(url, callback=callback)

    def _on_facebook_request(self, future, response):
        # Resolve the request future with decoded JSON, or an AuthError
        # on any HTTP failure.
        if response.error:
            future.set_exception(AuthError("Error response %s fetching %s" %
                                           (response.error, response.request.url)))
            return
        future.set_result(escape.json_decode(response.body))

    def get_auth_http_client(self):
        """Returns the `.AsyncHTTPClient` instance to be used for auth requests.

        May be overridden by subclasses to use an HTTP client other than
        the default.
        """
        return httpclient.AsyncHTTPClient()
def _oauth_signature(consumer_token, method, url, parameters=None, token=None):
    """Calculates the HMAC-SHA1 OAuth signature for the given request.

    See http://oauth.net/core/1.0/#signing_process

    FIX: `parameters` previously defaulted to a shared mutable {}; use
    None as the sentinel to avoid accidental cross-call state.
    """
    scheme, netloc, path = urlparse.urlparse(url)[:3]
    normalized_url = scheme.lower() + "://" + netloc.lower() + path
    # Signature base string: METHOD & normalized-url & sorted-params.
    base_elems = [
        method.upper(),
        normalized_url,
        "&".join("%s=%s" % (k, _oauth_escape(str(v)))
                 for k, v in sorted((parameters or {}).items())),
    ]
    base_string = "&".join(_oauth_escape(e) for e in base_elems)
    # Signing key: consumer secret & token secret (unescaped in OAuth 1.0).
    key_elems = [escape.utf8(consumer_token["secret"])]
    key_elems.append(escape.utf8(token["secret"] if token else ""))
    key = b"&".join(key_elems)
    hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1)
    return binascii.b2a_base64(hash.digest())[:-1]
def _oauth10a_signature(consumer_token, method, url, parameters=None, token=None):
    """Calculates the HMAC-SHA1 OAuth 1.0a signature for the given request.

    See http://oauth.net/core/1.0a/#signing_process

    FIX: `parameters` previously defaulted to a shared mutable {}; use
    None as the sentinel to avoid accidental cross-call state.
    """
    scheme, netloc, path = urlparse.urlparse(url)[:3]
    normalized_url = scheme.lower() + "://" + netloc.lower() + path
    # Signature base string: METHOD & normalized-url & sorted-params.
    base_elems = [
        method.upper(),
        normalized_url,
        "&".join("%s=%s" % (k, _oauth_escape(str(v)))
                 for k, v in sorted((parameters or {}).items())),
    ]
    base_string = "&".join(_oauth_escape(e) for e in base_elems)
    # Unlike OAuth 1.0, 1.0a percent-encodes the key material.
    key_elems = [escape.utf8(urllib_parse.quote(consumer_token["secret"], safe='~'))]
    key_elems.append(escape.utf8(urllib_parse.quote(token["secret"], safe='~') if token else ""))
    key = b"&".join(key_elems)
    hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1)
    return binascii.b2a_base64(hash.digest())[:-1]
def _oauth_escape(val):
    """Percent-encodes ``val`` for OAuth, leaving only "~" unescaped."""
    encoded = val.encode("utf-8") if isinstance(val, unicode_type) else val
    return urllib_parse.quote(encoded, safe="~")
def _oauth_parse_response(body):
    """Parses an OAuth token response body into a token dict.

    The result always has ``key`` and ``secret`` entries; any extra
    parameters the provider included are copied in under their own names.
    """
    # I can't find an officially-defined encoding for oauth responses and
    # have never seen anyone use non-ascii. Leave the response in a byte
    # string for python 2, and use utf8 on python 3.
    text = escape.native_str(body)
    params = urlparse.parse_qs(text, keep_blank_values=False)
    token = {
        "key": params["oauth_token"][0],
        "secret": params["oauth_token_secret"][0],
    }
    # Add the extra parameters the Provider included to the token
    for name, values in params.items():
        if name not in ("oauth_token", "oauth_token_secret"):
            token[name] = values[0]
    return token
|
gpl-3.0
|
garvitr/sympy
|
sympy/stats/crv_types.py
|
22
|
60361
|
"""
Continuous Random Variables - Prebuilt variables
Contains
========
Arcsin
Benini
Beta
BetaPrime
Cauchy
Chi
ChiNoncentral
ChiSquared
Dagum
Erlang
Exponential
FDistribution
FisherZ
Frechet
Gamma
GammaInverse
Kumaraswamy
Laplace
Logistic
LogNormal
Maxwell
Nakagami
Normal
Pareto
QuadraticU
RaisedCosine
Rayleigh
StudentT
Triangular
Uniform
UniformSum
VonMises
Weibull
WignerSemicircle
"""
from __future__ import print_function, division
from sympy import (log, sqrt, pi, S, Dummy, Interval, sympify, gamma,
Piecewise, And, Eq, binomial, factorial, Sum, floor, Abs,
Lambda, Basic)
from sympy import beta as beta_fn
from sympy import cos, exp, besseli
from sympy.stats.crv import (SingleContinuousPSpace, SingleContinuousDistribution,
ContinuousDistributionHandmade)
from sympy.stats.rv import _value_check
import random
# Shorthand for positive infinity, used in the support intervals below.
oo = S.Infinity
# Public API of this module: the generic ContinuousRV constructor plus one
# factory function per prebuilt continuous distribution.
__all__ = ['ContinuousRV',
'Arcsin',
'Benini',
'Beta',
'BetaPrime',
'Cauchy',
'Chi',
'ChiNoncentral',
'ChiSquared',
'Dagum',
'Erlang',
'Exponential',
'FDistribution',
'FisherZ',
'Frechet',
'Gamma',
'GammaInverse',
'Kumaraswamy',
'Laplace',
'Logistic',
'LogNormal',
'Maxwell',
'Nakagami',
'Normal',
'Pareto',
'QuadraticU',
'RaisedCosine',
'Rayleigh',
'StudentT',
'Triangular',
'Uniform',
'UniformSum',
'VonMises',
'Weibull',
'WignerSemicircle'
]
def ContinuousRV(symbol, density, set=Interval(-oo, oo)):
    """
    Build a continuous random variable from an explicit density, given:

    -- a symbol
    -- a probability density function
    -- set on which the pdf is valid (defaults to entire real line)

    Returns a RandomSymbol.

    Many common continuous random variable types are already implemented.
    This function should be necessary only very rarely.

    Examples
    ========

    >>> from sympy import Symbol, sqrt, exp, pi
    >>> from sympy.stats import ContinuousRV, P, E
    >>> x = Symbol("x")
    >>> pdf = sqrt(2)*exp(-x**2/2)/(2*sqrt(pi)) # Normal distribution
    >>> X = ContinuousRV(x, pdf)
    >>> E(X)
    0
    >>> P(X>0)
    1/2
    """
    handmade = ContinuousDistributionHandmade(Lambda(symbol, density), set)
    return SingleContinuousPSpace(symbol, handmade).value
def rv(symbol, cls, args):
    """Internal helper shared by all factory functions below.

    Sympifies the raw arguments, instantiates the distribution class,
    runs its check() validation, and wraps it in a probability space.
    """
    sym_args = [sympify(arg) for arg in args]
    dist = cls(*sym_args)
    dist.check(*sym_args)
    return SingleContinuousPSpace(symbol, dist).value
########################################
# Continuous Probability Distributions #
########################################
#-------------------------------------------------------------------------------
# Arcsin distribution ----------------------------------------------------------
class ArcsinDistribution(SingleContinuousDistribution):
    """Arcsin distribution on the interval [a, b]."""
    _argnames = ('a', 'b')
    @property
    def set(self):
        # Fix: the density is supported only on [a, b] (see the Arcsin
        # factory docstring); without this property the base class would
        # use its default support of the whole real line, giving wrong
        # probabilities and expectations.
        return Interval(self.a, self.b)
    def pdf(self, x):
        return 1/(pi*sqrt((x - self.a)*(self.b - x)))
def Arcsin(name, a=0, b=1):
    r"""
    Create a Continuous Random Variable with an arcsin distribution.

    The density of the arcsin distribution is given by

    .. math::
        f(x) := \frac{1}{\pi\sqrt{(x-a)(b-x)}}

    with :math:`x \in [a,b]`. It must hold that :math:`-\infty < a < b < \infty`.

    Parameters
    ==========

    a : Real number, the left interval boundary
    b : Real number, the right interval boundary

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Arcsin, density
    >>> from sympy import Symbol, simplify
    >>> a = Symbol("a", real=True)
    >>> b = Symbol("b", real=True)
    >>> z = Symbol("z")
    >>> X = Arcsin("x", a, b)
    >>> density(X)(z)
    1/(pi*sqrt((-a + z)*(b - z)))

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Arcsine_distribution
    """
    # Delegate argument sympification/validation and construction to rv().
    return rv(name, ArcsinDistribution, (a, b))
#-------------------------------------------------------------------------------
# Benini distribution ----------------------------------------------------------
class BeniniDistribution(SingleContinuousDistribution):
    """Benini (log-Rayleigh) distribution, supported on [sigma, oo)."""
    _argnames = ('alpha', 'beta', 'sigma')
    @property
    def set(self):
        # Support starts at the scale parameter sigma.
        return Interval(self.sigma, oo)
    def pdf(self, x):
        alpha, beta, sigma = self.alpha, self.beta, self.sigma
        ratio = log(x/sigma)
        return exp(-alpha*ratio - beta*ratio**2)*(alpha/x + 2*beta*ratio/x)
def Benini(name, alpha, beta, sigma):
    r"""
    Create a Continuous Random Variable with a Benini distribution.

    The density of the Benini distribution is given by

    .. math::
        f(x) := e^{-\alpha\log{\frac{x}{\sigma}}
                -\beta\log^2\left[{\frac{x}{\sigma}}\right]}
                \left(\frac{\alpha}{x}+\frac{2\beta\log{\frac{x}{\sigma}}}{x}\right)

    This is a heavy-tailed distribution and is also known as the log-Rayleigh
    distribution.

    Parameters
    ==========

    alpha : Real number, `\alpha > 0`, a shape
    beta : Real number, `\beta > 0`, a shape
    sigma : Real number, `\sigma > 0`, a scale

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Benini, density
    >>> from sympy import Symbol, simplify, pprint
    >>> alpha = Symbol("alpha", positive=True)
    >>> beta = Symbol("beta", positive=True)
    >>> sigma = Symbol("sigma", positive=True)
    >>> z = Symbol("z")
    >>> X = Benini("x", alpha, beta, sigma)
    >>> D = density(X)(z)
    >>> pprint(D, use_unicode=False)
    /                  /  z  \\             /  z  \            2/  z  \
    |        2*beta*log|-----||  - alpha*log|-----| - beta*log |-----|
    |alpha             \sigma/|             \sigma/            \sigma/
    |----- + -----------------|*e
    \  z             z        /

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Benini_distribution
    .. [2] http://reference.wolfram.com/legacy/v8/ref/BeniniDistribution.html
    """
    # Delegate argument sympification/validation and construction to rv().
    return rv(name, BeniniDistribution, (alpha, beta, sigma))
#-------------------------------------------------------------------------------
# Beta distribution ------------------------------------------------------------
class BetaDistribution(SingleContinuousDistribution):
    """Beta distribution on [0, 1] with shape parameters alpha and beta."""
    _argnames = ('alpha', 'beta')
    set = Interval(0, 1)
    @staticmethod
    def check(alpha, beta):
        _value_check(alpha > 0, "Alpha must be positive")
        _value_check(beta > 0, "Beta must be positive")
    def pdf(self, x):
        a, b = self.alpha, self.beta
        return x**(a - 1)*(1 - x)**(b - 1)/beta_fn(a, b)
    def sample(self):
        # random.betavariate draws directly from the Beta distribution.
        return random.betavariate(self.alpha, self.beta)
def Beta(name, alpha, beta):
    r"""
    Create a Continuous Random Variable with a Beta distribution.

    The density of the Beta distribution is given by

    .. math::
        f(x) := \frac{x^{\alpha-1}(1-x)^{\beta-1}} {\mathrm{B}(\alpha,\beta)}

    with :math:`x \in [0,1]`.

    Parameters
    ==========

    alpha : Real number, `\alpha > 0`, a shape
    beta : Real number, `\beta > 0`, a shape

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Beta, density, E, variance
    >>> from sympy import Symbol, simplify, pprint, expand_func
    >>> alpha = Symbol("alpha", positive=True)
    >>> beta = Symbol("beta", positive=True)
    >>> z = Symbol("z")
    >>> X = Beta("x", alpha, beta)
    >>> D = density(X)(z)
    >>> pprint(D, use_unicode=False)
     alpha - 1         beta - 1
    z         *(-z + 1)
    ---------------------------
         beta(alpha, beta)
    >>> expand_func(simplify(E(X, meijerg=True)))
    alpha/(alpha + beta)
    >>> simplify(variance(X, meijerg=True)) #doctest: +SKIP
    alpha*beta/((alpha + beta)**2*(alpha + beta + 1))

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Beta_distribution
    .. [2] http://mathworld.wolfram.com/BetaDistribution.html
    """
    # Delegate argument sympification/validation and construction to rv().
    return rv(name, BetaDistribution, (alpha, beta))
#-------------------------------------------------------------------------------
# Beta prime distribution ------------------------------------------------------
class BetaPrimeDistribution(SingleContinuousDistribution):
    """Beta prime distribution with shapes alpha and beta, on x > 0."""
    _argnames = ('alpha', 'beta')
    set = Interval(0, oo)
    def pdf(self, x):
        a, b = self.alpha, self.beta
        numerator = x**(a - 1)*(1 + x)**(-a - b)
        return numerator/beta_fn(a, b)
def BetaPrime(name, alpha, beta):
    r"""
    Create a continuous random variable with a Beta prime distribution.

    The density of the Beta prime distribution is given by

    .. math::
        f(x) := \frac{x^{\alpha-1} (1+x)^{-\alpha -\beta}}{B(\alpha,\beta)}

    with :math:`x > 0`.

    Parameters
    ==========

    alpha : Real number, `\alpha > 0`, a shape
    beta : Real number, `\beta > 0`, a shape

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import BetaPrime, density
    >>> from sympy import Symbol, pprint
    >>> alpha = Symbol("alpha", positive=True)
    >>> beta = Symbol("beta", positive=True)
    >>> z = Symbol("z")
    >>> X = BetaPrime("x", alpha, beta)
    >>> D = density(X)(z)
    >>> pprint(D, use_unicode=False)
     alpha - 1        -alpha - beta
    z         *(z + 1)
    -------------------------------
           beta(alpha, beta)

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Beta_prime_distribution
    .. [2] http://mathworld.wolfram.com/BetaPrimeDistribution.html
    """
    # Delegate argument sympification/validation and construction to rv().
    return rv(name, BetaPrimeDistribution, (alpha, beta))
#-------------------------------------------------------------------------------
# Cauchy distribution ----------------------------------------------------------
class CauchyDistribution(SingleContinuousDistribution):
    """Cauchy distribution with location x0 and scale gamma."""
    _argnames = ('x0', 'gamma')
    def pdf(self, x):
        shifted = (x - self.x0)/self.gamma
        return 1/(pi*self.gamma*(1 + shifted**2))
def Cauchy(name, x0, gamma):
    r"""
    Create a continuous random variable with a Cauchy distribution.

    The density of the Cauchy distribution is given by

    .. math::
        f(x) := \frac{1}{\pi\gamma\left[1+\left(\frac{x-x_0}{\gamma}\right)^2\right]}

    Parameters
    ==========

    x0 : Real number, the location
    gamma : Real number, `\gamma > 0`, the scale

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Cauchy, density
    >>> from sympy import Symbol
    >>> x0 = Symbol("x0")
    >>> gamma = Symbol("gamma", positive=True)
    >>> z = Symbol("z")
    >>> X = Cauchy("x", x0, gamma)
    >>> density(X)(z)
    1/(pi*gamma*(1 + (-x0 + z)**2/gamma**2))

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Cauchy_distribution
    .. [2] http://mathworld.wolfram.com/CauchyDistribution.html
    """
    # NOTE: the math block above was corrected -- it previously showed the
    # CDF (the arctan form) while labelling it the density; the implemented
    # pdf is the Lorentzian shown now.
    # Delegate argument sympification/validation and construction to rv().
    return rv(name, CauchyDistribution, (x0, gamma))
#-------------------------------------------------------------------------------
# Chi distribution -------------------------------------------------------------
class ChiDistribution(SingleContinuousDistribution):
    """Chi distribution with k degrees of freedom, supported on [0, oo)."""
    _argnames = ('k',)
    set = Interval(0, oo)
    def pdf(self, x):
        k = self.k
        return 2**(1 - k/2)*x**(k - 1)*exp(-x**2/2)/gamma(k/2)
def Chi(name, k):
    r"""
    Create a continuous random variable with a Chi distribution.

    The density of the Chi distribution is given by

    .. math::
        f(x) := \frac{2^{1-k/2}x^{k-1}e^{-x^2/2}}{\Gamma(k/2)}

    with :math:`x \geq 0`.

    Parameters
    ==========

    k : A positive Integer, `k > 0`, the number of degrees of freedom

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Chi, density, E, std
    >>> from sympy import Symbol, simplify
    >>> k = Symbol("k", integer=True)
    >>> z = Symbol("z")
    >>> X = Chi("x", k)
    >>> density(X)(z)
    2**(-k/2 + 1)*z**(k - 1)*exp(-z**2/2)/gamma(k/2)

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Chi_distribution
    .. [2] http://mathworld.wolfram.com/ChiDistribution.html
    """
    # Delegate argument sympification/validation and construction to rv().
    return rv(name, ChiDistribution, (k,))
#-------------------------------------------------------------------------------
# Non-central Chi distribution -------------------------------------------------
class ChiNoncentralDistribution(SingleContinuousDistribution):
    """Noncentral chi distribution: k degrees of freedom, shift l."""
    _argnames = ('k', 'l')
    set = Interval(0, oo)
    def pdf(self, x):
        k, l = self.k, self.l
        bessel = besseli(k/2 - 1, l*x)
        return exp(-(x**2 + l**2)/2)*x**k*l*(l*x)**(-k/2)*bessel
def ChiNoncentral(name, k, l):
    r"""
    Create a continuous random variable with a non-central Chi distribution.

    The density of the non-central Chi distribution is given by

    .. math::
        f(x) := \frac{e^{-(x^2+\lambda^2)/2} x^k\lambda}
                {(\lambda x)^{k/2}} I_{k/2-1}(\lambda x)

    with `x \geq 0`. Here, `I_\nu (x)` is the
    :ref:`modified Bessel function of the first kind <besseli>`.

    Parameters
    ==========

    k : A positive Integer, `k > 0`, the number of degrees of freedom
    l : Shift parameter

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import ChiNoncentral, density, E, std
    >>> from sympy import Symbol, simplify
    >>> k = Symbol("k", integer=True)
    >>> l = Symbol("l")
    >>> z = Symbol("z")
    >>> X = ChiNoncentral("x", k, l)
    >>> density(X)(z)
    l*z**k*(l*z)**(-k/2)*exp(-l**2/2 - z**2/2)*besseli(k/2 - 1, l*z)

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Noncentral_chi_distribution
    """
    # Delegate argument sympification/validation and construction to rv().
    return rv(name, ChiNoncentralDistribution, (k, l))
#-------------------------------------------------------------------------------
# Chi squared distribution -----------------------------------------------------
class ChiSquaredDistribution(SingleContinuousDistribution):
    """Chi-squared distribution with k degrees of freedom, on [0, oo)."""
    _argnames = ('k',)
    set = Interval(0, oo)
    def pdf(self, x):
        k = self.k
        return x**(k/2 - 1)*exp(-x/2)/(2**(k/2)*gamma(k/2))
def ChiSquared(name, k):
    r"""
    Create a continuous random variable with a Chi-squared distribution.

    The density of the Chi-squared distribution is given by

    .. math::
        f(x) := \frac{1}{2^{\frac{k}{2}}\Gamma\left(\frac{k}{2}\right)}
                x^{\frac{k}{2}-1} e^{-\frac{x}{2}}

    with :math:`x \geq 0`.

    Parameters
    ==========

    k : A positive Integer, `k > 0`, the number of degrees of freedom

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import ChiSquared, density, E, variance
    >>> from sympy import Symbol, simplify, combsimp, expand_func
    >>> k = Symbol("k", integer=True, positive=True)
    >>> z = Symbol("z")
    >>> X = ChiSquared("x", k)
    >>> density(X)(z)
    2**(-k/2)*z**(k/2 - 1)*exp(-z/2)/gamma(k/2)
    >>> combsimp(E(X))
    k
    >>> simplify(expand_func(variance(X)))
    2*k

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Chi_squared_distribution
    .. [2] http://mathworld.wolfram.com/Chi-SquaredDistribution.html
    """
    # Delegate argument sympification/validation and construction to rv().
    return rv(name, ChiSquaredDistribution, (k, ))
#-------------------------------------------------------------------------------
# Dagum distribution -----------------------------------------------------------
class DagumDistribution(SingleContinuousDistribution):
    """Dagum distribution with shapes p, a and scale b, on x > 0."""
    _argnames = ('p', 'a', 'b')
    # Fix: the density is defined for x > 0 (see the Dagum factory
    # docstring); without an explicit set the base-class default of the
    # whole real line would be used.
    set = Interval(0, oo)
    def pdf(self, x):
        p, a, b = self.p, self.a, self.b
        return a*p/x*((x/b)**(a*p)/(((x/b)**a + 1)**(p + 1)))
def Dagum(name, p, a, b):
    r"""
    Create a continuous random variable with a Dagum distribution.

    The density of the Dagum distribution is given by

    .. math::
        f(x) := \frac{a p}{x} \left( \frac{\left(\tfrac{x}{b}\right)^{a p}}
                {\left(\left(\tfrac{x}{b}\right)^a + 1 \right)^{p+1}} \right)

    with :math:`x > 0`.

    Parameters
    ==========

    p : Real number, `p > 0`, a shape
    a : Real number, `a > 0`, a shape
    b : Real number, `b > 0`, a scale

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Dagum, density
    >>> from sympy import Symbol, simplify
    >>> p = Symbol("p", positive=True)
    >>> b = Symbol("b", positive=True)
    >>> a = Symbol("a", positive=True)
    >>> z = Symbol("z")
    >>> X = Dagum("x", p, a, b)
    >>> density(X)(z)
    a*p*(z/b)**(a*p)*((z/b)**a + 1)**(-p - 1)/z

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Dagum_distribution
    """
    # Delegate argument sympification/validation and construction to rv().
    return rv(name, DagumDistribution, (p, a, b))
#-------------------------------------------------------------------------------
# Erlang distribution ----------------------------------------------------------
def Erlang(name, k, l):
    r"""
    Create a continuous random variable with an Erlang distribution.

    The density of the Erlang distribution is given by

    .. math::
        f(x) := \frac{\lambda^k x^{k-1} e^{-\lambda x}}{(k-1)!}

    with :math:`x \in [0,\infty]`.

    Parameters
    ==========

    k : Integer
    l : Real number, `\lambda > 0`, the rate

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Erlang, density, cdf, E, variance
    >>> from sympy import Symbol, simplify, pprint
    >>> k = Symbol("k", integer=True, positive=True)
    >>> l = Symbol("l", positive=True)
    >>> z = Symbol("z")
    >>> X = Erlang("x", k, l)
    >>> D = density(X)(z)
    >>> pprint(D, use_unicode=False)
     k  k - 1  -l*z
    l *z     *e
    ---------------
        gamma(k)
    >>> C = cdf(X, meijerg=True)(z)
    >>> pprint(C, use_unicode=False)
    /  k*lowergamma(k, 0)   k*lowergamma(k, l*z)
    |- ------------------ + --------------------  for z >= 0
    <     gamma(k + 1)          gamma(k + 1)
    |
    \                     0                       otherwise
    >>> simplify(E(X))
    k/l
    >>> simplify(variance(X))
    k/l**2

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Erlang_distribution
    .. [2] http://mathworld.wolfram.com/ErlangDistribution.html
    """
    # The Erlang distribution is a Gamma distribution with integer shape k
    # and scale theta = 1/lambda, so GammaDistribution is reused directly.
    return rv(name, GammaDistribution, (k, 1/l))
#-------------------------------------------------------------------------------
# Exponential distribution -----------------------------------------------------
class ExponentialDistribution(SingleContinuousDistribution):
    """Exponential distribution with the given rate, supported on [0, oo)."""
    _argnames = ('rate',)
    set = Interval(0, oo)
    @staticmethod
    def check(rate):
        _value_check(rate > 0, "Rate must be positive.")
    def pdf(self, x):
        rate = self.rate
        return rate*exp(-rate*x)
    def sample(self):
        # random.expovariate samples an exponential with the given rate.
        return random.expovariate(self.rate)
def Exponential(name, rate):
    r"""
    Create a continuous random variable with an Exponential distribution.

    The density of the exponential distribution is given by

    .. math::
        f(x) := \lambda \exp(-\lambda x)

    with `x > 0`. Note that the expected value is `1/\lambda`.

    Parameters
    ==========

    rate : A positive Real number, `\lambda > 0`, the rate (or inverse scale/inverse mean)

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Exponential, density, cdf, E
    >>> from sympy.stats import variance, std, skewness
    >>> from sympy import Symbol
    >>> l = Symbol("lambda", positive=True)
    >>> z = Symbol("z")
    >>> X = Exponential("x", l)
    >>> density(X)(z)
    lambda*exp(-lambda*z)
    >>> cdf(X)(z)
    Piecewise((1 - exp(-lambda*z), z >= 0), (0, True))
    >>> E(X)
    1/lambda
    >>> variance(X)
    lambda**(-2)
    >>> skewness(X)
    2
    >>> X = Exponential('x', 10)
    >>> density(X)(z)
    10*exp(-10*z)
    >>> E(X)
    1/10
    >>> std(X)
    1/10

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Exponential_distribution
    .. [2] http://mathworld.wolfram.com/ExponentialDistribution.html
    """
    # Delegate argument sympification/validation and construction to rv().
    return rv(name, ExponentialDistribution, (rate, ))
#-------------------------------------------------------------------------------
# F distribution ---------------------------------------------------------------
class FDistributionDistribution(SingleContinuousDistribution):
    """F distribution with d1 and d2 degrees of freedom, on [0, oo)."""
    _argnames = ('d1', 'd2')
    set = Interval(0, oo)
    def pdf(self, x):
        d1, d2 = self.d1, self.d2
        numerator = sqrt((d1*x)**d1*d2**d2/(d1*x + d2)**(d1 + d2))
        return numerator/(x*beta_fn(d1/2, d2/2))
def FDistribution(name, d1, d2):
    r"""
    Create a continuous random variable with a F distribution.

    The density of the F distribution is given by

    .. math::
        f(x) := \frac{\sqrt{\frac{(d_1 x)^{d_1} d_2^{d_2}}
                {(d_1 x + d_2)^{d_1 + d_2}}}}
                {x \mathrm{B} \left(\frac{d_1}{2}, \frac{d_2}{2}\right)}

    with :math:`x > 0`.

    .. TODO - What do these parameters mean?

    Parameters
    ==========

    d1 : `d_1 > 0` a parameter
    d2 : `d_2 > 0` a parameter

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import FDistribution, density
    >>> from sympy import Symbol, simplify, pprint
    >>> d1 = Symbol("d1", positive=True)
    >>> d2 = Symbol("d2", positive=True)
    >>> z = Symbol("z")
    >>> X = FDistribution("x", d1, d2)
    >>> D = density(X)(z)
    >>> pprint(D, use_unicode=False)
      d2
      --    ______________________________
      2    /       d1            -d1 - d2
    d2  *\/  (d1*z)  *(d1*z + d2)
    --------------------------------------
                   /d1  d2\
             z*beta|--, --|
                   \2   2 /

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/F-distribution
    .. [2] http://mathworld.wolfram.com/F-Distribution.html
    """
    # Delegate argument sympification/validation and construction to rv().
    return rv(name, FDistributionDistribution, (d1, d2))
#-------------------------------------------------------------------------------
# Fisher Z distribution --------------------------------------------------------
class FisherZDistribution(SingleContinuousDistribution):
    """Fisher's z-distribution with d1 and d2 degrees of freedom."""
    _argnames = ('d1', 'd2')
    def pdf(self, x):
        d1, d2 = self.d1, self.d2
        normalizer = 2*d1**(d1/2)*d2**(d2/2)/beta_fn(d1/2, d2/2)
        return normalizer*exp(d1*x)/(d1*exp(2*x) + d2)**((d1 + d2)/2)
def FisherZ(name, d1, d2):
    r"""
    Create a Continuous Random Variable with an Fisher's Z distribution.

    The density of the Fisher's Z distribution is given by

    .. math::
        f(x) := \frac{2d_1^{d_1/2} d_2^{d_2/2}} {\mathrm{B}(d_1/2, d_2/2)}
                \frac{e^{d_1z}}{\left(d_1e^{2z}+d_2\right)^{\left(d_1+d_2\right)/2}}

    .. TODO - What is the difference between these degrees of freedom?

    Parameters
    ==========

    d1 : `d_1 > 0`, degree of freedom
    d2 : `d_2 > 0`, degree of freedom

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import FisherZ, density
    >>> from sympy import Symbol, simplify, pprint
    >>> d1 = Symbol("d1", positive=True)
    >>> d2 = Symbol("d2", positive=True)
    >>> z = Symbol("z")
    >>> X = FisherZ("x", d1, d2)
    >>> D = density(X)(z)
    >>> pprint(D, use_unicode=False)
                                d1   d2
        d1   d2               - -- - --
        --   --                 2    2
        2    2  /    2*z     \           d1*z
    2*d1  *d2  *\d1*e    + d2/         *e
    -----------------------------------------
                     /d1  d2\
                 beta|--, --|
                     \2   2 /

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Fisher%27s_z-distribution
    .. [2] http://mathworld.wolfram.com/Fishersz-Distribution.html
    """
    # Delegate argument sympification/validation and construction to rv().
    return rv(name, FisherZDistribution, (d1, d2))
#-------------------------------------------------------------------------------
# Frechet distribution ---------------------------------------------------------
class FrechetDistribution(SingleContinuousDistribution):
    """Frechet distribution with shape a, scale s and minimum m."""
    _argnames = ('a', 's', 'm')
    # NOTE(review): the Frechet factory docstring says the support is
    # x >= m, but the set here is fixed at [0, oo) -- confirm whether it
    # should be Interval(self.m, oo).
    set = Interval(0, oo)
    def __new__(cls, a, s=1, m=0):
        # Sympify eagerly so the defaults s=1, m=0 become sympy numbers.
        a, s, m = list(map(sympify, (a, s, m)))
        return Basic.__new__(cls, a, s, m)
    def pdf(self, x):
        a, s, m = self.a, self.s, self.m
        scaled = (x - m)/s
        return a/s*scaled**(-1 - a)*exp(-scaled**(-a))
def Frechet(name, a, s=1, m=0):
    r"""
    Create a continuous random variable with a Frechet distribution.

    The density of the Frechet distribution is given by

    .. math::
        f(x) := \frac{\alpha}{s} \left(\frac{x-m}{s}\right)^{-1-\alpha}
                e^{-(\frac{x-m}{s})^{-\alpha}}

    with :math:`x \geq m`.

    Parameters
    ==========

    a : Real number, :math:`a \in \left(0, \infty\right)` the shape
    s : Real number, :math:`s \in \left(0, \infty\right)` the scale
    m : Real number, :math:`m \in \left(-\infty, \infty\right)` the minimum

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Frechet, density, E, std
    >>> from sympy import Symbol, simplify
    >>> a = Symbol("a", positive=True)
    >>> s = Symbol("s", positive=True)
    >>> m = Symbol("m", real=True)
    >>> z = Symbol("z")
    >>> X = Frechet("x", a, s, m)
    >>> density(X)(z)
    a*((-m + z)/s)**(-a - 1)*exp(-((-m + z)/s)**(-a))/s

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Fr%C3%A9chet_distribution
    """
    # Delegate argument sympification/validation and construction to rv().
    return rv(name, FrechetDistribution, (a, s, m))
#-------------------------------------------------------------------------------
# Gamma distribution -----------------------------------------------------------
class GammaDistribution(SingleContinuousDistribution):
    """Gamma distribution with shape k and scale theta, on [0, oo)."""
    _argnames = ('k', 'theta')
    set = Interval(0, oo)
    @staticmethod
    def check(k, theta):
        _value_check(k > 0, "k must be positive")
        _value_check(theta > 0, "Theta must be positive")
    def pdf(self, x):
        shape, scale = self.k, self.theta
        return x**(shape - 1)*exp(-x/scale)/(gamma(shape)*scale**shape)
    def sample(self):
        # random.gammavariate uses the same (shape, scale) parametrization.
        return random.gammavariate(self.k, self.theta)
def Gamma(name, k, theta):
    r"""
    Create a continuous random variable with a Gamma distribution.

    The density of the Gamma distribution is given by

    .. math::
        f(x) := \frac{1}{\Gamma(k) \theta^k} x^{k - 1} e^{-\frac{x}{\theta}}

    with :math:`x \in [0,\infty)`.

    Parameters
    ==========

    k : Real number, `k > 0`, a shape
    theta : Real number, `\theta > 0`, a scale

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Gamma, density, cdf, E, variance
    >>> from sympy import Symbol, pprint, simplify
    >>> k = Symbol("k", positive=True)
    >>> theta = Symbol("theta", positive=True)
    >>> z = Symbol("z")
    >>> X = Gamma("x", k, theta)
    >>> D = density(X)(z)
    >>> pprint(D, use_unicode=False)
                      -z
                     -----
         -k  k - 1   theta
    theta  *z     *e
    ---------------------
           gamma(k)
    >>> C = cdf(X, meijerg=True)(z)
    >>> pprint(C, use_unicode=False)
    /                                   /     z  \
    |                       k*lowergamma|k, -----|
    |  k*lowergamma(k, 0)               \   theta/
    <- ------------------ + ----------------------  for z >= 0
    |     gamma(k + 1)           gamma(k + 1)
    |
    \                      0                        otherwise
    >>> E(X)
    theta*gamma(k + 1)/gamma(k)
    >>> V = simplify(variance(X))
    >>> pprint(V, use_unicode=False)
           2
    k*theta

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Gamma_distribution
    .. [2] http://mathworld.wolfram.com/GammaDistribution.html
    """
    # NOTE: the support above was corrected from [0,1] to [0, oo) -- the
    # Gamma density is defined on the nonnegative reals.
    # Delegate argument sympification/validation and construction to rv().
    return rv(name, GammaDistribution, (k, theta))
#-------------------------------------------------------------------------------
# Inverse Gamma distribution ---------------------------------------------------
class GammaInverseDistribution(SingleContinuousDistribution):
    """Inverse-gamma distribution with shape a and scale b, on x > 0."""
    _argnames = ('a', 'b')
    set = Interval(0, oo)
    @staticmethod
    def check(a, b):
        _value_check(a > 0, "alpha must be positive")
        _value_check(b > 0, "beta must be positive")
    def pdf(self, x):
        shape, scale = self.a, self.b
        return scale**shape*x**(-shape - 1)*exp(-scale/x)/gamma(shape)
def GammaInverse(name, a, b):
    r"""
    Create a continuous random variable with an inverse Gamma distribution.

    The density of the inverse Gamma distribution is given by

    .. math::
        f(x) := \frac{\beta^\alpha}{\Gamma(\alpha)} x^{-\alpha - 1}
                \exp\left(\frac{-\beta}{x}\right)

    with :math:`x > 0`.

    Parameters
    ==========

    a : Real number, `a > 0` a shape
    b : Real number, `b > 0` a scale

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import GammaInverse, density, cdf, E, variance
    >>> from sympy import Symbol, pprint
    >>> a = Symbol("a", positive=True)
    >>> b = Symbol("b", positive=True)
    >>> z = Symbol("z")
    >>> X = GammaInverse("x", a, b)
    >>> D = density(X)(z)
    >>> pprint(D, use_unicode=False)
                -b
                ---
     a  -a - 1   z
    b *z      *e
    ---------------
       gamma(a)

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Inverse-gamma_distribution
    """
    # Delegate argument sympification/validation and construction to rv().
    return rv(name, GammaInverseDistribution, (a, b))
#-------------------------------------------------------------------------------
# Kumaraswamy distribution -----------------------------------------------------
class KumaraswamyDistribution(SingleContinuousDistribution):
    """Kumaraswamy distribution with shapes a and b, supported on [0, 1]."""
    _argnames = ('a', 'b')
    # Fix: the Kumaraswamy density a*b*x**(a-1)*(1-x**a)**(b-1) is a
    # probability density on [0, 1] only (the factory docstring says
    # x in [0,1]); the previous Interval(0, oo) was incorrect and would
    # make integrals over the support diverge or go complex.
    set = Interval(0, 1)
    @staticmethod
    def check(a, b):
        _value_check(a > 0, "a must be positive")
        _value_check(b > 0, "b must be positive")
    def pdf(self, x):
        a, b = self.a, self.b
        return a * b * x**(a-1) * (1-x**a)**(b-1)
def Kumaraswamy(name, a, b):
    r"""
    Create a Continuous Random Variable with a Kumaraswamy distribution.

    The density of the Kumaraswamy distribution is given by

    .. math::
        f(x) := a b x^{a-1} (1-x^a)^{b-1}

    with :math:`x \in [0,1]`.

    Parameters
    ==========

    a : Real number, `a > 0` a shape
    b : Real number, `b > 0` a shape

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Kumaraswamy, density, E, variance
    >>> from sympy import Symbol, simplify, pprint
    >>> a = Symbol("a", positive=True)
    >>> b = Symbol("b", positive=True)
    >>> z = Symbol("z")
    >>> X = Kumaraswamy("x", a, b)
    >>> D = density(X)(z)
    >>> pprint(D, use_unicode=False)
                       b - 1
         a - 1 /   a    \
    a*b*z     *\- z  + 1/

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Kumaraswamy_distribution
    """
    # Delegate argument sympification/validation and construction to rv().
    return rv(name, KumaraswamyDistribution, (a, b))
#-------------------------------------------------------------------------------
# Laplace distribution ---------------------------------------------------------
class LaplaceDistribution(SingleContinuousDistribution):
    """Laplace distribution with location mu and scale b."""
    _argnames = ('mu', 'b')
    def pdf(self, x):
        location, scale = self.mu, self.b
        return exp(-Abs(x - location)/scale)/(2*scale)
def Laplace(name, mu, b):
    r"""
    Create a continuous random variable with a Laplace distribution.

    The density of the Laplace distribution is given by

    .. math::
        f(x) := \frac{1}{2 b} \exp \left(-\frac{|x-\mu|}b \right)

    Parameters
    ==========

    mu : Real number, the location (mean)
    b : Real number, `b > 0`, a scale

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Laplace, density
    >>> from sympy import Symbol
    >>> mu = Symbol("mu")
    >>> b = Symbol("b", positive=True)
    >>> z = Symbol("z")
    >>> X = Laplace("x", mu, b)
    >>> density(X)(z)
    exp(-Abs(mu - z)/b)/(2*b)

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Laplace_distribution
    .. [2] http://mathworld.wolfram.com/LaplaceDistribution.html
    """
    # Delegate argument sympification/validation and construction to rv().
    return rv(name, LaplaceDistribution, (mu, b))
#-------------------------------------------------------------------------------
# Logistic distribution --------------------------------------------------------
class LogisticDistribution(SingleContinuousDistribution):
    """Logistic distribution with location mu and scale s."""
    _argnames = ('mu', 's')
    def pdf(self, x):
        mu, s = self.mu, self.s
        z = exp(-(x - mu)/s)
        return z/(s*(1 + z)**2)
def Logistic(name, mu, s):
    r"""
    Create a continuous random variable with a logistic distribution.

    The density of the logistic distribution is given by

    .. math::
        f(x) := \frac{e^{-(x-\mu)/s}} {s\left(1+e^{-(x-\mu)/s}\right)^2}

    Parameters
    ==========

    mu : Real number, the location (mean)
    s : Real number, `s > 0` a scale

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Logistic, density
    >>> from sympy import Symbol
    >>> mu = Symbol("mu", real=True)
    >>> s = Symbol("s", positive=True)
    >>> z = Symbol("z")
    >>> X = Logistic("x", mu, s)
    >>> density(X)(z)
    exp((mu - z)/s)/(s*(exp((mu - z)/s) + 1)**2)

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Logistic_distribution
    .. [2] http://mathworld.wolfram.com/LogisticDistribution.html
    """
    # Delegate argument sympification/validation and construction to rv().
    return rv(name, LogisticDistribution, (mu, s))
#-------------------------------------------------------------------------------
# Log Normal distribution ------------------------------------------------------
class LogNormalDistribution(SingleContinuousDistribution):
    """Log-normal distribution parametrized by the underlying normal's
    mean and standard deviation; supported on x > 0."""
    _argnames = ('mean', 'std')
    set = Interval(0, oo)
    def pdf(self, x):
        mu, sigma = self.mean, self.std
        return exp(-(log(x) - mu)**2/(2*sigma**2))/(x*sqrt(2*pi)*sigma)
    def sample(self):
        # random.lognormvariate takes the underlying normal's mu and sigma.
        return random.lognormvariate(self.mean, self.std)
def LogNormal(name, mean, std):
    r"""
    Create a continuous random variable with a log-normal distribution.

    The density of the log-normal distribution is given by

    .. math::
        f(x) := \frac{1}{x\sqrt{2\pi\sigma^2}}
                e^{-\frac{\left(\ln x-\mu\right)^2}{2\sigma^2}}

    with :math:`x \geq 0`.

    Parameters
    ==========

    mean : Real number, the log-scale (mean `\mu` of the underlying normal)
    std : Real number, `\sigma > 0`, standard deviation of the underlying normal

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import LogNormal, density
    >>> from sympy import Symbol, simplify, pprint
    >>> mu = Symbol("mu", real=True)
    >>> sigma = Symbol("sigma", positive=True)
    >>> z = Symbol("z")
    >>> X = LogNormal("x", mu, sigma)
    >>> D = density(X)(z)
    >>> pprint(D, use_unicode=False)
                          2
           -(-mu + log(z))
           -----------------
                      2
      ___      2*sigma
    \/ 2 *e
    ------------------------
            ____
        2*\/ pi *sigma*z
    >>> X = LogNormal('x', 0, 1) # Mean 0, standard deviation 1
    >>> density(X)(z)
    sqrt(2)*exp(-log(z)**2/2)/(2*sqrt(pi)*z)

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Lognormal
    .. [2] http://mathworld.wolfram.com/LogNormalDistribution.html
    """
    # Delegate argument sympification/validation and construction to rv().
    return rv(name, LogNormalDistribution, (mean, std))
#-------------------------------------------------------------------------------
# Maxwell distribution ---------------------------------------------------------
class MaxwellDistribution(SingleContinuousDistribution):
    """Maxwell distribution with scale a, supported on [0, oo)."""
    _argnames = ('a',)
    set = Interval(0, oo)
    def pdf(self, x):
        scale = self.a
        return sqrt(2/pi)*x**2*exp(-x**2/(2*scale**2))/scale**3
def Maxwell(name, a):
    r"""
    Create a continuous random variable with a Maxwell distribution.

    The density of the Maxwell distribution is given by

    .. math::
        f(x) := \sqrt{\frac{2}{\pi}} \frac{x^2 e^{-x^2/(2a^2)}}{a^3}

    with :math:`x \geq 0`.

    .. TODO - what does the parameter mean?

    Parameters
    ==========

    a : Real number, `a > 0`

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Maxwell, density, E, variance
    >>> from sympy import Symbol, simplify
    >>> a = Symbol("a", positive=True)
    >>> z = Symbol("z")
    >>> X = Maxwell("x", a)
    >>> density(X)(z)
    sqrt(2)*z**2*exp(-z**2/(2*a**2))/(sqrt(pi)*a**3)
    >>> E(X)
    2*sqrt(2)*a/sqrt(pi)
    >>> simplify(variance(X))
    a**2*(-8 + 3*pi)/pi

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Maxwell_distribution
    .. [2] http://mathworld.wolfram.com/MaxwellDistribution.html
    """
    # Delegate argument sympification/validation and construction to rv().
    return rv(name, MaxwellDistribution, (a, ))
#-------------------------------------------------------------------------------
# Nakagami distribution --------------------------------------------------------
class NakagamiDistribution(SingleContinuousDistribution):
    """Nakagami distribution with shape mu and spread omega, on [0, oo)."""
    _argnames = ('mu', 'omega')
    set = Interval(0, oo)
    def pdf(self, x):
        mu, omega = self.mu, self.omega
        coeff = 2*mu**mu/(gamma(mu)*omega**mu)
        return coeff*x**(2*mu - 1)*exp(-mu/omega*x**2)
def Nakagami(name, mu, omega):
    r"""
    Create a continuous random variable with a Nakagami distribution.

    The density of the Nakagami distribution is given by

    .. math::
        f(x) := \frac{2\mu^\mu}{\Gamma(\mu)\omega^\mu} x^{2\mu-1}
                \exp\left(-\frac{\mu}{\omega}x^2 \right)

    with :math:`x > 0`.

    Parameters
    ==========

    mu : Real number, `\mu \geq \frac{1}{2}` a shape
    omega : Real number, `\omega > 0`, the spread

    Returns
    =======

    A RandomSymbol.

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Nakagami_distribution
    """
    return rv(name, NakagamiDistribution, (mu, omega))
#-------------------------------------------------------------------------------
# Normal distribution ----------------------------------------------------------
class NormalDistribution(SingleContinuousDistribution):
    """Gaussian distribution parametrized by ``mean`` and ``std``."""
    _argnames = ('mean', 'std')

    @staticmethod
    def check(mean, std):
        # A non-positive spread makes the density undefined.
        _value_check(std > 0, "Standard deviation must be positive")

    def pdf(self, x):
        """Density exp(-(x-mean)**2/(2*std**2)) / (std*sqrt(2*pi))."""
        mu, sigma = self.mean, self.std
        return exp(-(x - mu)**2 / (2*sigma**2)) / (sqrt(2*pi) * sigma)

    def sample(self):
        # Draw a single value with Python's Gaussian RNG.
        return random.normalvariate(self.mean, self.std)
def Normal(name, mean, std):
    r"""
    Create a continuous random variable with a Normal distribution.

    The density of the Normal distribution is given by

    .. math::
        f(x) := \frac{1}{\sigma\sqrt{2\pi}} e^{ -\frac{(x-\mu)^2}{2\sigma^2} }

    Parameters
    ==========

    mu : Real number, the mean
    sigma : Real number, :math:`\sigma^2 > 0` the variance

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Normal, density, E, std
    >>> from sympy import Symbol, simplify

    >>> mu = Symbol("mu")
    >>> sigma = Symbol("sigma", positive=True)
    >>> z = Symbol("z")

    >>> X = Normal("x", mu, sigma)
    >>> density(X)(z)
    sqrt(2)*exp(-(-mu + z)**2/(2*sigma**2))/(2*sqrt(pi)*sigma)

    >>> X = Normal("x", 0, 1) # Mean 0, standard deviation 1
    >>> density(X)(z)
    sqrt(2)*exp(-z**2/2)/(2*sqrt(pi))

    >>> E(2*X + 1)
    1
    >>> simplify(std(2*X + 1))
    2

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Normal_distribution
    .. [2] http://mathworld.wolfram.com/NormalDistributionFunction.html
    """
    return rv(name, NormalDistribution, (mean, std))
#-------------------------------------------------------------------------------
# Pareto distribution ----------------------------------------------------------
class ParetoDistribution(SingleContinuousDistribution):
    """Pareto distribution with scale ``xm`` and shape ``alpha``."""
    _argnames = ('xm', 'alpha')

    @property
    def set(self):
        # Support starts at the scale parameter xm.
        return Interval(self.xm, oo)

    @staticmethod
    def check(xm, alpha):
        _value_check(xm > 0, "Xm must be positive")
        _value_check(alpha > 0, "Alpha must be positive")

    def pdf(self, x):
        """Density alpha * xm**alpha / x**(alpha + 1) for x >= xm."""
        xm, alpha = self.xm, self.alpha
        return alpha * xm**alpha / x**(alpha + 1)

    def sample(self):
        # BUG FIX: random.paretovariate(alpha) draws from a Pareto with
        # scale 1 (support [1, oo)); the draw must be scaled by xm so the
        # samples match this distribution's pdf, whose support is [xm, oo).
        return self.xm * random.paretovariate(self.alpha)
def Pareto(name, xm, alpha):
    r"""
    Create a continuous random variable with the Pareto distribution.

    The density of the Pareto distribution is given by

    .. math::
        f(x) := \frac{\alpha\,x_m^\alpha}{x^{\alpha+1}}

    with :math:`x \in [x_m,\infty]`.

    Parameters
    ==========

    xm : Real number, `x_m > 0`, a scale
    alpha : Real number, `\alpha > 0`, a shape

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Pareto, density
    >>> from sympy import Symbol

    >>> xm = Symbol("xm", positive=True)
    >>> beta = Symbol("beta", positive=True)
    >>> z = Symbol("z")

    >>> density(Pareto("x", xm, beta))(z)
    beta*xm**beta*z**(-beta - 1)

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Pareto_distribution
    .. [2] http://mathworld.wolfram.com/ParetoDistribution.html
    """
    return rv(name, ParetoDistribution, (xm, alpha))
#-------------------------------------------------------------------------------
# QuadraticU distribution ------------------------------------------------------
class QuadraticUDistribution(SingleContinuousDistribution):
    """U-quadratic distribution on the interval [a, b]."""
    _argnames = ('a', 'b')

    @property
    def set(self):
        return Interval(self.a, self.b)

    def pdf(self, x):
        """Parabolic density alpha*(x - beta)**2 on [a, b], zero elsewhere."""
        lo, hi = self.a, self.b
        scale = 12 / (hi - lo)**3    # normalizing constant
        vertex = (lo + hi) / 2       # midpoint: minimum of the parabola
        return Piecewise(
            (scale * (x - vertex)**2, And(lo <= x, x <= hi)),
            (S.Zero, True))
def QuadraticU(name, a, b):
    r"""
    Create a Continuous Random Variable with a U-quadratic distribution.

    The density of the U-quadratic distribution is given by

    .. math::
        f(x) := \alpha (x-\beta)^2

    with :math:`x \in [a,b]`, where :math:`\alpha = 12/(b-a)^3` is the
    normalizing constant and :math:`\beta = (a+b)/2` the midpoint.

    Parameters
    ==========

    a : Real number
    b : Real number, :math:`a < b`

    Returns
    =======

    A RandomSymbol.

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/U-quadratic_distribution
    """
    return rv(name, QuadraticUDistribution, (a, b))
#-------------------------------------------------------------------------------
# RaisedCosine distribution ----------------------------------------------------
class RaisedCosineDistribution(SingleContinuousDistribution):
    """Raised cosine distribution centered at ``mu`` with half-width ``s``."""
    _argnames = ('mu', 's')

    @property
    def set(self):
        return Interval(self.mu - self.s, self.mu + self.s)

    @staticmethod
    def check(mu, s):
        _value_check(s > 0, "s must be positive")

    def pdf(self, x):
        """Density (1 + cos(pi*(x - mu)/s)) / (2*s) on [mu - s, mu + s]."""
        center, width = self.mu, self.s
        on_support = And(center - width <= x, x <= center + width)
        return Piecewise(
            ((1 + cos(pi*(x - center)/width)) / (2*width), on_support),
            (S.Zero, True))
def RaisedCosine(name, mu, s):
    r"""
    Create a Continuous Random Variable with a raised cosine distribution.

    The density of the raised cosine distribution is given by

    .. math::
        f(x) := \frac{1}{2s}\left(1+\cos\left(\frac{x-\mu}{s}\pi\right)\right)

    with :math:`x \in [\mu-s,\mu+s]`.

    Parameters
    ==========

    mu : Real number
    s : Real number, `s > 0`

    Returns
    =======

    A RandomSymbol.

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Raised_cosine_distribution
    """
    return rv(name, RaisedCosineDistribution, (mu, s))
#-------------------------------------------------------------------------------
# Rayleigh distribution --------------------------------------------------------
class RayleighDistribution(SingleContinuousDistribution):
    """Rayleigh distribution with scale ``sigma``."""
    _argnames = ('sigma',)

    # Support: the non-negative reals.
    set = Interval(0, oo)

    def pdf(self, x):
        """Density x/sigma**2 * exp(-x**2/(2*sigma**2))."""
        scale = self.sigma
        return (x / scale**2) * exp(-x**2 / (2*scale**2))
def Rayleigh(name, sigma):
    r"""
    Create a continuous random variable with a Rayleigh distribution.

    The density of the Rayleigh distribution is given by

    .. math ::
        f(x) := \frac{x}{\sigma^2} e^{-x^2/2\sigma^2}

    with :math:`x > 0`.

    Parameters
    ==========

    sigma : Real number, `\sigma > 0`

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Rayleigh, density
    >>> from sympy import Symbol

    >>> sigma = Symbol("sigma", positive=True)
    >>> z = Symbol("z")

    >>> density(Rayleigh("x", sigma))(z)
    z*exp(-z**2/(2*sigma**2))/sigma**2

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Rayleigh_distribution
    .. [2] http://mathworld.wolfram.com/RayleighDistribution.html
    """
    return rv(name, RayleighDistribution, (sigma,))
#-------------------------------------------------------------------------------
# StudentT distribution --------------------------------------------------------
class StudentTDistribution(SingleContinuousDistribution):
    """Student's t distribution with ``nu`` degrees of freedom."""
    _argnames = ('nu',)

    def pdf(self, x):
        """Density (1 + x**2/nu)**(-(nu+1)/2) / (sqrt(nu)*B(1/2, nu/2))."""
        dof = self.nu
        normalizer = 1 / (sqrt(dof) * beta_fn(S(1)/2, dof/2))
        return normalizer * (1 + x**2/dof)**(-(dof + 1)/2)
def StudentT(name, nu):
    r"""
    Create a continuous random variable with a student's t distribution.

    The density of the student's t distribution is given by

    .. math::
        f(x) := \frac{\Gamma \left(\frac{\nu+1}{2} \right)}
                {\sqrt{\nu\pi}\Gamma \left(\frac{\nu}{2} \right)}
                \left(1+\frac{x^2}{\nu} \right)^{-\frac{\nu+1}{2}}

    Parameters
    ==========

    nu : Real number, `\nu > 0`, the degrees of freedom

    Returns
    =======

    A RandomSymbol.

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Student_t-distribution
    .. [2] http://mathworld.wolfram.com/Studentst-Distribution.html
    """
    return rv(name, StudentTDistribution, (nu,))
#-------------------------------------------------------------------------------
# Triangular distribution ------------------------------------------------------
class TriangularDistribution(SingleContinuousDistribution):
    """Triangular distribution with endpoints ``a`` < ``b`` and mode ``c``."""
    _argnames = ('a', 'b', 'c')

    def pdf(self, x):
        """Piecewise-linear density rising on [a, c) and falling on (c, b]."""
        lo, hi, mode = self.a, self.b, self.c
        # Branch order matters: Piecewise evaluates conditions in sequence.
        return Piecewise(
            (2*(x - lo)/((hi - lo)*(mode - lo)), And(lo <= x, x < mode)),
            (2/(hi - lo), Eq(x, mode)),
            (2*(hi - x)/((hi - lo)*(hi - mode)), And(mode < x, x <= hi)),
            (S.Zero, True))
def Triangular(name, a, b, c):
    r"""
    Create a continuous random variable with a triangular distribution.

    The density of the triangular distribution is given by

    .. math::
        f(x) := \begin{cases}
                  0 & \mathrm{for\ } x < a, \\
                  \frac{2(x-a)}{(b-a)(c-a)} & \mathrm{for\ } a \le x < c, \\
                  \frac{2}{b-a} & \mathrm{for\ } x = c, \\
                  \frac{2(b-x)}{(b-a)(b-c)} & \mathrm{for\ } c < x \le b, \\
                  0 & \mathrm{for\ } b < x.
                \end{cases}

    Parameters
    ==========

    a : Real number, :math:`a \in \left(-\infty, \infty\right)`
    b : Real number, :math:`a < b`
    c : Real number, :math:`a \leq c \leq b`

    Returns
    =======

    A RandomSymbol.

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Triangular_distribution
    .. [2] http://mathworld.wolfram.com/TriangularDistribution.html
    """
    return rv(name, TriangularDistribution, (a, b, c))
#-------------------------------------------------------------------------------
# Uniform distribution ---------------------------------------------------------
class UniformDistribution(SingleContinuousDistribution):
    """Continuous uniform distribution on the interval [left, right]."""
    _argnames = ('left', 'right')

    def pdf(self, x):
        """Constant density 1/(right - left) inside the support, 0 outside."""
        left, right = self.left, self.right
        return Piecewise(
            (S.One/(right - left), And(left <= x, x <= right)),
            (S.Zero, True))

    def compute_cdf(self, **kwargs):
        """Compute the CDF, cleaning up symbolic Min() artifacts.

        The generic integration leaves unevaluated ``Min`` atoms when the
        endpoints are symbolic; substitute them with the endpoint the
        interval ordering implies so the result is a plain expression.
        """
        from sympy import Lambda, Min
        z = Dummy('z', real=True, finite=True)
        result = SingleContinuousDistribution.compute_cdf(self, **kwargs)(z)
        # Resolve the leftover Min() atoms produced by the integration.
        reps = {
            Min(z, self.right): z,
            Min(z, self.left, self.right): self.left,
            Min(z, self.left): self.left}
        result = result.subs(reps)
        return Lambda(z, result)

    def expectation(self, expr, var, **kwargs):
        """Expectation of ``expr``, resolving Max/Min of the symbolic endpoints."""
        from sympy import Max, Min
        # Force evaluation so the Max/Min atoms below actually appear.
        kwargs['evaluate'] = True
        result = SingleContinuousDistribution.expectation(self, expr, var, **kwargs)
        # Endpoints are ordered left <= right, so Max/Min can be resolved.
        result = result.subs({Max(self.left, self.right): self.right,
                              Min(self.left, self.right): self.left})
        return result

    def sample(self):
        # Draw a uniform float between the two endpoints.
        return random.uniform(self.left, self.right)
def Uniform(name, left, right):
    r"""
    Create a continuous random variable with a uniform distribution.

    The density of the uniform distribution is given by

    .. math::
        f(x) := \begin{cases}
                  \frac{1}{b - a} & \text{for } x \in [a,b]  \\
                  0               & \text{otherwise}
                \end{cases}

    with :math:`x \in [a,b]`.

    Parameters
    ==========

    a : Real number, :math:`-\infty < a` the left boundary
    b : Real number, :math:`a < b < \infty` the right boundary

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Uniform, density, E
    >>> from sympy import Symbol, simplify

    >>> a = Symbol("a", negative=True)
    >>> b = Symbol("b", positive=True)
    >>> z = Symbol("z")

    >>> X = Uniform("x", a, b)
    >>> density(X)(z)
    Piecewise((1/(-a + b), And(a <= z, z <= b)), (0, True))

    >>> simplify(E(X))
    a/2 + b/2

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Uniform_distribution_%28continuous%29
    .. [2] http://mathworld.wolfram.com/UniformDistribution.html
    """
    return rv(name, UniformDistribution, (left, right))
#-------------------------------------------------------------------------------
# UniformSum distribution ------------------------------------------------------
class UniformSumDistribution(SingleContinuousDistribution):
    """Irwin-Hall distribution: sum of ``n`` independent U(0, 1) variables."""
    _argnames = ('n',)

    @property
    def set(self):
        return Interval(0, self.n)

    def pdf(self, x):
        """Density sum_{k=0}^{floor(x)} (-1)**k*C(n,k)*(x-k)**(n-1) / (n-1)!."""
        n = self.n
        k = Dummy("k")
        alternating = Sum((-1)**k*binomial(n, k)*(x - k)**(n - 1), (k, 0, floor(x)))
        return alternating / factorial(n - 1)
def UniformSum(name, n):
    r"""
    Create a continuous random variable with an Irwin-Hall distribution.

    The probability distribution function depends on a single parameter
    `n` which is an integer.

    The density of the Irwin-Hall distribution is given by

    .. math ::
        f(x) := \frac{1}{(n-1)!}\sum_{k=0}^{\lfloor x\rfloor}(-1)^k
                \binom{n}{k}(x-k)^{n-1}

    Parameters
    ==========

    n : A positive Integer, `n > 0`

    Returns
    =======

    A RandomSymbol.

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Uniform_sum_distribution
    .. [2] http://mathworld.wolfram.com/UniformSumDistribution.html
    """
    return rv(name, UniformSumDistribution, (n,))
#-------------------------------------------------------------------------------
# VonMises distribution --------------------------------------------------------
class VonMisesDistribution(SingleContinuousDistribution):
    """Von Mises (circular) distribution with location ``mu`` and concentration ``k``."""
    _argnames = ('mu', 'k')

    # Support: one full turn of the circle.
    set = Interval(0, 2*pi)

    @staticmethod
    def check(mu, k):
        _value_check(k > 0, "k must be positive")

    def pdf(self, x):
        """Density exp(k*cos(x - mu)) / (2*pi*I0(k))."""
        location, concentration = self.mu, self.k
        normalizer = 2 * pi * besseli(0, concentration)
        return exp(concentration * cos(x - location)) / normalizer
def VonMises(name, mu, k):
    r"""
    Create a Continuous Random Variable with a von Mises distribution.

    The density of the von Mises distribution is given by

    .. math::
        f(x) := \frac{e^{\kappa\cos(x-\mu)}}{2\pi I_0(\kappa)}

    with :math:`x \in [0,2\pi]`.

    Parameters
    ==========

    mu : Real number, measure of location
    k : Real number, measure of concentration

    Returns
    =======

    A RandomSymbol.

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Von_Mises_distribution
    .. [2] http://mathworld.wolfram.com/vonMisesDistribution.html
    """
    return rv(name, VonMisesDistribution, (mu, k))
#-------------------------------------------------------------------------------
# Weibull distribution ---------------------------------------------------------
class WeibullDistribution(SingleContinuousDistribution):
    """Weibull distribution with scale ``alpha`` and shape ``beta``."""
    _argnames = ('alpha', 'beta')

    # Support: the non-negative reals.
    set = Interval(0, oo)

    @staticmethod
    def check(alpha, beta):
        _value_check(alpha > 0, "Alpha must be positive")
        _value_check(beta > 0, "Beta must be positive")

    def pdf(self, x):
        """Density (beta/alpha) * (x/alpha)**(beta - 1) * exp(-(x/alpha)**beta)."""
        scale, shape = self.alpha, self.beta
        scaled = x / scale
        return (shape / scale) * scaled**(shape - 1) * exp(-scaled**shape)

    def sample(self):
        # random.weibullvariate(alpha, beta) uses the same (scale, shape)
        # parametrization as this pdf.
        return random.weibullvariate(self.alpha, self.beta)
def Weibull(name, alpha, beta):
    r"""
    Create a continuous random variable with a Weibull distribution.

    The density of the Weibull distribution is given by

    .. math::
        f(x) := \begin{cases}
                  \frac{k}{\lambda}\left(\frac{x}{\lambda}\right)^{k-1}
                  e^{-(x/\lambda)^{k}} & x\geq0\\
                  0 & x<0
                \end{cases}

    Parameters
    ==========

    lambda : Real number, :math:`\lambda > 0` a scale
    k : Real number, `k > 0` a shape

    Returns
    =======

    A RandomSymbol.

    Examples
    ========

    >>> from sympy.stats import Weibull, density, E
    >>> from sympy import Symbol, simplify

    >>> l = Symbol("lambda", positive=True)
    >>> k = Symbol("k", positive=True)
    >>> z = Symbol("z")

    >>> X = Weibull("x", l, k)
    >>> density(X)(z)
    k*(z/lambda)**(k - 1)*exp(-(z/lambda)**k)/lambda

    >>> simplify(E(X))
    lambda*gamma(1 + 1/k)

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Weibull_distribution
    .. [2] http://mathworld.wolfram.com/WeibullDistribution.html
    """
    return rv(name, WeibullDistribution, (alpha, beta))
#-------------------------------------------------------------------------------
# Wigner semicircle distribution -----------------------------------------------
class WignerSemicircleDistribution(SingleContinuousDistribution):
    """Wigner semicircle distribution of radius ``R``."""
    _argnames = ('R',)

    @property
    def set(self):
        return Interval(-self.R, self.R)

    def pdf(self, x):
        """Density 2*sqrt(R**2 - x**2) / (pi*R**2) on [-R, R]."""
        radius = self.R
        return (2 / (pi * radius**2)) * sqrt(radius**2 - x**2)
def WignerSemicircle(name, R):
    r"""
    Create a continuous random variable with a Wigner semicircle distribution.

    The density of the Wigner semicircle distribution is given by

    .. math::
        f(x) := \frac2{\pi R^2}\,\sqrt{R^2-x^2}

    with :math:`x \in [-R,R]`.

    Parameters
    ==========

    R : Real number, `R > 0`, the radius

    Returns
    =======

    A `RandomSymbol`.

    Examples
    ========

    >>> from sympy.stats import WignerSemicircle, density, E
    >>> from sympy import Symbol

    >>> R = Symbol("R", positive=True)
    >>> z = Symbol("z")

    >>> X = WignerSemicircle("x", R)
    >>> density(X)(z)
    2*sqrt(R**2 - z**2)/(pi*R**2)

    >>> E(X)
    0

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Wigner_semicircle_distribution
    .. [2] http://mathworld.wolfram.com/WignersSemicircleLaw.html
    """
    return rv(name, WignerSemicircleDistribution, (R,))
|
bsd-3-clause
|
dracos/django
|
django/contrib/contenttypes/checks.py
|
107
|
1234
|
from itertools import chain
from django.apps import apps
from django.core.checks import Error
def check_generic_foreign_keys(app_configs=None, **kwargs):
    """Run the field-level checks of every GenericForeignKey.

    Inspects all installed models (or only those of ``app_configs`` when
    given) and collects the errors reported by each GenericForeignKey's
    own ``check()`` method.
    """
    from .fields import GenericForeignKey

    if app_configs is None:
        models = apps.get_models()
    else:
        models = chain.from_iterable(
            app_config.get_models() for app_config in app_configs)

    errors = []
    for model in models:
        for attr in vars(model).values():
            if isinstance(attr, GenericForeignKey):
                errors.extend(attr.check())
    return errors
def check_model_name_lengths(app_configs=None, **kwargs):
    """Report an E005 error for every model whose name exceeds 100 chars.

    Checks all installed models, or only those of ``app_configs`` when given.
    """
    if app_configs is None:
        models = apps.get_models()
    else:
        models = chain.from_iterable(
            app_config.get_models() for app_config in app_configs)

    errors = []
    for model in models:
        name_length = len(model._meta.model_name)
        if name_length > 100:
            errors.append(Error(
                'Model names must be at most 100 characters (got %d).' % name_length,
                obj=model,
                id='contenttypes.E005',
            ))
    return errors
|
bsd-3-clause
|
f3r/scikit-learn
|
examples/model_selection/grid_search_text_feature_extraction.py
|
99
|
4163
|
"""
==========================================================
Sample pipeline for text feature extraction and evaluation
==========================================================
The dataset used in this example is the 20 newsgroups dataset which will be
automatically downloaded and then cached and reused for the document
classification example.
You can adjust the number of categories by giving their names to the dataset
loader or setting them to None to get the 20 of them.
Here is a sample output of a run on a quad-core machine::
Loading 20 newsgroups dataset for categories:
['alt.atheism', 'talk.religion.misc']
1427 documents
2 categories
Performing grid search...
pipeline: ['vect', 'tfidf', 'clf']
parameters:
{'clf__alpha': (1.0000000000000001e-05, 9.9999999999999995e-07),
'clf__n_iter': (10, 50, 80),
'clf__penalty': ('l2', 'elasticnet'),
'tfidf__use_idf': (True, False),
'vect__max_n': (1, 2),
'vect__max_df': (0.5, 0.75, 1.0),
'vect__max_features': (None, 5000, 10000, 50000)}
done in 1737.030s
Best score: 0.940
Best parameters set:
clf__alpha: 9.9999999999999995e-07
clf__n_iter: 50
clf__penalty: 'elasticnet'
tfidf__use_idf: True
vect__max_n: 2
vect__max_df: 0.75
vect__max_features: 50000
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
#         Peter Prettenhofer <peter.prettenhofer@gmail.com>
#         Mathieu Blondel <mathieu@mblondel.org>
# License: BSD 3 clause
from __future__ import print_function

from pprint import pprint
from time import time
import logging

from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline

print(__doc__)

# Display progress logs on stdout so GridSearchCV progress is visible.
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')


###############################################################################
# Load some categories from the training set
categories = [
    'alt.atheism',
    'talk.religion.misc',
]
# Uncomment the following to do the analysis on all the categories
#categories = None

print("Loading 20 newsgroups dataset for categories:")
print(categories)

data = fetch_20newsgroups(subset='train', categories=categories)
print("%d documents" % len(data.filenames))
print("%d categories" % len(data.target_names))
print()

###############################################################################
# Define a pipeline combining a text feature extractor with a simple
# classifier: raw text -> token counts -> tf-idf weights -> linear SGD model.
pipeline = Pipeline([
    ('vect', CountVectorizer()),
    ('tfidf', TfidfTransformer()),
    ('clf', SGDClassifier()),
])

# Uncommenting more parameters will give better exploring power but will
# increase processing time in a combinatorial way (the grid is the Cartesian
# product of all listed values).
parameters = {
    'vect__max_df': (0.5, 0.75, 1.0),
    #'vect__max_features': (None, 5000, 10000, 50000),
    'vect__ngram_range': ((1, 1), (1, 2)),  # unigrams or bigrams
    #'tfidf__use_idf': (True, False),
    #'tfidf__norm': ('l1', 'l2'),
    'clf__alpha': (0.00001, 0.000001),
    'clf__penalty': ('l2', 'elasticnet'),
    #'clf__n_iter': (10, 50, 80),
}

if __name__ == "__main__":
    # multiprocessing requires the fork to happen in a __main__ protected
    # block (n_jobs=-1 below spawns one worker per CPU core)

    # find the best parameters for both the feature extraction and the
    # classifier
    grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)

    print("Performing grid search...")
    print("pipeline:", [name for name, _ in pipeline.steps])
    print("parameters:")
    pprint(parameters)
    t0 = time()
    grid_search.fit(data.data, data.target)
    print("done in %0.3fs" % (time() - t0))
    print()

    print("Best score: %0.3f" % grid_search.best_score_)
    print("Best parameters set:")
    best_parameters = grid_search.best_estimator_.get_params()
    for param_name in sorted(parameters.keys()):
        print("\t%s: %r" % (param_name, best_parameters[param_name]))
|
bsd-3-clause
|
pnedunuri/scikit-learn
|
examples/decomposition/plot_image_denoising.py
|
181
|
5819
|
"""
=========================================
Image denoising using dictionary learning
=========================================
An example comparing the effect of reconstructing noisy fragments
of the Lena image using firstly online :ref:`DictionaryLearning` and
various transform methods.
The dictionary is fitted on the distorted left half of the image, and
subsequently used to reconstruct the right half. Note that even better
performance could be achieved by fitting to an undistorted (i.e.
noiseless) image, but here we start from the assumption that it is not
available.
A common practice for evaluating the results of image denoising is by looking
at the difference between the reconstruction and the original image. If the
reconstruction is perfect this will look like Gaussian noise.
It can be seen from the plots that the results of :ref:`omp` with two
non-zero coefficients is a bit less biased than when keeping only one
(the edges look less prominent). It is in addition closer to the ground
truth in Frobenius norm.
The result of :ref:`least_angle_regression` is much more strongly biased: the
difference is reminiscent of the local intensity value of the original image.
Thresholding is clearly not useful for denoising, but it is here to show that
it can produce a suggestive output with very high speed, and thus be useful
for other tasks such as object classification, where performance is not
necessarily related to visualisation.
"""
print(__doc__)

from time import time

import matplotlib.pyplot as plt
import numpy as np

# NOTE(review): scipy.misc.lena was removed from SciPy (deprecated in 0.17,
# gone in 1.0) — this example only runs on older SciPy versions; confirm the
# target SciPy before reuse.
from scipy.misc import lena

from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.feature_extraction.image import extract_patches_2d
from sklearn.feature_extraction.image import reconstruct_from_patches_2d

###############################################################################
# Load Lena image and extract patches

# Scale 8-bit pixel values into [0, 1).
lena = lena() / 256.0

# downsample for higher speed (2x2 block average halves each dimension)
lena = lena[::2, ::2] + lena[1::2, ::2] + lena[::2, 1::2] + lena[1::2, 1::2]
lena /= 4.0
height, width = lena.shape

# Distort the right half of the image with additive Gaussian noise.
# NOTE(review): the column slice uses `height // 2` — this relies on the
# image being square (width == height for Lena); confirm before reusing on
# non-square inputs.
print('Distorting image...')
distorted = lena.copy()
distorted[:, height // 2:] += 0.075 * np.random.randn(width, height // 2)

# Extract all reference patches from the (clean) left half of the image
print('Extracting reference patches...')
t0 = time()
patch_size = (7, 7)
data = extract_patches_2d(distorted[:, :height // 2], patch_size)
data = data.reshape(data.shape[0], -1)
# Standardize each feature (pixel position) to zero mean, unit variance.
data -= np.mean(data, axis=0)
data /= np.std(data, axis=0)
print('done in %.2fs.' % (time() - t0))

###############################################################################
# Learn the dictionary from reference patches

print('Learning the dictionary...')
t0 = time()
dico = MiniBatchDictionaryLearning(n_components=100, alpha=1, n_iter=500)
V = dico.fit(data).components_
dt = time() - t0
print('done in %.2fs.' % dt)

# Plot the 100 learned dictionary atoms as 7x7 image patches.
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(V[:100]):
    plt.subplot(10, 10, i + 1)
    plt.imshow(comp.reshape(patch_size), cmap=plt.cm.gray_r,
               interpolation='nearest')
    plt.xticks(())
    plt.yticks(())
plt.suptitle('Dictionary learned from Lena patches\n' +
             'Train time %.1fs on %d patches' % (dt, len(data)),
             fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
###############################################################################
# Display the distorted image
def show_with_diff(image, reference, title):
    """Plot *image* next to its pixel-wise difference from *reference*."""
    plt.figure(figsize=(5, 3.3))

    # Left panel: the image itself.
    plt.subplot(1, 2, 1)
    plt.title('Image')
    plt.imshow(image, vmin=0, vmax=1, cmap=plt.cm.gray, interpolation='nearest')
    plt.xticks(())
    plt.yticks(())

    # Right panel: signed difference, annotated with its Frobenius norm.
    plt.subplot(1, 2, 2)
    diff_img = image - reference
    err_norm = np.sqrt(np.sum(diff_img ** 2))
    plt.title('Difference (norm: %.2f)' % err_norm)
    plt.imshow(diff_img, vmin=-0.5, vmax=0.5, cmap=plt.cm.PuOr,
               interpolation='nearest')
    plt.xticks(())
    plt.yticks(())

    plt.suptitle(title, size=16)
    plt.subplots_adjust(0.02, 0.02, 0.98, 0.79, 0.02, 0.2)
show_with_diff(distorted, lena, 'Distorted image')

###############################################################################
# Extract noisy patches and reconstruct them using the dictionary

print('Extracting noisy patches... ')
t0 = time()
data = extract_patches_2d(distorted[:, height // 2:], patch_size)
data = data.reshape(data.shape[0], -1)
# Remove (and remember) the per-feature mean so the sparse code models only
# the structure; it is added back after reconstruction.
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))

# (title, transform_algorithm, extra estimator params) for each method tried.
transform_algorithms = [
    ('Orthogonal Matching Pursuit\n1 atom', 'omp',
     {'transform_n_nonzero_coefs': 1}),
    ('Orthogonal Matching Pursuit\n2 atoms', 'omp',
     {'transform_n_nonzero_coefs': 2}),
    ('Least-angle regression\n5 atoms', 'lars',
     {'transform_n_nonzero_coefs': 5}),
    ('Thresholding\n alpha=0.1', 'threshold', {'transform_alpha': .1})]

reconstructions = {}
for title, transform_algorithm, kwargs in transform_algorithms:
    print(title + '...')
    reconstructions[title] = lena.copy()
    t0 = time()
    dico.set_params(transform_algorithm=transform_algorithm, **kwargs)
    # Sparse-code the noisy patches against the learned dictionary V, then
    # reconstruct each patch as a linear combination of atoms.
    code = dico.transform(data)
    patches = np.dot(code, V)
    patches += intercept
    patches = patches.reshape(len(data), *patch_size)
    # BUG FIX: the min/max rescaling of the thresholding output was applied
    # twice (once before adding back the intercept and once after), which
    # double-normalized the patches; rescale to [0, 1] exactly once.
    if transform_algorithm == 'threshold':
        patches -= patches.min()
        patches /= patches.max()
    reconstructions[title][:, height // 2:] = reconstruct_from_patches_2d(
        patches, (width, height // 2))
    dt = time() - t0
    print('done in %.2fs.' % dt)
    show_with_diff(reconstructions[title], lena,
                   title + ' (time: %.1fs)' % dt)

plt.show()
|
bsd-3-clause
|
robinro/ansible
|
lib/ansible/modules/cloud/cloudstack/cs_network_acl.py
|
27
|
6173
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2017, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_network_acl
short_description: Manages network access control lists (ACL) on Apache CloudStack based clouds.
description:
- Create and remove network ACLs.
version_added: "2.4"
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the network ACL.
required: true
description:
description:
- Description of the network ACL.
- If not set, identical to C(name).
required: false
default: null
vpc:
description:
- VPC the network ACL is related to.
required: true
state:
description:
- State of the network ACL.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
domain:
description:
- Domain the network ACL rule is related to.
required: false
default: null
account:
description:
- Account the network ACL rule is related to.
required: false
default: null
project:
description:
- Name of the project the network ACL is related to.
required: false
default: null
zone:
description:
- Name of the zone the VPC is related to.
- If not set, default zone is used.
required: false
default: null
poll_async:
description:
- Poll async jobs until job has finished.
required: false
default: true
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# create a network ACL
local_action:
module: cs_network_acl
name: Webserver ACL
description: a more detailed description of the ACL
vpc: customers
# remove a network ACL
local_action:
module: cs_network_acl
name: Webserver ACL
vpc: customers
state: absent
'''
RETURN = '''
---
name:
description: Name of the network ACL.
returned: success
type: string
sample: customer acl
description:
description: Description of the network ACL.
returned: success
type: string
sample: Example description of a network ACL
vpc:
description: VPC of the network ACL.
returned: success
type: string
sample: customer vpc
zone:
description: Zone the VPC is related to.
returned: success
type: string
sample: ch-gva-2
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
CloudStackException,
cs_argument_spec,
cs_required_together
)
class AnsibleCloudStackNetworkAcl(AnsibleCloudStack):
    """Manage CloudStack network ACL lists: lookup, create and delete."""

    def __init__(self, module):
        super(AnsibleCloudStackNetworkAcl, self).__init__(module)

    def get_network_acl(self):
        """Return the ACL dict matching name + VPC, or None when absent."""
        list_args = {
            'name': self.module.params.get('name'),
            'vpcid': self.get_vpc(key='id'),
        }
        found = self.cs.listNetworkACLLists(**list_args)
        if not found:
            return None
        return found['networkacllist'][0]

    def present_network_acl(self):
        """Ensure the ACL exists; creates it unless running in check mode."""
        network_acl = self.get_network_acl()
        if network_acl:
            return network_acl

        self.result['changed'] = True
        create_args = {
            'name': self.module.params.get('name'),
            # Description falls back to the name when not provided.
            'description': self.get_or_fallback('description', 'name'),
            'vpcid': self.get_vpc(key='id')
        }
        if self.module.check_mode:
            return network_acl

        res = self.cs.createNetworkACLList(**create_args)
        if 'errortext' in res:
            self.fail_json(msg="Failed: '%s'" % res['errortext'])
        if self.module.params.get('poll_async'):
            # Wait for the async job and return the created ACL.
            network_acl = self.poll_job(res, 'networkacllist')
        return network_acl

    def absent_network_acl(self):
        """Ensure the ACL is removed; deletes it unless running in check mode."""
        network_acl = self.get_network_acl()
        if not network_acl:
            return network_acl

        self.result['changed'] = True
        if not self.module.check_mode:
            res = self.cs.deleteNetworkACLList(id=network_acl['id'])
            if 'errortext' in res:
                self.fail_json(msg="Failed: '%s'" % res['errortext'])
            if self.module.params.get('poll_async'):
                self.poll_job(res, 'networkacllist')
        return network_acl
def main():
    """Module entry point: build the argument spec and dispatch by state."""
    spec = cs_argument_spec()
    spec.update(dict(
        name=dict(required=True),
        description=dict(),
        vpc=dict(required=True),
        state=dict(choices=['present', 'absent'], default='present'),
        zone=dict(),
        domain=dict(),
        account=dict(),
        project=dict(),
        poll_async=dict(type='bool', default=True),
    ))

    module = AnsibleModule(
        argument_spec=spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )

    try:
        handler = AnsibleCloudStackNetworkAcl(module)
        if module.params.get('state') == 'absent':
            network_acl = handler.absent_network_acl()
        else:
            network_acl = handler.present_network_acl()
        result = handler.get_result(network_acl)
    except CloudStackException as e:
        # fail_json exits, so `result` is always bound past this point.
        module.fail_json(msg='CloudStackException: %s' % str(e))

    module.exit_json(**result)


if __name__ == '__main__':
    main()
|
gpl-3.0
|
jaysuk/Printrun
|
printrun/gcodeplater.py
|
2
|
10956
|
#!/usr/bin/env python3
# This file is part of the Printrun suite.
#
# Printrun is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Printrun is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Printrun. If not, see <http://www.gnu.org/licenses/>.
# Set up Internationalization using gettext
# searching for installed locales on /usr/share; uses relative folder if not found (windows)
from .utils import install_locale, get_home_pos
install_locale('pronterface')
import wx
import sys
import os
import time
import types
import re
import math
import logging
from printrun import gcoder
from printrun.objectplater import make_plater, PlaterPanel
from printrun.gl.libtatlin import actors
import printrun.gui.viz # NOQA
from printrun import gcview
def extrusion_only(gline):
    """Return True when *gline* only extrudes: E is set but X, Y and Z are not."""
    if gline.e is None:
        return False
    return gline.x is None and gline.y is None and gline.z is None
# Custom method for gcoder.GCode to analyze & output gcode in a single call
def gcoder_write(self, f, line, store = False):
    """Mirror *line* to the output file *f* while feeding it to the analyzer.

    Bound onto a gcoder.GCode instance via types.MethodType (see
    export_combined), so ``self`` is the GCode object tracking printer state.
    """
    f.write(line)
    self.append(line, store=store)
# Matches an X or Y word (optionally followed by a signed decimal value) in a
# raw G-code line; rewrite_gline() substitutes rotated coordinates for these.
# Raw strings are used so "\." is a regex escape, not an (invalid) string
# escape sequence, which raises warnings on Python 3.6+.
rewrite_exp = re.compile("(%s)" % "|".join([r"X([-+]?[0-9]*\.?[0-9]*)",
                                            r"Y([-+]?[0-9]*\.?[0-9]*)"]))
def rewrite_gline(centeroffset, gline, cosr, sinr):
    """Return *gline*'s raw text with X/Y rewritten for a rotation of the model.

    centeroffset: (x, y, z) offset recentring the model on the origin.
    cosr/sinr: cosine and sine of the rotation angle.
    Non-move lines (and moves without X or Y) are returned unchanged.
    """
    if gline.is_move and (gline.x is not None or gline.y is not None):
        if gline.relative:
            # Relative moves rotate the delta only; missing axes default to 0.
            xc = yc = 0
            cox = coy = 0
            if gline.x is not None:
                xc = gline.x
            if gline.y is not None:
                yc = gline.y
        else:
            # Absolute moves rotate the recentred absolute position, then the
            # centering offset is removed again after rotation.
            xc = gline.current_x + centeroffset[0]
            yc = gline.current_y + centeroffset[1]
            cox = centeroffset[0]
            coy = centeroffset[1]
        # Standard 2D rotation of (xc, yc) by the angle (cosr, sinr).
        new_x = "X%.04f" % (xc * cosr - yc * sinr - cox)
        new_y = "Y%.04f" % (xc * sinr + yc * cosr - coy)
        new = {"X": new_x, "Y": new_y}
        # Substitute the existing X/Y words in the raw line with rotated ones.
        new_line = rewrite_exp.sub(lambda ax: new[ax.group()[0]], gline.raw)
        # Drop any trailing comment so appended words below stay valid.
        new_line = new_line.split(";")[0]
        # A rotation can introduce movement on an axis the original line
        # omitted, so append the missing word explicitly.
        if gline.x is None: new_line += " " + new_x
        if gline.y is None: new_line += " " + new_y
        return new_line
    else:
        return gline.raw
class GcodePlaterPanel(PlaterPanel):
    """Plater panel that arranges already-sliced G-code files on the bed and
    exports them as a single merged G-code file."""

    load_wildcard = _("GCODE files (*.gcode;*.GCODE;*.g)") + "|*.gcode;*.gco;*.g"
    save_wildcard = _("GCODE files (*.gcode;*.GCODE;*.g)") + "|*.gcode;*.gco;*.g"

    def prepare_ui(self, filenames = [], callback = None,
                   parent = None, build_dimensions = None,
                   circular_platform = False,
                   antialias_samples = 0,
                   grid = (1, 10)):
        """Build the panel UI: base plater widgets plus a 3D G-code viewer."""
        super(GcodePlaterPanel, self).prepare_ui(filenames, callback, parent, build_dimensions)
        viewer = gcview.GcodeViewPanel(self, build_dimensions = self.build_dimensions,
                                       antialias_samples = antialias_samples)
        self.set_viewer(viewer)
        self.platform = actors.Platform(self.build_dimensions,
                                        circular = circular_platform,
                                        grid = grid)
        self.platform_object = gcview.GCObject(self.platform)

    def get_objects(self):
        # The platform is always drawn first, then the loaded models.
        return [self.platform_object] + list(self.models.values())
    objects = property(get_objects)

    def load_file(self, filename):
        """Parse *filename* as G-code, build its 3D model and register it."""
        gcode = gcoder.GCode(open(filename, "rU"),
                             get_home_pos(self.build_dimensions))
        model = actors.GcodeModel()
        if gcode.filament_length > 0:
            model.display_travels = False
        # load_data is a generator (so a GUI could refresh while loading);
        # here we simply drain it synchronously.
        generator = model.load_data(gcode)
        generator_output = next(generator)
        while generator_output is not None:
            generator_output = next(generator)
        obj = gcview.GCObject(model)
        obj.offsets = [self.build_dimensions[3], self.build_dimensions[4], 0]
        obj.gcode = gcode
        obj.dims = [gcode.xmin, gcode.xmax,
                    gcode.ymin, gcode.ymax,
                    gcode.zmin, gcode.zmax]
        # Offset that recenters the model's bounding box on the origin in X/Y.
        obj.centeroffset = [-(obj.dims[1] + obj.dims[0]) / 2,
                            -(obj.dims[3] + obj.dims[2]) / 2,
                            0]
        self.add_model(filename, obj)
        wx.CallAfter(self.Refresh)

    def done(self, event, cb):
        """Export the plate to a temp file and hand the file name to *cb*."""
        if not os.path.exists("tempgcode"):
            os.mkdir("tempgcode")
        name = "tempgcode/" + str(int(time.time()) % 10000) + ".gcode"
        self.export_to(name)
        if cb is not None:
            cb(name)
        if self.destroy_on_done:
            self.Destroy()

    # What's hard in there ?
    # 1) [x] finding the order in which the objects are printed
    # 2) [x] handling layers correctly
    # 3) [x] handling E correctly
    # 4) [x] handling position shifts: should we either reset absolute 0 using
    #        G92 or should we rewrite all positions ? => we use G92s
    # 5) [ ] handling the start & end gcode properly ?
    # 6) [x] handling of current tool
    # 7) [x] handling of Z moves for sequential printing (don't lower Z before
    #        reaching the next object print area)
    # 8) [x] handling of absolute/relative status
    # Initial implementation should just print the objects sequentially,
    # but the end goal is to have a clean per-layer merge
    def export_to(self, name):
        # The combined (per-layer merge) export is the active implementation;
        # the sequential path below is intentionally unreachable but kept
        # around for reference/debugging.
        return self.export_combined(name)
        return self.export_sequential(name)

    def export_combined(self, name):
        """Merge all models layer-by-layer (sorted by Z) into one file *name*."""
        models = list(self.models.values())
        last_real_position = None
        # Sort models by Z max to print smaller objects first
        models.sort(key = lambda x: x.dims[-1])
        # Build a global (z, model, layer) list so layers from all models can
        # be interleaved in ascending Z order.
        alllayers = []
        for (model_i, model) in enumerate(models):
            def add_offset(layer):
                return layer.z + model.offsets[2] if layer.z is not None else layer.z
            alllayers += [(add_offset(layer), model_i, layer_i)
                          for (layer_i, layer) in enumerate(model.gcode.all_layers) if add_offset(layer) is not None]
        alllayers.sort()
        # Per-model printer state carried across interleaved layers.
        laste = [0] * len(models)
        lasttool = [0] * len(models)
        lastrelative = [False] * len(models)
        with open(name, "w") as f:
            # The analyzer both writes the output and tracks position/E/tool
            # state, via the gcoder_write method bound below.
            analyzer = gcoder.GCode(None, get_home_pos(self.build_dimensions))
            analyzer.write = types.MethodType(lambda self, line: gcoder_write(self, f, line), analyzer)
            for (layer_z, model_i, layer_i) in alllayers:
                model = models[model_i]
                layer = model.gcode.all_layers[layer_i]
                r = math.radians(model.rot)
                o = model.offsets
                co = model.centeroffset
                offset_pos = last_real_position if last_real_position is not None else (0, 0, 0)
                analyzer.write("; %f %f %f\n" % offset_pos)
                # Translation applied via G92 so the model's own coordinates
                # remain valid at its plated position.
                trans = (- (o[0] + co[0]),
                         - (o[1] + co[1]),
                         - (o[2] + co[2]))
                trans_wpos = (offset_pos[0] + trans[0],
                              offset_pos[1] + trans[1],
                              offset_pos[2] + trans[2])
                analyzer.write("; GCodePlater: Model %d Layer %d at Z = %s\n" % (model_i, layer_i, layer_z))
                # Restore this model's absolute/relative mode and tool.
                if lastrelative[model_i]:
                    analyzer.write("G91\n")
                else:
                    analyzer.write("G90\n")
                if analyzer.current_tool != lasttool[model_i]:
                    analyzer.write("T%d\n" % lasttool[model_i])
                analyzer.write("G92 X%.5f Y%.5f Z%.5f\n" % trans_wpos)
                analyzer.write("G92 E%.5f\n" % laste[model_i])
                for l in layer:
                    # Drop homing and position resets (except pure E resets).
                    if l.command != "G28" and (l.command != "G92" or extrusion_only(l)):
                        if r == 0:
                            analyzer.write(l.raw + "\n")
                        else:
                            analyzer.write(rewrite_gline(co, l, math.cos(r), math.sin(r)) + "\n")
                # Find the current real position & E
                last_real_position = analyzer.current_pos
                laste[model_i] = analyzer.current_e
                lastrelative[model_i] = analyzer.relative
                lasttool[model_i] = analyzer.current_tool
        logging.info(_("Exported merged G-Codes to %s") % name)

    def export_sequential(self, name):
        """Print models one after another (no layer interleaving) into *name*."""
        models = list(self.models.values())
        last_real_position = None
        # Sort models by Z max to print smaller objects first
        models.sort(key = lambda x: x.dims[-1])
        with open(name, "w") as f:
            for model_i, model in enumerate(models):
                r = math.radians(model.rot)
                o = model.offsets
                co = model.centeroffset
                offset_pos = last_real_position if last_real_position is not None else (0, 0, 0)
                trans = (- (o[0] + co[0]),
                         - (o[1] + co[1]),
                         - (o[2] + co[2]))
                trans_wpos = (offset_pos[0] + trans[0],
                              offset_pos[1] + trans[1],
                              offset_pos[2] + trans[2])
                f.write("; GCodePlater: Model %d\n" % model_i)
                f.write("G90\n")
                f.write("G92 X%.5f Y%.5f Z%.5f E0\n" % trans_wpos)
                f.write("G1 X%.5f Y%.5f" % (-co[0], -co[1]))
                for l in model.gcode:
                    if l.command != "G28" and (l.command != "G92" or extrusion_only(l)):
                        if r == 0:
                            f.write(l.raw + "\n")
                        else:
                            f.write(rewrite_gline(co, l, math.cos(r), math.sin(r)) + "\n")
                # Find the current real position
                # (scan backwards for the last move of this model).
                for i in range(len(model.gcode) - 1, -1, -1):
                    gline = model.gcode.lines[i]
                    if gline.is_move:
                        last_real_position = (- trans[0] + gline.current_x,
                                              - trans[1] + gline.current_y,
                                              - trans[2] + gline.current_z)
                        break
        logging.info(_("Exported merged G-Codes to %s") % name)
# Wrap the panel into a standalone plater frame class.
GcodePlater = make_plater(GcodePlaterPanel)

if __name__ == '__main__':
    app = wx.App(False)
    main = GcodePlater(filenames = sys.argv[1:])
    for fn in main.filenames:
        main.load_file(fn)
    main.filenames = None
    main.autoplate()
    main.export_to("gcodeplate___test.gcode")
    # NOTE(review): this SystemExit makes the GUI lines below unreachable —
    # presumably a headless test harness left enabled; confirm intent.
    raise SystemExit
    main.Show()
    app.MainLoop()
|
gpl-3.0
|
marcoantoniooliveira/labweb
|
oscar/lib/python2.7/site-packages/docutils/parsers/rst/directives/html.py
|
128
|
3098
|
# $Id: html.py 7320 2012-01-19 22:33:02Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
Directives for typically HTML-specific constructs.
"""
__docformat__ = 'reStructuredText'
import sys
from docutils import nodes, utils
from docutils.parsers.rst import Directive
from docutils.parsers.rst import states
from docutils.transforms import components
class MetaBody(states.SpecializedBody):
    """RST body state that parses ``meta`` directive content into meta nodes."""

    class meta(nodes.Special, nodes.PreBibliographic, nodes.Element):
        """HTML-specific "meta" element."""
        pass

    def field_marker(self, match, context, next_state):
        """Meta element."""
        node, blank_finish = self.parsemeta(match)
        self.parent += node
        return [], next_state, []

    def parsemeta(self, match):
        """Parse one field-marker line into a pending meta node.

        Returns (node, blank_finish); node is a system message on error.
        """
        name = self.parse_field_marker(match)
        indented, indent, line_offset, blank_finish = \
              self.state_machine.get_first_known_indented(match.end())
        node = self.meta()
        # Defer inclusion to transform time: a Filter keeps the node only
        # for the HTML writer.
        pending = nodes.pending(components.Filter,
                                {'component': 'writer',
                                 'format': 'html',
                                 'nodes': [node]})
        node['content'] = ' '.join(indented)
        if not indented:
            line = self.state_machine.line
            msg = self.reporter.info(
                  'No content for meta tag "%s".' % name,
                  nodes.literal_block(line, line))
            return msg, blank_finish
        tokens = name.split()
        try:
            attname, val = utils.extract_name_value(tokens[0])[0]
            node[attname.lower()] = val
        except utils.NameValueError:
            # First token is a bare name with no "=value" part.
            node['name'] = tokens[0]
        for token in tokens[1:]:
            try:
                attname, val = utils.extract_name_value(token)[0]
                node[attname.lower()] = val
            except utils.NameValueError, detail:  # Python 2 except syntax; this file predates Python 3
                line = self.state_machine.line
                msg = self.reporter.error(
                      'Error parsing meta tag attribute "%s": %s.'
                      % (token, detail), nodes.literal_block(line, line))
                return msg, blank_finish
        self.document.note_pending(pending)
        return pending, blank_finish
class Meta(Directive):
    """The ``meta`` directive: parse its content with the MetaBody state."""

    has_content = True

    SMkwargs = {'state_classes': (MetaBody,)}

    def run(self):
        self.assert_has_content()
        node = nodes.Element()
        new_line_offset, blank_finish = self.state.nested_list_parse(
            self.content, self.content_offset, node,
            initial_state='MetaBody', blank_finish=True,
            state_machine_kwargs=self.SMkwargs)
        # Flag the directive when the nested parse did not consume all of
        # its content (i.e. the block was only partially valid).
        consumed = new_line_offset - self.content_offset
        if consumed != len(self.content):
            error = self.state_machine.reporter.error(
                'Invalid meta directive.',
                nodes.literal_block(self.block_text, self.block_text),
                line=self.lineno)
            node += error
        return node.children
|
bsd-3-clause
|
raviflipsyde/servo
|
tests/wpt/web-platform-tests/tools/py/testing/path/test_svnauth.py
|
163
|
16079
|
import py
import svntestbase
from py.path import SvnAuth
import time
import sys
svnbin = py.path.local.sysfind('svn')
def make_repo_auth(repo, userdata):
    """Write svnserve auth configuration into *repo*'s conf directory.

    userdata maps user name -> (password, access), where access is
    'r' or 'rw'.
    """
    confdir = py.path.local(repo).join('conf')
    confdir.join('svnserve.conf').write('''\
[general]
anon-access = none
password-db = passwd
authz-db = authz
realm = TestRepo
''')
    authz_lines = ['[/]\n']
    passwd_lines = ['[users]\n']
    for user, (password, access) in userdata.items():
        authz_lines.append('%s = %s\n' % (user, access))
        passwd_lines.append('%s = %s\n' % (user, password))
    confdir.join('authz').write(''.join(authz_lines))
    confdir.join('passwd').write(''.join(passwd_lines))
def serve_bg(repopath):
    """Start ``svnserve`` as a daemon for *repopath* on the first free port.

    Tries ports 10000-10009; returns (port, pid) on success, raises
    IOError when no port could be bound.
    """
    pidfile = py.path.local(repopath).join('pid')
    port = 10000
    e = None
    while port < 10010:
        cmd = 'svnserve -d -T --listen-port=%d --pid-file=%s -r %s' % (
            port, pidfile, repopath)
        print(cmd)
        try:
            py.process.cmdexec(cmd)
        except py.process.cmdexec.Error:
            # Port probably taken; remember the error and try the next one.
            e = sys.exc_info()[1]
        else:
            # XXX we assume here that the pid file gets written somewhere, I
            # guess this should be relatively safe... (I hope, at least?)
            counter = pid = 0
            while counter < 10:
                counter += 1
                try:
                    pid = pidfile.read()
                except py.error.ENOENT:
                    # pid file not written yet; keep polling.
                    pass
                if pid:
                    break
                time.sleep(0.2)
            return port, int(pid)
        port += 1
    raise IOError('could not start svnserve: %s' % (e,))
class TestSvnAuth(object):
    """Unit tests for SvnAuth's command-line option rendering."""

    def test_basic(self):
        auth = SvnAuth('foo', 'bar')
        assert auth.username == 'foo'
        assert auth.password == 'bar'
        assert str(auth)

    def test_makecmdoptions_uname_pw_makestr(self):
        auth = SvnAuth('foo', 'bar')
        assert auth.makecmdoptions() == '--username="foo" --password="bar"'

    def test_makecmdoptions_quote_escape(self):
        # Embedded double quotes must be backslash-escaped in the options.
        auth = SvnAuth('fo"o', '"ba\'r"')
        assert auth.makecmdoptions() == '--username="fo\\"o" --password="\\"ba\'r\\""'

    def test_makecmdoptions_no_cache_auth(self):
        auth = SvnAuth('foo', 'bar', cache_auth=False)
        assert auth.makecmdoptions() == ('--username="foo" --password="bar" '
                                         '--no-auth-cache')

    def test_makecmdoptions_no_interactive(self):
        auth = SvnAuth('foo', 'bar', interactive=False)
        assert auth.makecmdoptions() == ('--username="foo" --password="bar" '
                                         '--non-interactive')

    def test_makecmdoptions_no_interactive_no_cache_auth(self):
        auth = SvnAuth('foo', 'bar', cache_auth=False,
                       interactive=False)
        assert auth.makecmdoptions() == ('--username="foo" --password="bar" '
                                         '--no-auth-cache --non-interactive')
class svnwc_no_svn(py.path.svnwc):
    """svnwc stub that records issued svn commands instead of running them."""

    def __new__(cls, *args, **kwargs):
        self = super(svnwc_no_svn, cls).__new__(cls, *args, **kwargs)
        self.commands = []
        return self

    def _svn(self, *args):
        # Capture the command invocation for later inspection by the tests.
        self.commands.append(args)
class TestSvnWCAuth(object):
    """Check that svnwc operations pass the auth options to svn commands."""

    def setup_method(self, meth):
        if not svnbin:
            py.test.skip("svn binary required")
        self.auth = SvnAuth('user', 'pass', cache_auth=False)

    def test_checkout(self):
        wc = svnwc_no_svn('foo', auth=self.auth)
        wc.checkout('url')
        # The auth options are appended as the last command argument.
        assert wc.commands[0][-1] == ('--username="user" --password="pass" '
                                      '--no-auth-cache')

    def test_commit(self):
        wc = svnwc_no_svn('foo', auth=self.auth)
        wc.commit('msg')
        assert wc.commands[0][-1] == ('--username="user" --password="pass" '
                                      '--no-auth-cache')

    def test_checkout_no_cache_auth(self):
        wc = svnwc_no_svn('foo', auth=self.auth)
        wc.checkout('url')
        assert wc.commands[0][-1] == ('--username="user" --password="pass" '
                                      '--no-auth-cache')

    def test_checkout_auth_from_constructor(self):
        wc = svnwc_no_svn('foo', auth=self.auth)
        wc.checkout('url')
        assert wc.commands[0][-1] == ('--username="user" --password="pass" '
                                      '--no-auth-cache')
class svnurl_no_svn(py.path.svnurl):
    """svnurl stub that records commands and returns canned output."""

    # Canned outputs returned in place of real svn command results.
    cmdexec_output = 'test'
    popen_output = 'test'

    def __new__(cls, *args, **kwargs):
        self = super(svnurl_no_svn, cls).__new__(cls, *args, **kwargs)
        self.commands = []
        return self

    def _cmdexec(self, cmd):
        self.commands.append(cmd)
        return self.cmdexec_output

    def _popen(self, cmd):
        self.commands.append(cmd)
        return self.popen_output
class TestSvnURLAuth(object):
    """Check that svnurl operations propagate auth and pass auth options."""

    def setup_method(self, meth):
        self.auth = SvnAuth('foo', 'bar')

    def test_init(self):
        u = svnurl_no_svn('http://foo.bar/svn')
        assert u.auth is None
        u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
        assert u.auth is self.auth

    def test_new(self):
        # Derived paths must inherit the auth object.
        u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
        new = u.new(basename='bar')
        assert new.auth is self.auth
        assert new.url == 'http://foo.bar/svn/bar'

    def test_join(self):
        u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
        new = u.join('foo')
        assert new.auth is self.auth
        assert new.url == 'http://foo.bar/svn/foo'

    def test_listdir(self):
        u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
        u.cmdexec_output = '''\
1717 johnny 1529 Nov 04 14:32 LICENSE.txt
1716 johnny 5352 Nov 04 14:28 README.txt
'''
        paths = u.listdir()
        assert paths[0].auth is self.auth
        assert paths[1].auth is self.auth
        assert paths[0].basename == 'LICENSE.txt'

    def test_info(self):
        u = svnurl_no_svn('http://foo.bar/svn/LICENSE.txt', auth=self.auth)
        def dirpath(self):
            return self
        u.cmdexec_output = '''\
1717 johnny 1529 Nov 04 14:32 LICENSE.txt
1716 johnny 5352 Nov 04 14:28 README.txt
'''
        # Temporarily patch dirpath at class level so info() reuses the stub.
        org_dp = u.__class__.dirpath
        u.__class__.dirpath = dirpath
        try:
            info = u.info()
        finally:
            u.dirpath = org_dp
        assert info.size == 1529

    def test_open(self):
        u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
        foo = u.join('foo')
        foo.check = lambda *args, **kwargs: True
        ret = foo.open()
        assert ret == 'test'
        assert '--username="foo" --password="bar"' in foo.commands[0]

    def test_dirpath(self):
        u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
        parent = u.dirpath()
        assert parent.auth is self.auth

    def test_mkdir(self):
        u = svnurl_no_svn('http://foo.bar/svn/qweqwe', auth=self.auth)
        assert not u.commands
        u.mkdir(msg='created dir foo')
        assert u.commands
        assert '--username="foo" --password="bar"' in u.commands[0]

    def test_copy(self):
        u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
        u2 = svnurl_no_svn('http://foo.bar/svn2')
        u.copy(u2, 'copied dir')
        assert '--username="foo" --password="bar"' in u.commands[0]

    def test_rename(self):
        u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
        u.rename('http://foo.bar/svn/bar', 'moved foo to bar')
        assert '--username="foo" --password="bar"' in u.commands[0]

    def test_remove(self):
        u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
        u.remove(msg='removing foo')
        assert '--username="foo" --password="bar"' in u.commands[0]

    def test_export(self):
        u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
        target = py.path.local('/foo')
        u.export(target)
        assert '--username="foo" --password="bar"' in u.commands[0]

    def test_log(self):
        u = svnurl_no_svn('http://foo.bar/svn/foo', auth=self.auth)
        # Canned `svn log --xml` output parsed by log().
        u.popen_output = py.io.TextIO(py.builtin._totext('''\
<?xml version="1.0"?>
<log>
<logentry revision="51381">
<author>guido</author>
<date>2008-02-11T12:12:18.476481Z</date>
<msg>Creating branch to work on auth support for py.path.svn*.
</msg>
</logentry>
</log>
''', 'ascii'))
        u.check = lambda *args, **kwargs: True
        ret = u.log(10, 20, verbose=True)
        assert '--username="foo" --password="bar"' in u.commands[0]
        assert len(ret) == 1
        assert int(ret[0].rev) == 51381
        assert ret[0].author == 'guido'

    def test_propget(self):
        u = svnurl_no_svn('http://foo.bar/svn', auth=self.auth)
        u.propget('foo')
        assert '--username="foo" --password="bar"' in u.commands[0]
def pytest_funcarg__setup(request):
    # Legacy pytest funcarg hook: provides a fresh Setup fixture per test.
    return Setup(request)
class Setup:
    """Per-test fixture: creates an svn repo with auth configured and
    serves it in the background via svnserve (killed at teardown)."""

    def __init__(self, request):
        if not svnbin:
            py.test.skip("svn binary required")
        if not request.config.option.runslowtests:
            py.test.skip('use --runslowtests to run these tests')
        tmpdir = request.getfuncargvalue("tmpdir")
        repodir = tmpdir.join("repo")
        py.process.cmdexec('svnadmin create %s' % repodir)
        if sys.platform == 'win32':
            # file:// URLs need a leading slash and forward slashes on Windows.
            repodir = '/' + str(repodir).replace('\\', '/')
        self.repo = py.path.svnurl("file://%s" % repodir)
        if py.std.sys.platform == 'win32':
            # remove trailing slash...
            repodir = repodir[1:]
        self.repopath = py.path.local(repodir)
        self.temppath = tmpdir.mkdir("temppath")
        self.auth = SvnAuth('johnny', 'foo', cache_auth=False,
                            interactive=False)
        make_repo_auth(self.repopath, {'johnny': ('foo', 'rw')})
        self.port, self.pid = serve_bg(self.repopath.dirpath())
        # XXX caching is too global
        py.path.svnurl._lsnorevcache._dict.clear()
        request.addfinalizer(lambda: py.process.kill(self.pid))
class TestSvnWCAuthFunctional:
    """Functional svnwc auth tests against a live background svnserve."""

    def test_checkout_constructor_arg(self, setup):
        wc = py.path.svnwc(setup.temppath, auth=setup.auth)
        wc.checkout(
            'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
        assert wc.join('.svn').check()

    def test_checkout_function_arg(self, setup):
        wc = py.path.svnwc(setup.temppath, auth=setup.auth)
        wc.checkout(
            'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
        assert wc.join('.svn').check()

    def test_checkout_failing_non_interactive(self, setup):
        # Wrong password + non-interactive must fail instead of prompting.
        auth = SvnAuth('johnny', 'bar', cache_auth=False,
                       interactive=False)
        wc = py.path.svnwc(setup.temppath, auth)
        py.test.raises(Exception,
            ("wc.checkout('svn://localhost:%(port)s/%(repopath)s')" %
             setup.__dict__))

    def test_log(self, setup):
        wc = py.path.svnwc(setup.temppath, setup.auth)
        wc.checkout(
            'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
        foo = wc.ensure('foo.txt')
        wc.commit('added foo.txt')
        log = foo.log()
        assert len(log) == 1
        assert log[0].msg == 'added foo.txt'

    def test_switch(self, setup):
        wc = py.path.svnwc(setup.temppath, auth=setup.auth)
        svnurl = 'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename)
        wc.checkout(svnurl)
        wc.ensure('foo', dir=True).ensure('foo.txt').write('foo')
        wc.commit('added foo dir with foo.txt file')
        wc.ensure('bar', dir=True)
        wc.commit('added bar dir')
        bar = wc.join('bar')
        bar.switch(svnurl + '/foo')
        assert bar.join('foo.txt')

    def test_update(self, setup):
        wc1 = py.path.svnwc(setup.temppath.ensure('wc1', dir=True),
                            auth=setup.auth)
        wc2 = py.path.svnwc(setup.temppath.ensure('wc2', dir=True),
                            auth=setup.auth)
        wc1.checkout(
            'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
        wc2.checkout(
            'svn://localhost:%s/%s' % (setup.port, setup.repopath.basename))
        wc1.ensure('foo', dir=True)
        wc1.commit('added foo dir')
        wc2.update()
        assert wc2.join('foo').check()
        # Updating with bad credentials must raise.
        auth = SvnAuth('unknown', 'unknown', interactive=False)
        wc2.auth = auth
        py.test.raises(Exception, 'wc2.update()')

    def test_lock_unlock_status(self, setup):
        port = setup.port
        wc = py.path.svnwc(setup.temppath, auth=setup.auth)
        wc.checkout(
            'svn://localhost:%s/%s' % (port, setup.repopath.basename,))
        wc.ensure('foo', file=True)
        wc.commit('added foo file')
        foo = wc.join('foo')
        foo.lock()
        status = foo.status()
        assert status.locked
        foo.unlock()
        status = foo.status()
        assert not status.locked
        # Lock operations with bad credentials must raise.
        auth = SvnAuth('unknown', 'unknown', interactive=False)
        foo.auth = auth
        py.test.raises(Exception, 'foo.lock()')
        py.test.raises(Exception, 'foo.unlock()')

    def test_diff(self, setup):
        port = setup.port
        wc = py.path.svnwc(setup.temppath, auth=setup.auth)
        wc.checkout(
            'svn://localhost:%s/%s' % (port, setup.repopath.basename,))
        wc.ensure('foo', file=True)
        wc.commit('added foo file')
        wc.update()
        rev = int(wc.status().rev)
        foo = wc.join('foo')
        foo.write('bar')
        diff = foo.diff()
        assert '\n+bar\n' in diff
        foo.commit('added some content')
        diff = foo.diff()
        assert not diff
        diff = foo.diff(rev=rev)
        assert '\n+bar\n' in diff
        # Diffing with bad credentials must raise.
        auth = SvnAuth('unknown', 'unknown', interactive=False)
        foo.auth = auth
        py.test.raises(Exception, 'foo.diff(rev=rev)')
class TestSvnURLAuthFunctional:
    """Functional svnurl auth tests against a live background svnserve."""

    def test_listdir(self, setup):
        port = setup.port
        u = py.path.svnurl(
            'svn://localhost:%s/%s' % (port, setup.repopath.basename),
            auth=setup.auth)
        u.ensure('foo')
        paths = u.listdir()
        assert len(paths) == 1
        # Listed entries inherit the parent's auth object.
        assert paths[0].auth is setup.auth
        auth = SvnAuth('foo', 'bar', interactive=False)
        u = py.path.svnurl(
            'svn://localhost:%s/%s' % (port, setup.repopath.basename),
            auth=auth)
        py.test.raises(Exception, 'u.listdir()')

    def test_copy(self, setup):
        port = setup.port
        u = py.path.svnurl(
            'svn://localhost:%s/%s' % (port, setup.repopath.basename),
            auth=setup.auth)
        foo = u.mkdir('foo')
        assert foo.check()
        bar = u.join('bar')
        foo.copy(bar)
        assert bar.check()
        assert bar.auth is setup.auth
        # Copying with bad credentials must raise.
        auth = SvnAuth('foo', 'bar', interactive=False)
        u = py.path.svnurl(
            'svn://localhost:%s/%s' % (port, setup.repopath.basename),
            auth=auth)
        foo = u.join('foo')
        bar = u.join('bar')
        py.test.raises(Exception, 'foo.copy(bar)')

    def test_write_read(self, setup):
        port = setup.port
        u = py.path.svnurl(
            'svn://localhost:%s/%s' % (port, setup.repopath.basename),
            auth=setup.auth)
        foo = u.ensure('foo')
        fp = foo.open()
        try:
            data = fp.read()
        finally:
            fp.close()
        assert data == ''
        # Reading with bad credentials must raise.
        auth = SvnAuth('foo', 'bar', interactive=False)
        u = py.path.svnurl(
            'svn://localhost:%s/%s' % (port, setup.repopath.basename),
            auth=auth)
        foo = u.join('foo')
        py.test.raises(Exception, 'foo.open()')

    # XXX rinse, repeat... :|
|
mpl-2.0
|
capone212/crashtec
|
src/crashtec/cdbprocessor/resultspublisher.py
|
1
|
2569
|
'''
Created on 04.05.2013
@author: capone
'''
from crashtec.db.provider import routines as dbroutines
from crashtec.db.schema.fields import PRIMARY_KEY_FIELD
import dbmodel
# Utility helper for the metaclass hack below; see results_metaclass().
class Acceptor(object):
    """Callable dispatching to ``visitor.visit_<class_name>(results)``."""

    def __init__(self, class_name):
        self.class_name = class_name

    def __call__(self, instance, visitor):
        # Resolve the visitor method named after the wrapped result class
        # and hand it the raw parser results.
        handler = getattr(visitor, 'visit_' + self.class_name)
        handler(instance.parser_results)
# Utility class which used to make hack with meta-classes. See results_metaclass()
class BaseResult(object):
    """Wrapper around raw parser results supporting the visitor pattern."""

    def __init__(self, parser_results):
        self.parser_results = parser_results

    def accept(self, visitor):
        # Dispatch through delegated_accept, which generated subclasses
        # replace with an Acceptor bound to their class name.
        return self.delegated_accept(self, visitor)

    # should be implemented in subclasses
    def delegated_accept(self, instance, visitor):
        raise RuntimeError("shouldn't be called!")
# Factory which generates wrapper classes for primitive parser results.
# The wrapper implements all necessary methods for supporting visitors.
# Main intention of this trick is to prevent code duplication.
# For example: results_metaclass('FooResults') builds a BaseResult subclass
# named FooResults whose accept() dispatches to visitor.visit_FooResults().
def results_metaclass(class_name):
    """Create a BaseResult subclass named *class_name* with visitor dispatch."""
    return type(class_name,
                (BaseResult,),
                {'delegated_accept' : Acceptor(class_name)})
# Visitor responsible for saving crash dump processing results.
class ResultsPublisher(object):
    """Visitor that persists parser results onto the task / into the DB."""

    # It is important that task here took by reference, so we can update
    # task record as well.
    def __init__(self, task):
        self.task = task

    def visit_ModulesSectionParserResults(self, modules_list):
        # Module lists are currently not persisted.
        pass

    def visit_RawOutpuSectionParserResults(self, raw_debugger_output):
        """Store the raw debugger output in its own table, linked to the task."""
        # Non-ASCII bytes are silently dropped before storage.
        raw_debugger_output = raw_debugger_output.decode('ascii', 'ignore')
        d = dbmodel
        new_record = dbroutines.Record()
        new_record[d.RAWRESULTS_TASK_ID] = self.task[PRIMARY_KEY_FIELD]
        new_record[d.RAWRESULTS_DBG_OUTPUT] = raw_debugger_output
        dbroutines.create_new_record(d.RAWRESULTS_TABLE, new_record)

    def visit_CrashSignatureParserResults(self, crash_signature):
        # Copies signature fields onto the task record; presumably the caller
        # saves the task afterwards — no save call is visible here (confirm).
        d = dbmodel
        self.task[d.TASKS_PROBLEM_CLASS] = crash_signature.problem_class
        self.task[d.TASKS_SYMBOL_NAME] = crash_signature.symbol_name
        self.task[d.TASKS_FAIL_IMAGE] = crash_signature.image_name
        self.task[d.TASKS_FAILURE_BUCKET_ID] = crash_signature.failure_bucket_id

    def visit_ProblemStackParserResuls(self, crash_call_stack):
        # Problem call stacks are currently not persisted.
        pass
|
gpl-3.0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.