repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
bobobox/ansible | lib/ansible/modules/cloud/openstack/os_router.py | 12 | 14139 | #!/usr/bin/python
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: os_router
short_description: Create or delete routers from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
author: "David Shrewsbury (@Shrews)"
description:
- Create or Delete routers from OpenStack. Although Neutron allows
routers to share the same name, this module enforces name uniqueness
to be more user friendly.
options:
state:
description:
- Indicate desired state of the resource
choices: ['present', 'absent']
default: present
name:
description:
- Name to be given to the router
required: true
admin_state_up:
description:
- Desired admin state of the created or existing router.
required: false
default: true
enable_snat:
description:
- Enable Source NAT (SNAT) attribute.
required: false
default: true
network:
description:
- Unique name or ID of the external gateway network.
- Required if I(interfaces) or I(enable_snat) are provided.
type: string
required: false
default: None
project:
description:
- Unique name or ID of the project.
type: string
required: false
default: None
version_added: "2.2"
external_fixed_ips:
description:
- The IP address parameters for the external gateway network. Each
is a dictionary with the subnet name or ID (subnet) and the IP
address to assign on the subnet (ip). If no IP is specified,
one is automatically assigned from that subnet.
required: false
default: None
interfaces:
description:
- List of subnets to attach to the router internal interface.
required: false
default: None
availability_zone:
description:
- Ignored. Present for backwards compatibility
required: false
requirements: ["shade"]
'''
EXAMPLES = '''
# Create a simple router, not attached to a gateway or subnets.
- os_router:
cloud: mycloud
state: present
name: simple_router
# Create a simple router, not attached to a gateway or subnets for a given project.
- os_router:
cloud: mycloud
state: present
name: simple_router
project: myproj
# Creates a router attached to ext_network1 on an IPv4 subnet and one
# internal subnet interface.
- os_router:
cloud: mycloud
state: present
name: router1
network: ext_network1
external_fixed_ips:
- subnet: public-subnet
ip: 172.24.4.2
interfaces:
- private-subnet
# Update existing router1 external gateway to include the IPv6 subnet.
# Note that since 'interfaces' is not provided, any existing internal
# interfaces on an existing router will be left intact.
- os_router:
cloud: mycloud
state: present
name: router1
network: ext_network1
external_fixed_ips:
- subnet: public-subnet
ip: 172.24.4.2
- subnet: ipv6-public-subnet
ip: 2001:db8::3
# Delete router1
- os_router:
cloud: mycloud
state: absent
name: router1
'''
RETURN = '''
router:
description: Dictionary describing the router.
returned: On success when I(state) is 'present'
type: dictionary
contains:
id:
description: Router ID.
type: string
sample: "474acfe5-be34-494c-b339-50f06aa143e4"
name:
description: Router name.
type: string
sample: "router1"
admin_state_up:
description: Administrative state of the router.
type: boolean
sample: true
status:
description: The router status.
type: string
sample: "ACTIVE"
tenant_id:
description: The tenant ID.
type: string
sample: "861174b82b43463c9edc5202aadc60ef"
external_gateway_info:
description: The external gateway parameters.
type: dictionary
sample: {
"enable_snat": true,
"external_fixed_ips": [
{
"ip_address": "10.6.6.99",
"subnet_id": "4272cb52-a456-4c20-8f3c-c26024ecfa81"
}
]
}
routes:
description: The extra routes configuration for L3 router.
type: list
'''
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from distutils.version import StrictVersion
def _needs_update(cloud, module, router, network, internal_subnet_ids):
"""Decide if the given router needs an update.
"""
if router['admin_state_up'] != module.params['admin_state_up']:
return True
if router['external_gateway_info']:
if router['external_gateway_info'].get('enable_snat', True) != module.params['enable_snat']:
return True
if network:
if not router['external_gateway_info']:
return True
elif router['external_gateway_info']['network_id'] != network['id']:
return True
# check external interfaces
if module.params['external_fixed_ips']:
for new_iface in module.params['external_fixed_ips']:
subnet = cloud.get_subnet(new_iface['subnet'])
exists = False
# compare the requested interface with existing, looking for an existing match
for existing_iface in router['external_gateway_info']['external_fixed_ips']:
if existing_iface['subnet_id'] == subnet['id']:
if 'ip' in new_iface:
if existing_iface['ip_address'] == new_iface['ip']:
# both subnet id and ip address match
exists = True
break
else:
# only the subnet was given, so ip doesn't matter
exists = True
break
# this interface isn't present on the existing router
if not exists:
return True
# check internal interfaces
if module.params['interfaces']:
existing_subnet_ids = []
for port in cloud.list_router_interfaces(router, 'internal'):
if 'fixed_ips' in port:
for fixed_ip in port['fixed_ips']:
existing_subnet_ids.append(fixed_ip['subnet_id'])
if set(internal_subnet_ids) != set(existing_subnet_ids):
return True
return False
def _system_state_change(cloud, module, router, network, internal_ids):
"""Check if the system state would be changed."""
state = module.params['state']
if state == 'absent' and router:
return True
if state == 'present':
if not router:
return True
return _needs_update(cloud, module, router, network, internal_ids)
return False
def _build_kwargs(cloud, module, router, network):
kwargs = {
'admin_state_up': module.params['admin_state_up'],
}
if router:
kwargs['name_or_id'] = router['id']
else:
kwargs['name'] = module.params['name']
if network:
kwargs['ext_gateway_net_id'] = network['id']
# can't send enable_snat unless we have a network
kwargs['enable_snat'] = module.params['enable_snat']
if module.params['external_fixed_ips']:
kwargs['ext_fixed_ips'] = []
for iface in module.params['external_fixed_ips']:
subnet = cloud.get_subnet(iface['subnet'])
d = {'subnet_id': subnet['id']}
if 'ip' in iface:
d['ip_address'] = iface['ip']
kwargs['ext_fixed_ips'].append(d)
return kwargs
def _validate_subnets(module, cloud):
external_subnet_ids = []
internal_subnet_ids = []
if module.params['external_fixed_ips']:
for iface in module.params['external_fixed_ips']:
subnet = cloud.get_subnet(iface['subnet'])
if not subnet:
module.fail_json(msg='subnet %s not found' % iface['subnet'])
external_subnet_ids.append(subnet['id'])
if module.params['interfaces']:
for iface in module.params['interfaces']:
subnet = cloud.get_subnet(iface)
if not subnet:
module.fail_json(msg='subnet %s not found' % iface)
internal_subnet_ids.append(subnet['id'])
return external_subnet_ids, internal_subnet_ids
def main():
    """Ansible entry point: create, update or delete an OpenStack router."""
    argument_spec = openstack_full_argument_spec(
        state=dict(default='present', choices=['absent', 'present']),
        name=dict(required=True),
        admin_state_up=dict(type='bool', default=True),
        enable_snat=dict(type='bool', default=True),
        network=dict(default=None),
        interfaces=dict(type='list', default=None),
        external_fixed_ips=dict(type='list', default=None),
        project=dict(default=None)
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')
    if (module.params['project'] and
            StrictVersion(shade.__version__) <= StrictVersion('1.9.0')):
        # BUGFIX: the two adjacent string literals previously concatenated
        # without a separating space ("...version ofthe shade library...").
        module.fail_json(msg="To utilize project, the installed version of "
                             "the shade library MUST be > 1.9.0")

    state = module.params['state']
    name = module.params['name']
    network = module.params['network']
    project = module.params['project']

    if module.params['external_fixed_ips'] and not network:
        module.fail_json(msg='network is required when supplying external_fixed_ips')

    try:
        cloud = shade.openstack_cloud(**module.params)
        # Scope the router lookup to the requested project when one is given.
        if project is not None:
            proj = cloud.get_project(project)
            if proj is None:
                module.fail_json(msg='Project %s could not be found' % project)
            project_id = proj['id']
            filters = {'tenant_id': project_id}
        else:
            project_id = None
            filters = None
        router = cloud.get_router(name, filters=filters)

        net = None
        if network:
            net = cloud.get_network(network)
            if not net:
                module.fail_json(msg='network %s not found' % network)

        # Validate and cache the subnet IDs so we can avoid duplicate checks
        # and expensive API calls.
        external_ids, internal_ids = _validate_subnets(module, cloud)
        if module.check_mode:
            module.exit_json(
                changed=_system_state_change(cloud, module, router, net, internal_ids)
            )

        if state == 'present':
            changed = False

            if not router:
                kwargs = _build_kwargs(cloud, module, router, net)
                if project_id:
                    kwargs['project_id'] = project_id
                router = cloud.create_router(**kwargs)
                for internal_subnet_id in internal_ids:
                    cloud.add_router_interface(router, subnet_id=internal_subnet_id)
                changed = True
            else:
                if _needs_update(cloud, module, router, net, internal_ids):
                    kwargs = _build_kwargs(cloud, module, router, net)
                    updated_router = cloud.update_router(**kwargs)

                    # Protect against update_router() not actually
                    # updating the router.
                    if not updated_router:
                        changed = False

                    # On a router update, if any internal interfaces were supplied,
                    # just detach all existing internal interfaces and attach the new.
                    elif internal_ids:
                        router = updated_router
                        ports = cloud.list_router_interfaces(router, 'internal')
                        for port in ports:
                            cloud.remove_router_interface(router, port_id=port['id'])
                        for internal_subnet_id in internal_ids:
                            cloud.add_router_interface(router, subnet_id=internal_subnet_id)

                    changed = True

            module.exit_json(changed=changed,
                             router=router,
                             id=router['id'])

        elif state == 'absent':
            if not router:
                module.exit_json(changed=False)
            else:
                # We need to detach all internal interfaces on a router before
                # we will be allowed to delete it.
                ports = cloud.list_router_interfaces(router, 'internal')
                router_id = router['id']
                for port in ports:
                    cloud.remove_router_interface(router, port_id=port['id'])
                cloud.delete_router(router_id)
                module.exit_json(changed=True)

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
ldotlopez/appkit | appkit/messaging/imap_folder.py | 3 | 7921 | from datetime import datetime
import email
import imaplib
from itertools import chain
import mailbox
import time
# from ldotcommons import messaging
def email_date_to_native(dt):
    """Convert an RFC 2822 Date header string into a naive local datetime.

    Note: the timezone in the header is ignored (email.utils.parsedate drops
    it) and the wall-clock fields are interpreted as local time.
    """
    parsed = email.utils.parsedate(dt)
    return datetime.fromtimestamp(time.mktime(parsed))
def email_newer_than(email, max_age, now=None):
    """Return True if *email*'s Date header is at most *max_age* days before *now*.

    BUGFIX: ``now`` previously defaulted to ``datetime.now()`` evaluated once
    at import time, freezing the reference point for the process lifetime.
    It now defaults to the current time at each call.
    """
    if now is None:
        now = datetime.now()
    dt = email_date_to_native(email['date'])
    delta = now - dt
    # max_age is expressed in days
    return delta.total_seconds() <= (60*60*24*max_age)
def imap_query_uids(M, query):
    """Run an IMAP SEARCH with *query* (default 'ALL') and return matching UIDs."""
    criteria = query or 'ALL'
    status, payload = M.search(None, criteria)
    assert status == 'OK'
    # the server answers with one space-separated byte string of UIDs
    return payload[0].split()
def imap_fetch(M, uid, flags_mod, flags):
    """Download one message by UID, optionally adjusting its flags afterwards.

    *flags_mod*/*flags* let the caller e.g. restore the unseen state
    (STORE '-FLAGS' '\\SEEN') after the FETCH implicitly marked it read.
    """
    status, payload = M.fetch(uid, '(RFC822)')
    assert status == 'OK'
    if flags_mod:
        M.store(uid, flags_mod, flags)
    return email.message_from_bytes(payload[0][1])
def imap_recv(M, max_age, now=None):
    """Fetch messages no older than *max_age* days from an open IMAP connection.

    Seen messages are fetched first, then unseen ones (whose \\SEEN flag is
    cleared again after the fetch).  Returns the messages sorted newest-first.

    BUGFIX: ``now`` previously defaulted to ``datetime.now()`` evaluated once
    at import time; it is now evaluated per call.
    """
    if now is None:
        now = datetime.now()
    messages = []
    for (query, flags_mod, flags) in (
            ('(SEEN)', None, None),
            ('(UNSEEN)', r'-FLAGS', r'\SEEN')
    ):
        # walk UIDs from newest to oldest so we can stop at the first
        # message that falls outside the max_age window
        numeric_uids = sorted((int(x) for x in imap_query_uids(M, query)),
                              reverse=True)
        for numeric_uid in numeric_uids:
            uid = str(numeric_uid).encode('ascii')
            msg = imap_fetch(M, uid, flags_mod, flags)
            if email_newer_than(msg, max_age, now=now):
                messages.append(msg)
            else:
                # everything after this UID is older still
                break
    return sorted(messages,
                  key=lambda x: email_date_to_native(x['date']),
                  reverse=True)
class ImapFolder:
    """Message source backed either by a live IMAP folder or local mbox files.

    Exactly one of ``host`` (IMAP mode, requires credentials) or
    ``mbox_paths`` (offline mode) must be supplied.
    """

    def __init__(self,
                 host=None, port=993,
                 username=None, password=None, ssl=True,
                 folder='INBOX',
                 mbox_paths=[],
                 max_age=60, now=None
                 ):
        # accept a single mbox path as a convenience
        if isinstance(mbox_paths, str):
            mbox_paths = [mbox_paths]

        if host:
            if not username or not password:
                raise Exception('No credentials provided')

            # BUGFIX: the original dict literal listed 'port' twice.
            self._opts = {
                'host': host,
                'port': port,
                'username': username,
                'password': password,
                'folder': folder,
            }
            self._m_cls = imaplib.IMAP4_SSL if ssl else imaplib.IMAP4

        elif mbox_paths:
            self._opts = {
                'mboxes': mbox_paths
            }
            self._m_cls = None

        else:
            # BUGFIX: this used to be ``raise("...")``, which raises a
            # TypeError (strings are not exceptions) instead of a
            # meaningful error.
            raise ValueError("Neither host or mbox_path specified")

        self._now = now or datetime.now()
        self._max_age = max_age

    def recv(self, flatten=True):
        """Fetch recent messages, decode their payloads in place and return them.

        With ``flatten=True`` multipart containers are expanded: parent
        headers are copied onto each part and only non-multipart parts are
        returned.
        """
        if self._m_cls:
            # live IMAP mode
            M = self._m_cls(
                self._opts['host'],
                self._opts['port'])
            M.login(
                self._opts['username'],
                self._opts['password'])
            M.select(self._opts['folder'])
            messages = imap_recv(M, max_age=self._max_age, now=self._now)
        else:
            # offline mode: read every message from every configured mbox
            messages = [x for x in
                        chain.from_iterable([mailbox.mbox(mbox).values()
                                             for mbox in self._opts['mboxes']])
                        ]

        # Decode each leaf payload to text, trying the declared charset
        # first and then a few common fallbacks.
        for m in chain.from_iterable([m.walk() for m in messages]):
            payload_ = m.get_payload(decode=True)

            # Multipart messages don't have a payload of their own
            if payload_ is None:
                continue

            encodings = ['utf-8', 'iso-8859-15', 'ascii']
            msg_encoding = m.get_content_charset()
            if msg_encoding:
                # some senders put a comma-separated list here; use the first
                encodings = [msg_encoding.split(',')[0]] + encodings

            payload = None
            for encoding in encodings:
                try:
                    payload = payload_.decode(encoding)
                    m.set_payload(payload, charset=encoding)
                    break
                except UnicodeDecodeError:
                    pass

            if payload is None:
                print("Unable to decode")
                continue

        if flatten:
            # Copy headers from parent message to its children
            for message in messages:
                for m in message.walk():
                    message_keys = set(message.keys())
                    m_keys = set(m.keys())
                    for k in message_keys - m_keys:
                        m[k] = message[k]

            # Flat list of the non-container parts only
            messages = [m for m in chain.from_iterable(
                [m.walk() for m in messages]) if not m.is_multipart()]

        return messages
# @property
# def messages(self):
# if self._messages is None and self._M:
# self._messages = self._fetch()
# return self._messages
# @messages.setter
# def messages(self, value):
# raise ValueError('read-only property')
# def walk_messages(self):
# return chain.from_iterable([m.walk() for m in self.messages])
# def _fetch(self):
# def _search(self, query=None):
# if not query:
# query = 'ALL'
# typ, data = self._M.search(None, query)
# assert typ == 'OK'
# uids = data[0].split()
# for uid in reversed(uids):
# yield uid
# def email_date_to_native(date):
# date = email.utils.parsedate(date)
# date = time.mktime(date)
# return datetime.fromtimestamp(date)
# def _is_recent(msg):
# date = email_date_to_native(msg['date'])
# delta = self._now - date
# return delta.total_seconds() <= (60*60*24*self._max_age)
# msgs = []
# for search, flag_mod, flags in [
# ('(SEEN)', None, None),
# ('(UNSEEN)', '-FLAGS', r'\SEEN')
# ]:
# for uid in _search(search):
# resp, data = self._M.fetch(uid, '(RFC822)')
# msg = self.read_message_from_bytes(data[0][1])
# if flag_mod:
# self._M.store(uid, flag_mod, flags)
# if _is_recent(msg):
# msgs.append(msg)
# else:
# break
# def read_message_from_file(self, path):
# with open(path, 'rb') as fh:
# self.read_message_from_bytes(fh.read())
# def read_message_from_bytes(self, buff):
# msg = email.message_from_bytes(buff)
# for m in msg.walk():
# payload_ = m.get_payload(decode=True)
# # Multipart messages doesn't have payload
# if payload_ is None:
# continue
# encodings = ['utf-8', 'iso-8859-15', 'ascii']
# msg_encoding = m.get_content_charset()
# if msg_encoding:
# encodings = [msg_encoding] + encodings
# payload = None
# for encoding in encodings:
# try:
# payload = payload_.decode(encoding)
# m.set_payload(payload, charset=encoding)
# break
# except UnicodeDecodeError:
# pass
# if payload is None:
# print("Unable to decode")
# continue
# self._messages.append(msg)
# def process_with(self, inspector):
# for msg in self.messages:
# ret = inspector.process(msg)
# if ret is None:
# continue
# elif isinstance(ret, collections.Iterable):
# for r in ret:
# yield r
# else:
# yield ret
# raise StopIteration()
| gpl-2.0 |
juanyaw/python | cpython/Tools/scripts/fixnotice.py | 64 | 3059 | #! /usr/bin/env python3
"""(Ostensibly) fix copyright notices in files.
Actually, this script will simply replace a block of text in a file from one
string to another. It will only do this once though, i.e. not globally
throughout the file. It writes a backup file and then does an os.rename()
dance for atomicity.
Usage: fixnotices.py [options] [filenames]
Options:
-h / --help
Print this message and exit
--oldnotice=file
Use the notice in the file as the old (to be replaced) string, instead
of the hard coded value in the script.
--newnotice=file
Use the notice in the file as the new (replacement) string, instead of
the hard coded value in the script.
--dry-run
Don't actually make the changes, but print out the list of files that
would change. When used with -v, a status will be printed for every
file.
-v / --verbose
Print a message for every file looked at, indicating whether the file
is changed or not.
"""
OLD_NOTICE = """/***********************************************************
Copyright (c) 2000, BeOpen.com.
Copyright (c) 1995-2000, Corporation for National Research Initiatives.
Copyright (c) 1990-1995, Stichting Mathematisch Centrum.
All rights reserved.
See the file "Misc/COPYRIGHT" for information on usage and
redistribution of this file, and for a DISCLAIMER OF ALL WARRANTIES.
******************************************************************/
"""
import os
import sys
import getopt
NEW_NOTICE = ""
DRYRUN = 0
VERBOSE = 0
def usage(code, msg=''):
    """Show the module usage text, optionally followed by *msg*, then exit."""
    text = __doc__ % globals()
    print(text)
    if msg:
        print(msg)
    sys.exit(code)
def main():
    """Parse command-line options and fix the notice in each named file.

    Updates the module-level OLD_NOTICE/NEW_NOTICE/DRYRUN/VERBOSE settings
    from the options, then runs process() over every positional argument.
    """
    global DRYRUN, OLD_NOTICE, NEW_NOTICE, VERBOSE
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'hv',
                                   ['help', 'oldnotice=', 'newnotice=',
                                    'dry-run', 'verbose'])
    except getopt.error as msg:
        usage(1, msg)

    for opt, arg in opts:
        if opt in ('-h', '--help'):
            usage(0)
        elif opt in ('-v', '--verbose'):
            VERBOSE = 1
        elif opt == '--dry-run':
            DRYRUN = 1
        elif opt == '--oldnotice':
            # IMPROVED: context managers guarantee the files are closed
            # even if read() raises.
            with open(arg) as fp:
                OLD_NOTICE = fp.read()
        elif opt == '--newnotice':
            with open(arg) as fp:
                NEW_NOTICE = fp.read()

    for arg in args:
        process(arg)
def process(file):
    """Replace the first occurrence of OLD_NOTICE with NEW_NOTICE in *file*.

    Writes the new content to a .new temp name and renames for atomicity,
    keeping the original as a .bak backup.  Honors DRYRUN and VERBOSE.
    """
    # IMPROVED: context managers ensure the handles are closed on all paths.
    with open(file) as f:
        data = f.read()
    i = data.find(OLD_NOTICE)
    if i < 0:
        if VERBOSE:
            print('no change:', file)
        return
    elif DRYRUN or VERBOSE:
        print(' change:', file)
    if DRYRUN:
        # Don't actually change the file
        return
    data = data[:i] + NEW_NOTICE + data[i+len(OLD_NOTICE):]
    new = file + ".new"
    backup = file + ".bak"
    with open(new, "w") as f:
        f.write(data)
    os.rename(file, backup)
    os.rename(new, file)
if __name__ == '__main__':
main()
| bsd-3-clause |
frohoff/Empire | lib/modules/python/trollsploit/osx/login_message.py | 12 | 3986 | class Module:
def __init__(self, mainMenu, params=[]):
# metadata info about the module, not modified during runtime
self.info = {
# name for the module that will appear in module menus
'Name': 'Change Login Message for the user.',
# list of one or more authors for the module
'Author': ['@424f424f'],
# more verbose multi-line description of the module
'Description': 'Change the login message for the user.',
# True if the module needs to run in the background
'Background' : False,
# File extension to save the file as
'OutputExtension' : "",
# if the module needs administrative privileges
'NeedsAdmin' : True,
# True if the method doesn't touch disk/is reasonably opsec safe
'OpsecSafe' : False,
# the module language
'Language' : 'python',
# the minimum language version needed
'MinLanguageVersion' : '2.6',
# list of any references/other comments
'Comments': ['']
}
# any options needed by the module, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Agent' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Agent to run on.',
'Required' : True,
'Value' : ''
},
'Message' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'Message to display',
'Required' : False,
'Value' : ''
},
'Remove' : {
# The 'Agent' option is the only one that MUST be in a module
'Description' : 'True/False to remove login message.',
'Required' : True,
'Value' : 'False'
}
}
# save off a copy of the mainMenu object to access external functionality
# like listeners/agent handlers/etc.
self.mainMenu = mainMenu
# During instantiation, any settable option parameters
# are passed as an object set to the module and the
# options dictionary is automatically set. This is mostly
# in case options are passed on the command line
if params:
for param in params:
# parameter format is [Name, Value]
option, value = param
if option in self.options:
self.options[option]['Value'] = value
def generate(self, obfuscate=False, obfuscationCommand=""):
message = self.options['Message']['Value']
remove = self.options['Remove']['Value']
# the Python script itself, with the command to invoke
# for execution appended to the end. Scripts should output
# everything to the pipeline for proper parsing.
#
# the script should be stripped of comments, with a link to any
# original reference script included in the comments.
script = """
import subprocess
remove = %s
try:
if remove == True:
cmd = \"""defaults delete /Library/Preferences/com.apple.loginwindow LoginwindowText""\"
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
print "Login message removed"
elif remove == False:
cmd = \"""defaults write /Library/Preferences/com.apple.loginwindow LoginwindowText '%s' ""\"
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
print "Login message added"
else:
print "Invalid options"
except Exception as e:
print "Module failed"
print e
""" % (remove, message)
return script
| bsd-3-clause |
surligas/cs436-gnuradio | gnuradio-runtime/python/gnuradio/gru/gnuplot_freqz.py | 59 | 4137 | #!/usr/bin/env python
#
# Copyright 2005,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
__all__ = ['gnuplot_freqz']
import tempfile
import os
import math
import numpy
from gnuradio import gr
from gnuradio.gru.freqz import freqz
def gnuplot_freqz (hw, Fs=None, logfreq=False):
    """hw is a tuple of the form (h, w) where h is sequence of complex
    freq responses, and w is a sequence of corresponding frequency
    points.  Plot the frequency response using gnuplot.  If Fs is
    provided, use it as the sampling frequency, else use 2*pi.

    Returns a handle to the gnuplot graph. When the handle is reclaimed
    the graph is torn down."""
    # Data points are streamed through a temp file; commands go down a
    # pipe to an interactive gnuplot process.
    data_file = tempfile.NamedTemporaryFile ()
    cmd_file = os.popen ('gnuplot', 'w')

    h, w = hw
    # Magnitude in dB; the 1e-9 floor avoids log10(0) at exact nulls.
    ampl = 20 * numpy.log10 (numpy.absolute (h) + 1e-9)
    phase = map (lambda x: math.atan2 (x.imag, x.real), h)

    if Fs:
        # Rescale the frequency axis from radians/sample to Hz.
        # NOTE(review): this mutates the caller's w in place — assumes w
        # is a numpy array; confirm callers don't reuse it.
        w *= (Fs/(2*math.pi))

    # Three columns: frequency, magnitude (dB), phase (radians).
    for freq, a, ph in zip (w, ampl, phase):
        data_file.write ("%g\t%g\t%g\n" % (freq, a, ph))

    data_file.flush ()

    cmd_file.write ("set grid\n")
    if logfreq:
        cmd_file.write ("set logscale x\n")
    else:
        cmd_file.write ("unset logscale x\n")
    # Only column 2 (magnitude) is plotted; phase is written but unused here.
    cmd_file.write ("plot '%s' using 1:2 with lines\n" % (data_file.name,))

    cmd_file.flush ()

    # Both handles must stay referenced by the caller: when they are
    # garbage collected the temp file vanishes and the gnuplot pipe closes,
    # tearing the plot down.
    return (cmd_file, data_file)
def test_plot ():
    """Demo: plot the frequency response of a fixed 69-tap low-pass filter
    designed for a 2 MHz sample rate.  Returns the gnuplot handle pair."""
    sample_rate = 2.0e6
    #taps = firdes.low_pass(1, sample_rate, 200000, 100000, firdes.WIN_HAMMING)
    # Hard-coded output of the commented-out firdes call above, so this
    # demo has no dependency on the filter-design module.
    taps = (0.0007329441141337156, 0.0007755281985737383, 0.0005323155201040208,
            -7.679847761841656e-19, -0.0007277769618667662, -0.001415981911122799,
            -0.0017135187517851591, -0.001282231998629868, 1.61239866282397e-18,
            0.0018589380197227001, 0.0035909228026866913, 0.004260237794369459,
            0.00310456077568233, -3.0331308923229716e-18, -0.004244099836796522,
            -0.007970594801008701, -0.009214458055794239, -0.006562007591128349,
            4.714311174044374e-18, 0.008654761128127575, 0.01605774275958538,
            0.01841980405151844, 0.013079923577606678, -6.2821650235090215e-18,
            -0.017465557903051376, -0.032989680767059326, -0.03894065320491791,
            -0.028868533670902252, 7.388111706347014e-18, 0.04517475143074989,
            0.09890196472406387, 0.14991308748722076, 0.18646684288978577,
            0.19974154233932495, 0.18646684288978577, 0.14991308748722076,
            0.09890196472406387, 0.04517475143074989, 7.388111706347014e-18,
            -0.028868533670902252, -0.03894065320491791, -0.032989680767059326,
            -0.017465557903051376, -6.2821650235090215e-18, 0.013079923577606678,
            0.01841980405151844, 0.01605774275958538, 0.008654761128127575,
            4.714311174044374e-18, -0.006562007591128349, -0.009214458055794239,
            -0.007970594801008701, -0.004244099836796522, -3.0331308923229716e-18,
            0.00310456077568233, 0.004260237794369459, 0.0035909228026866913,
            0.0018589380197227001, 1.61239866282397e-18, -0.001282231998629868,
            -0.0017135187517851591, -0.001415981911122799, -0.0007277769618667662,
            -7.679847761841656e-19, 0.0005323155201040208, 0.0007755281985737383,
            0.0007329441141337156)
    # print len (taps)
    return gnuplot_freqz (freqz (taps, 1), sample_rate)
if __name__ == '__main__':
handle = test_plot ()
raw_input ('Press Enter to continue: ')
| gpl-3.0 |
FrankBian/kuma | vendor/packages/translate-toolkit/translate/convert/xliff2oo.py | 6 | 10284 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2004-2006 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""convert XLIFF localization files to an OpenOffice.org (SDF) localization file"""
import sys
import os
from translate.storage import oo
from translate.storage import factory
from translate.filters import pofilter
from translate.filters import checks
from translate.filters import autocorrect
import time
class reoo:
    """Merges translations from a bilingual store (po/xliff) back into an
    OpenOffice.org SDF/GSI template, producing the translated oo file.

    Python 2 code: uses iteritems(), has_key(), unicode and ``print >>``.
    """

    def __init__(self, templatefile, languages=None, timestamp=None, includefuzzy=False, long_keys=False, filteraction="exclude"):
        """construct a reoo converter for the specified languages (timestamp=0 means leave unchanged)"""
        # languages is a pair of language ids
        self.long_keys = long_keys
        self.readoo(templatefile)
        self.languages = languages
        self.filteraction = filteraction
        if timestamp is None:
            # fixed default date keeps the output reproducible between runs
            self.timestamp = time.strptime("2002-02-02 02:02:02", "%Y-%m-%d %H:%M:%S")
        else:
            self.timestamp = timestamp
        if self.timestamp:
            self.timestamp_str = time.strftime("%Y-%m-%d %H:%M:%S", self.timestamp)
        else:
            # falsy timestamp (e.g. 0): leave template timestamps untouched
            self.timestamp_str = None
        self.includefuzzy = includefuzzy

    def makeindex(self):
        """makes an index of the oo keys that are used in the source file"""
        self.index = {}
        for ookey, theoo in self.o.ookeys.iteritems():
            # long_keys selects the multifile key style (see oo.makekey)
            sourcekey = oo.makekey(ookey, self.long_keys)
            self.index[sourcekey] = theoo

    def readoo(self, of):
        """read in the oo from the file"""
        oosrc = of.read()
        self.o = oo.oofile()
        self.o.parse(oosrc)
        self.makeindex()

    def handleunit(self, unit):
        """look up the template entry matching *unit* and apply its translation"""
        # TODO: make this work for multiple columns in oo...
        locations = unit.getlocations()
        # technically our formats should just have one location for each entry...
        # but we handle multiple ones just to be safe...
        for location in locations:
            # split "key.subkey" on the last dot
            subkeypos = location.rfind('.')
            subkey = location[subkeypos+1:]
            key = location[:subkeypos]
            # this is just to handle our old system of using %s/%s:%s instead of %s/%s#%s
            key = key.replace(':', '#')
            # this is to handle using / instead of \ in the sourcefile...
            key = key.replace('\\', '/')
            key = oo.normalizefilename(key)
            if self.index.has_key(key):
                # now we need to replace the definition of entity with msgstr
                theoo = self.index[key] # find the oo
                self.applytranslation(key, subkey, theoo, unit)
            else:
                print >> sys.stderr, "couldn't find key %s from po in %d keys" % (key, len(self.index))
                try:
                    sourceunitlines = str(unit)
                    if isinstance(sourceunitlines, unicode):
                        sourceunitlines = sourceunitlines.encode("utf-8")
                    print >> sys.stderr, sourceunitlines
                except:
                    # best-effort diagnostics only; never let reporting break the merge
                    print >> sys.stderr, "error outputting source unit %r" % (str(unit),)

    def applytranslation(self, key, subkey, theoo, unit):
        """applies the translation from the source unit to the oo unit"""
        if not self.includefuzzy and unit.isfuzzy():
            # fuzzy entries are skipped unless explicitly requested
            return
        makecopy = False
        if self.languages is None:
            # no language pair configured: use the first two columns positionally
            part1 = theoo.lines[0]
            if len(theoo.lines) > 1:
                part2 = theoo.lines[1]
            else:
                makecopy = True
        else:
            part1 = theoo.languages[self.languages[0]]
            if self.languages[1] in theoo.languages:
                part2 = theoo.languages[self.languages[1]]
            else:
                # target-language line missing: clone it from the source line
                makecopy = True
        if makecopy:
            part2 = oo.ooline(part1.getparts())
        unquotedid = unit.source
        unquotedstr = unit.target
        # If there is no translation, we don't want to add a line
        if len(unquotedstr.strip()) == 0:
            return
        if isinstance(unquotedstr, unicode):
            unquotedstr = unquotedstr.encode("UTF-8")
        # finally set the new definition in the oo, but not if its empty
        if len(unquotedstr) > 0:
            subkey = subkey.strip()
            setattr(part2, subkey, unquotedstr)
        # set the modified time
        if self.timestamp_str:
            part2.timestamp = self.timestamp_str
        if self.languages:
            part2.languageid = self.languages[1]
        if makecopy:
            theoo.addline(part2)

    def convertstore(self, sourcestore):
        """merge every unit of *sourcestore* into the template; returns the oo file"""
        self.p = sourcestore
        # translate the strings
        for unit in self.p.units:
            # there may be more than one element due to msguniq merge
            # NOTE(review): ``filter`` is the module-level oocheckfilter
            # instance defined below, shadowing the builtin
            if filter.validelement(unit, self.p.filename, self.filteraction):
                self.handleunit(unit)
        # return the modified oo file object
        return self.o
def getmtime(filename):
    """Return the file's last-modification time as a local struct_time."""
    return time.localtime(os.path.getmtime(filename))
class oocheckfilter(pofilter.pocheckfilter):
    """pocheckfilter variant that classifies check failures as errors or
    warnings per oofilteroptions and decides unit inclusion accordingly."""

    def validelement(self, unit, filename, filteraction):
        """Returns whether or not to use unit in conversion. (filename is just for error reporting)

        filteraction values: "none" accepts everything; "exclude-serious"
        drops units with error-level failures; "exclude-all" also drops
        units with warning-level failures.
        """
        if filteraction == "none": return True
        filterresult = self.filterunit(unit)
        if filterresult:
            if filterresult != autocorrect:
                # filterresult maps failing check name -> failure message
                for filtername, filtermessage in filterresult.iteritems():
                    location = unit.getlocations()[0]
                    if filtername in self.options.error:
                        print >> sys.stderr, "Error at %s::%s: %s" % (filename, location, filtermessage)
                        return not filteraction in ["exclude-all", "exclude-serious"]
                    if filtername in self.options.warning or self.options.alwayswarn:
                        print >> sys.stderr, "Warning at %s::%s: %s" % (filename, location, filtermessage)
                        return not filteraction in ["exclude-all"]
        # no (reported) failure: keep the unit
        return True
class oofilteroptions:
    """Static pofilter configuration used by the module-level ``filter``."""
    # Tests whose failure is reported as an error:
    error = ['variables', 'xmltags', 'escapes']
    # Tests whose failure is reported as a warning:
    warning = ['blank']
    #To only issue warnings for tests listed in warning, change the following to False:
    alwayswarn = True
    # Only run the tests listed above:
    limitfilters = error + warning
    #To use all available tests, uncomment the following:
    #limitfilters = []
    #To exclude certain tests, list them in here:
    excludefilters = {}
    includefuzzy = False
    includereview = False
    autocorrect = False
# Module-level filter instance used by reoo.convertstore to screen units.
# (Note: the name ``filter`` shadows the builtin of the same name.)
options = oofilteroptions()
filter = oocheckfilter(options, [checks.OpenOfficeChecker, checks.StandardUnitChecker], checks.openofficeconfig)
def convertoo(inputfile, outputfile, templatefile, sourcelanguage=None, targetlanguage=None, timestamp=None, includefuzzy=False, multifilestyle="single", filteraction=None):
    """Convert a PO/XLIFF input file back to an OpenOffice.org oo/sdf file.

    The translations in *inputfile* are merged into the mandatory template
    oo file and the result is written to *outputfile*.

    :param sourcelanguage: defaults to "01" when the target code is numeric,
        otherwise "en-US"
    :raises ValueError: if no target language or no template file is given
    :returns: True on success
    """
    inputstore = factory.getobject(inputfile)
    inputstore.filename = getattr(inputfile, 'name', '')
    if not targetlanguage:
        raise ValueError("You must specify the target language")
    if not sourcelanguage:
        sourcelanguage = "01" if targetlanguage.isdigit() else "en-US"
    languages = (sourcelanguage, targetlanguage)
    if templatefile is None:
        raise ValueError("must have template file for oo files")
    convertor = reoo(templatefile, languages=languages, timestamp=timestamp,
                     includefuzzy=includefuzzy,
                     long_keys=multifilestyle != "single",
                     filteraction=filteraction)
    outputstore = convertor.convertstore(inputstore)
    # TODO: check if we need to manually delete missing items
    outputfile.write(str(outputstore))
    return True
def main(argv=None):
    """Command-line entry point: po/xlf -> oo/sdf conversion."""
    from translate.convert import convert
    formats = {
        ("po", "oo"): ("oo", convertoo),
        ("xlf", "oo"): ("oo", convertoo),
        ("xlf", "sdf"): ("sdf", convertoo),
    }
    # always treat the input as an archive unless it is a directory
    archiveformats = {(None, "output"): oo.oomultifile,
                      (None, "template"): oo.oomultifile}
    optparser = convert.ArchiveConvertOptionParser(formats, usetemplates=True,
                                                   description=__doc__,
                                                   archiveformats=archiveformats)
    optparser.add_option("-l", "--language", dest="targetlanguage", default=None,
                         help="set target language code (e.g. af-ZA) [required]", metavar="LANG")
    optparser.add_option("", "--source-language", dest="sourcelanguage", default=None,
                         help="set source language code (default en-US)", metavar="LANG")
    optparser.add_option("-T", "--keeptimestamp", dest="timestamp", default=None,
                         action="store_const", const=0,
                         help="don't change the timestamps of the strings")
    optparser.add_option("", "--nonrecursiveoutput", dest="allowrecursiveoutput",
                         default=True, action="store_false",
                         help="don't treat the output oo as a recursive store")
    optparser.add_option("", "--nonrecursivetemplate", dest="allowrecursivetemplate",
                         default=True, action="store_false",
                         help="don't treat the template oo as a recursive store")
    optparser.add_option("", "--filteraction", dest="filteraction", default="none", metavar="ACTION",
                         help="action on pofilter failure: none (default), warn, exclude-serious, exclude-all")
    optparser.add_fuzzy_option()
    optparser.add_multifile_option()
    # options forwarded verbatim to convertoo:
    for name in ("sourcelanguage", "targetlanguage", "timestamp", "filteraction"):
        optparser.passthrough.append(name)
    optparser.run(argv)


if __name__ == '__main__':
    main()
| mpl-2.0 |
JonathanStein/odoo | addons/l10n_fr/report/__init__.py | 424 | 1475 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
import base_report
import bilan_report
import compute_resultant_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/requests/packages/urllib3/util/url.py | 149 | 6289 | from __future__ import absolute_import
from collections import namedtuple
from ..exceptions import LocationParseError
url_attrs = ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment']


class Url(namedtuple('Url', url_attrs)):
    """
    Datastructure for representing an HTTP URL. Used as a return value for
    :func:`parse_url`. Both the scheme and host are normalized as they are
    both case-insensitive according to RFC 3986.
    """
    __slots__ = ()

    def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
                query=None, fragment=None):
        # Normalize on construction: force a leading slash on the path and
        # lowercase the case-insensitive components (RFC 3986).
        if path and not path.startswith('/'):
            path = '/' + path
        scheme = scheme.lower() if scheme else scheme
        host = host.lower() if host else host
        return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
                                       query, fragment)

    @property
    def hostname(self):
        """For backwards-compatibility with urlparse. We're nice like that."""
        return self.host

    @property
    def request_uri(self):
        """Absolute path including the query string."""
        if self.query is None:
            return self.path or '/'
        return (self.path or '/') + '?' + self.query

    @property
    def netloc(self):
        """Network location including host and port"""
        if not self.port:
            return self.host
        return '%s:%d' % (self.host, self.port)

    @property
    def url(self):
        """
        Convert self into a url

        This function should more or less round-trip with :func:`.parse_url`. The
        returned url may not be exactly the same as the url inputted to
        :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
        with a blank port will have : removed).

        Example: ::

            >>> U = parse_url('http://google.com/mail/')
            >>> U.url
            'http://google.com/mail/'
            >>> Url('http', 'username:password', 'host.com', 80,
            ... '/path', 'query', 'fragment').url
            'http://username:password@host.com:80/path?query#fragment'
        """
        scheme, auth, host, port, path, query, fragment = self
        pieces = []

        # "is not None" so that empty strings (or port 0) still appear.
        if scheme is not None:
            pieces.append(scheme + '://')
        if auth is not None:
            pieces.append(auth + '@')
        if host is not None:
            pieces.append(host)
        if port is not None:
            pieces.append(':' + str(port))
        if path is not None:
            pieces.append(path)
        if query is not None:
            pieces.append('?' + query)
        if fragment is not None:
            pieces.append('#' + fragment)

        return ''.join(pieces)

    def __str__(self):
        return self.url
def split_first(s, delims):
    """
    Given a string and an iterable of delimiters, split on the first found
    delimiter. Return two split parts and the matched delimiter.

    If not found, then the first part is the full input string.

    Example::

        >>> split_first('foo/bar?baz', '?/=')
        ('foo', 'bar?baz', '/')
        >>> split_first('foo/bar?baz', '123')
        ('foo/bar?baz', '', None)

    Scales linearly with number of delims. Not ideal for large number of delims.
    """
    best_idx = None
    best_delim = None
    for delim in delims:
        found = s.find(delim)
        if found < 0:
            continue
        if best_idx is None or found < best_idx:
            best_idx, best_delim = found, delim

    if best_idx is None:
        # no delimiter present: everything goes in the first part
        return s, '', None
    return s[:best_idx], s[best_idx + 1:], best_delim
def parse_url(url):
    """
    Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
    performed to parse incomplete urls. Fields not provided will be None.

    Partly backwards-compatible with :mod:`urlparse`.

    Example::

        >>> parse_url('http://google.com/mail/')
        Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
        >>> parse_url('google.com:80')
        Url(scheme=None, host='google.com', port=80, path=None, ...)
        >>> parse_url('/foo?bar')
        Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)

    :raises LocationParseError: if a non-integer port is given
    """

    # While this code has overlap with stdlib's urlparse, it is much
    # simplified for our needs and less annoying.
    # Additionally, this implementations does silly things to be optimal
    # on CPython.

    if not url:
        # Empty
        return Url()

    scheme = None
    auth = None
    host = None
    port = None
    path = None
    fragment = None
    query = None

    # Scheme
    if '://' in url:
        scheme, url = url.split('://', 1)

    # Find the earliest Authority Terminator
    # (http://tools.ietf.org/html/rfc3986#section-3.2)
    url, path_, delim = split_first(url, ['/', '?', '#'])

    if delim:
        # Reassemble the path (the terminator itself belongs to the path)
        path = delim + path_

    # Auth
    if '@' in url:
        # Last '@' denotes end of auth part
        auth, url = url.rsplit('@', 1)

    # IPv6
    if url and url[0] == '[':
        host, url = url.split(']', 1)
        host += ']'

    # Port
    # (after the IPv6 split above, any remaining ':' separates host and port)
    if ':' in url:
        _host, port = url.split(':', 1)

        if not host:
            host = _host

        if port:
            # If given, ports must be integers. No whitespace, no plus or
            # minus prefixes, no non-integer digits such as ^2 (superscript).
            if not port.isdigit():
                raise LocationParseError(url)
            try:
                port = int(port)
            except ValueError:
                raise LocationParseError(url)
        else:
            # Blank ports are cool, too. (rfc3986#section-3.2.3)
            port = None

    elif not host and url:
        host = url

    if not path:
        # no path: fragment and query cannot exist either
        return Url(scheme, auth, host, port, path, query, fragment)

    # Fragment
    if '#' in path:
        path, fragment = path.split('#', 1)

    # Query
    if '?' in path:
        path, query = path.split('?', 1)

    return Url(scheme, auth, host, port, path, query, fragment)
def get_host(url):
    """
    Deprecated. Use :func:`parse_url` instead.
    """
    parsed = parse_url(url)
    return parsed.scheme or 'http', parsed.hostname, parsed.port
| gpl-3.0 |
environmentalscience/essm | docs/examples/test_variable_definitions.py | 1 | 11960 | # -*- coding: utf-8 -*-
#
# This file is for use with essm.
# Copyright (C) 2020 ETH Zurich, Swiss Data Science Center.
#
# essm is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# essm is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with essm; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
"""Variables defined in api_features.ipynb and dependencies."""
from sympy.physics.units import (
joule, kelvin, kilogram, meter, mole, pascal, second, watt
)
from essm.variables import Variable
class alpha_a(Variable):
"""Thermal diffusivity of dry air."""
name = 'alpha_a'
unit = meter ** 2 / second
assumptions = {'real': True}
latex_name = '\\alpha_a'
class c_pa(Variable):
"""Specific heat of dry air."""
name = 'c_pa'
unit = joule / (kelvin * kilogram)
assumptions = {'real': True}
latex_name = 'c_{pa}'
default = 1010.0
class c_pamol(Variable):
"""Molar specific heat of dry air.
https://en.wikipedia.org/wiki/Heat_capacity#Specific_heat_capacity
"""
name = 'c_pamol'
unit = joule / (kelvin * mole)
assumptions = {'real': True}
latex_name = 'c_{pa,mol}'
default = 29.19
class c_pv(Variable):
"""Specific heat of water vapour at 300 K.
http://www.engineeringtoolbox.com/water-vapor-d_979.html
"""
name = 'c_pv'
unit = joule / (kelvin * kilogram)
assumptions = {'real': True}
latex_name = 'c_{pv}'
default = 1864
class C_wa(Variable):
"""Concentration of water in air."""
name = 'C_wa'
unit = mole / meter ** 3
assumptions = {'real': True}
latex_name = 'C_{wa}'
class D_va(Variable):
"""Binary diffusion coefficient of water vapour in air."""
name = 'D_va'
unit = meter ** 2 / second
assumptions = {'real': True}
latex_name = 'D_{va}'
class g(Variable):
"""Gravitational acceleration."""
name = 'g'
unit = meter / second ** 2
assumptions = {'real': True}
latex_name = 'g'
default = 9.81
class Gr(Variable):
"""Grashof number."""
name = 'Gr'
unit = 1
assumptions = {'real': True}
latex_name = 'N_{Gr_L}'
class h_c(Variable):
"""Average 1-sided convective heat transfer coefficient."""
name = 'h_c'
unit = joule / (kelvin * meter ** 2 * second)
assumptions = {'real': True}
latex_name = 'h_c'
class k_a(Variable):
"""Thermal conductivity of dry air."""
name = 'k_a'
unit = joule / (kelvin * meter * second)
assumptions = {'real': True}
latex_name = 'k_a'
class lambda_E(Variable):
"""Latent heat of evaporation."""
name = 'lambda_E'
unit = joule / kilogram
assumptions = {'real': True}
latex_name = '\\lambda_E'
default = 2450000.0
class Le(Variable):
"""Lewis number."""
name = 'Le'
unit = 1
assumptions = {'real': True}
latex_name = 'N_{Le}'
class M_air(Variable):
"""Molar mass of air.
http://www.engineeringtoolbox.com/molecular-mass-air-d_679.html
"""
name = 'M_air'
unit = kilogram / mole
assumptions = {'real': True}
latex_name = 'M_{air}'
default = 0.02897
class M_N2(Variable):
"""Molar mass of nitrogen."""
name = 'M_N2'
unit = kilogram / mole
assumptions = {'real': True}
latex_name = 'M_{N_2}'
default = 0.028
class M_O2(Variable):
"""Molar mass of oxygen."""
name = 'M_O2'
unit = kilogram / mole
assumptions = {'real': True}
latex_name = 'M_{O_2}'
default = 0.032
class M_w(Variable):
"""Molar mass of water."""
name = 'M_w'
unit = kilogram / mole
assumptions = {'real': True}
latex_name = 'M_w'
default = 0.018
class nu_a(Variable):
"""Kinematic viscosity of dry air."""
name = 'nu_a'
unit = meter ** 2 / second
assumptions = {'real': True}
latex_name = '\\nu_a'
class Nu(Variable):
"""Average Nusselt number over given length."""
name = 'Nu'
unit = 1
assumptions = {'real': True}
latex_name = 'N_{Nu_L}'
class P_a(Variable):
"""Air pressure."""
name = 'P_a'
unit = pascal
assumptions = {'real': True}
latex_name = 'P_a'
class Pr(Variable):
"""Prandtl number (0.71 for air)."""
name = 'Pr'
unit = 1
assumptions = {'real': True}
latex_name = 'N_{Pr}'
class P_N2(Variable):
"""Partial pressure of nitrogen."""
name = 'P_N2'
unit = pascal
assumptions = {'real': True}
latex_name = 'P_{N2}'
class P_O2(Variable):
"""Partial pressure of oxygen."""
name = 'P_O2'
unit = pascal
assumptions = {'real': True}
latex_name = 'P_{O2}'
class P_wa(Variable):
"""Partial pressure of water vapour in air."""
name = 'P_wa'
unit = pascal
assumptions = {'real': True}
latex_name = 'P_{wa}'
class P_was(Variable):
"""Saturation water vapour pressure at air temperature."""
name = 'P_was'
unit = pascal
assumptions = {'real': True}
latex_name = 'P_{was}'
class R_d(Variable):
"""Downwelling global radiation."""
name = 'R_d'
unit = watt / meter ** 2
assumptions = {'real': True}
latex_name = 'R_d'
class Re_c(Variable):
"""Critical Reynolds number for the onset of turbulence."""
name = 'Re_c'
unit = 1
assumptions = {'real': True}
latex_name = 'N_{Re_c}'
class Re(Variable):
"""Average Reynolds number over given length."""
name = 'Re'
unit = 1
assumptions = {'real': True}
latex_name = 'N_{Re_L}'
class rho_a(Variable):
"""Density of dry air."""
name = 'rho_a'
unit = kilogram / meter ** 3
assumptions = {'real': True}
latex_name = '\\rho_a'
class R_u(Variable):
"""Upwelling global radiation."""
name = 'R_u'
unit = watt / meter ** 2
assumptions = {'real': True}
latex_name = 'R_u'
class R_mol(Variable):
"""Molar gas constant."""
name = 'R_mol'
unit = joule / (kelvin * mole)
assumptions = {'real': True}
latex_name = 'R_{mol}'
default = 8.314472
class R_s(Variable):
"""Solar shortwave flux per area."""
name = 'R_s'
unit = joule / (meter ** 2 * second)
assumptions = {'real': True}
latex_name = 'R_s'
class sigm(Variable):
"""Stefan-Boltzmann constant."""
name = 'sigm'
unit = joule / (kelvin ** 4 * meter ** 2 * second)
assumptions = {'real': True}
latex_name = '\\sigma'
default = 5.67e-08
class T0(Variable):
"""Freezing point in Kelvin."""
name = 'T0'
unit = kelvin
assumptions = {'real': True}
latex_name = 'T_0'
default = 273.15
class T_a(Variable):
"""Air temperature."""
name = 'T_a'
unit = kelvin
assumptions = {'real': True}
latex_name = 'T_a'
class v_w(Variable):
"""Wind velocity."""
name = 'v_w'
unit = meter / second
assumptions = {'real': True}
latex_name = 'v_w'
class x_N2(Variable):
"""Mole fraction of nitrogen in dry air."""
name = 'x_N2'
unit = 1
assumptions = {'real': True}
latex_name = 'x_{N2}'
default = 0.79
class x_O2(Variable):
"""Mole fraction of oxygen in dry air."""
name = 'x_O2'
unit = 1
assumptions = {'real': True}
latex_name = 'x_{O2}'
default = 0.21
class p_Dva1(Variable):
"""Internal parameter of eq_Dva."""
name = 'p_Dva1'
unit = meter ** 2 / (kelvin * second)
assumptions = {'real': True}
latex_name = 'p_1'
default = 1.49e-07
class p_Dva2(Variable):
"""Internal parameter of eq_Dva."""
name = 'p_Dva2'
unit = meter ** 2 / second
assumptions = {'real': True}
latex_name = 'p_2'
default = 1.96e-05
class p_alpha1(Variable):
"""Internal parameter of eq_alphaa."""
name = 'p_alpha1'
unit = meter ** 2 / (kelvin * second)
assumptions = {'real': True}
latex_name = 'p_1'
default = 1.32e-07
class p_alpha2(Variable):
"""Internal parameter of eq_alphaa."""
name = 'p_alpha2'
unit = meter ** 2 / second
assumptions = {'real': True}
latex_name = 'p_2'
default = 1.73e-05
class p_ka1(Variable):
"""Internal parameter of eq_ka."""
name = 'p_ka1'
unit = joule / (kelvin ** 2 * meter * second)
assumptions = {'real': True}
latex_name = 'p_1'
default = 6.84e-05
class p_ka2(Variable):
"""Internal parameter of eq_ka."""
name = 'p_ka2'
unit = joule / (kelvin * meter * second)
assumptions = {'real': True}
latex_name = 'p_2'
default = 0.00563
class p_nua1(Variable):
"""Internal parameter of eq_nua."""
name = 'p_nua1'
unit = meter ** 2 / (kelvin * second)
assumptions = {'real': True}
latex_name = 'p_1'
default = 9e-08
class p_nua2(Variable):
"""Internal parameter of eq_nua."""
name = 'p_nua2'
unit = meter ** 2 / second
assumptions = {'real': True}
latex_name = 'p_2'
default = 1.13e-05
class P_g(Variable):
"""Pressure of gas."""
name = 'P_g'
unit = pascal
assumptions = {'real': True}
latex_name = 'P_g'
class V_g(Variable):
"""Volume of gas."""
name = 'V_g'
unit = meter ** 3
assumptions = {'real': True}
latex_name = 'V_g'
class n_g(Variable):
"""Amount of gas."""
name = 'n_g'
unit = mole
assumptions = {'real': True}
latex_name = 'n_g'
class n_w(Variable):
"""Amount of water."""
name = 'n_w'
unit = mole
assumptions = {'real': True}
latex_name = 'n_w'
class T_g(Variable):
"""Temperature of gas."""
name = 'T_g'
unit = kelvin
assumptions = {'real': True}
latex_name = 'T_g'
class Delta_Pwa(Variable):
    # Raw docstring: "\partial" would otherwise contain an invalid escape sequence.
    r"""Slope of saturated vapour pressure, $\partial P_{wa} / \partial T_g$"""
    name = 'Delta_Pwa'
    unit = pascal / kelvin
    assumptions = {'real': True}
    latex_name = '\\Delta'
class x(Variable):
"""Positive real variable."""
name = 'x'
unit = 1
assumptions = {'positive': True, 'real': True}
latex_name = 'x'
class p_CC1(Variable):
"""Internal parameter of eq_Pwl."""
name = 'p_CC1'
unit = pascal
assumptions = {'real': True}
latex_name = '611'
default = 611.0
class p_CC2(Variable):
"""Internal parameter of eq_Pwl."""
name = 'p_CC2'
unit = kelvin
assumptions = {'real': True}
latex_name = '273'
default = 273.0
class T_a1(Variable):
"""Air temperature"""
name = 'T_a1'
unit = kelvin
assumptions = {'real': True}
latex_name = 'T_{a1}'
class T_a2(Variable):
"""Air temperature"""
name = 'T_a2'
unit = kelvin
assumptions = {'real': True}
latex_name = 'T_{a2}'
class P_wa1(Variable):
"""P_wa at T1"""
name = 'P_wa1'
unit = pascal
assumptions = {'real': True}
latex_name = 'P_{wa1}'
__all__ = (
'alpha_a',
'c_pa',
'c_pamol',
'c_pv',
'C_wa',
'D_va',
'g',
'Gr',
'h_c',
'k_a',
'lambda_E',
'Le',
'M_air',
'M_N2',
'M_O2',
'M_w',
'nu_a',
'Nu',
'P_a',
'Pr',
'P_N2',
'P_O2',
'P_wa',
'P_was',
'R_d',
'Re_c',
'Re',
'rho_a',
'R_u',
'R_mol',
'R_s',
'sigm',
'T0',
'T_a',
'v_w',
'x_N2',
'x_O2',
'p_Dva1',
'p_Dva2',
'p_alpha1',
'p_alpha2',
'p_ka1',
'p_ka2',
'p_nua1',
'p_nua2',
'P_g',
'V_g',
'n_g',
'n_w',
'T_g',
'Delta_Pwa',
'x',
'p_CC1',
'p_CC2',
'T_a1',
'T_a2',
'P_wa1',
)
| gpl-2.0 |
opendatagroup/cassius | tags/cassius-0_1_0_0/cassius/containers.py | 1 | 141242 | # Standard Python packages
import math, cmath
import re
import itertools
import numbers
import random
# Special dependencies
import numpy, numpy.random # sudo apt-get install python-numpy
# import minuit # no package
# Augustus dependencies
from augustus.kernel.unitable import UniTable
# Cassius interdependencies
import mathtools
import utilities
import color
import containers
class ContainerException(Exception):
    """Raised for run-time errors in container objects."""
class AutoType:
def __repr__(self):
if self is Auto:
return "Auto"
else:
raise ContainerException, "There must only be one instance of Auto"
#: Symbol indicating that a frame argument should be
#: automatically-generated, if possible. Similar to `None` in that
#: there is only one instance (checked with `is`), but with a different
#: meaning.
#:
#: Example:
#: `xticks = None` means that no x-ticks are drawn
#:
#: `xticks = Auto` means that x-ticks are automatically generated
#:
#: `Auto` is the only instance of `AutoType`.
Auto = AutoType()
######################################################### Layout of the page, coordinate frames, overlays
# for arranging a grid of plots
class Layout:
"""Represents a regular grid of plots.
Signatures::
Layout(nrows, ncols, plot1[, plot2[, ...]])
Layout(plot1[, plot2[, ...]], nrows=value, ncols=value)
Arguments:
nrows (number): number of rows
ncols (number): number of columns
plots (list of `Frame` or other `Layout` objects): plots to
draw, organized in normal reading order (left to right, columns
before rows)
Public Members:
`nrows`, `ncols`, `plots`
Behavior:
It is possible to create an empty Layout (no plots).
For a Layout object named `layout`, `layout[i,j]` accesses a
plot in row `i` and column `j`, while `layout.plots[k]`
accesses a plot by a serial index (`layout.plots` is a normal
list).
Spaces containing `None` will be blank.
Layouts can be nested: e.g. `Layout(1, 2, top, Layout(2, 1,
bottomleft, bottomright))`.
"""
def __init__(self, *args, **kwds):
if "nrows" in kwds and "ncols" in kwds:
self.nrows, self.ncols = kwds["nrows"], kwds["ncols"]
self.plots = list(args)
if set(kwds.keys()) != set(["nrows", "ncols"]):
raise TypeError, "Unrecognized keyword argument"
elif len(args) >= 2 and isinstance(args[0], (numbers.Number, numpy.number)) and isinstance(args[1], (numbers.Number, numpy.number)):
self.nrows, self.ncols = args[0:2]
self.plots = list(args[2:])
if set(kwds.keys()) != set([]):
raise TypeError, "Unrecognized keyword argument"
else:
raise TypeError, "Missing nrows or ncols argument"
def index(self, i, j):
"""Convert a grid index (i,j) into a serial index."""
if i < 0 or j < 0 or i >= self.nrows or j >= self.ncols:
raise ContainerException, "Index (%d,%d) is beyond the %dx%d grid of plots" % (i, j, self.nrows, self.ncols)
return self.ncols*i + j
def __getitem__(self, ij):
i, j = ij
index = self.index(i, j)
if index < len(self.plots):
return self.plots[index]
else:
return None
def __setitem__(self, ij, value):
i, j = ij
index = self.index(i, j)
if index < len(self.plots):
self.plots[index] = value
else:
for k in range(len(self.plots), index):
self.plots.append(None)
self.plots.append(value)
def __delitem__(self, ij):
i, j = ij
if self.index(i, j) < len(self.plots):
self.plots[self.index(i, j)] = None
def __repr__(self):
return "<Layout %dx%d at 0x%x>" % (self.nrows, self.ncols, id(self))
# for representing a coordinate axis
class Frame:
    """Abstract superclass for all plots with drawable coordinate frames.

    Frame arguments:
       Any frame argument (axis labels, margins, etc.) can be passed
       as a keyword in the constructor or set later as member data.
       The frame arguments are interpreted only by the backend and are
       replaced with defaults if not present.

    Public Members:
       All frame arguments that have been set.
    """
    _not_frameargs = []

    def __init__(self, **frameargs):
        for name, value in frameargs.items():
            setattr(self, name, value)

    def __repr__(self):
        return "<Frame %s at 0x%x>" % (str(self._frameargs()), id(self))

    def _frameargs(self):
        # Every member is a frame argument except private ("_"-prefixed)
        # names and names listed in _not_frameargs.
        excluded = set(self._not_frameargs)
        return dict((name, value) for name, value in self.__dict__.items()
                    if name not in excluded and not name.startswith("_"))

    def ranges(self, xlog=False, ylog=False):
        """Return a data-space bounding box as `xmin, ymin, xmax, ymax`.

        Arguments:
           xlog (bool): requesting a logarithmic x axis (negative
           and zero-valued contents are ignored)

           ylog (bool): requesting a logarithmic y axis

        The abstract class, `Frame`, returns constant intervals (0, 1)
        (or (0.1, 1) for log scales.)
        """
        xmin = 0.1 if xlog else 0.
        ymin = 0.1 if ylog else 0.
        return xmin, ymin, 1., 1.

    def _prepare(self, xmin=None, ymin=None, xmax=None, ymax=None, xlog=None, ylog=None):
        # get ready to be drawn; concrete subclasses override this
        pass
# for overlaying different containers' data in a single frame
class Overlay(Frame):
    """Represents an overlay of several plots in the same coordinate axis.

    Signatures::

       Overlay(frame, plot1[, plot2[, ...]], [framearg=value[, ...]])
       Overlay(plot1[, plot2[, ...]], [frame=value[, framearg=value[, ...]]])

    Arguments:
       plots (`Frame` instances): plots to be overlaid

       frame (index or `None`): which, if any, plot to use to set the
       coordinate frame. If `frame=None`, then `frameargs` will be
       taken from the `Overlay` instance and a data-space bounding box
       will be derived from the union of all contents.

    Public Members:
       `plots`, `frame`

    Behavior:
       It is *not* possible to create an empty Overlay (no plots).
    """
    _not_frameargs = ["plots", "frame"]

    def __init__(self, first, *others, **frameargs):
        # A leading integer selects the frame-defining plot by index;
        # otherwise the first argument is just another plot.
        if isinstance(first, (int, long)):
            self.frame = first
            self.plots = list(others)
        else:
            self.plots = [first] + list(others)
        Frame.__init__(self, **frameargs)

    def append(self, plot):
        """Append a plot to the end of `plots` (drawn last), keeping the `frame` pointer up-to-date."""
        self.plots.append(plot)
        # a negative frame index counts from the end, so appending shifts it
        if getattr(self, "frame", None) is not None and self.frame < 0:
            self.frame -= 1

    def prepend(self, plot):
        """Prepend a plot at the beginning of `plots` (drawn first), keeping the `frame` pointer up-to-date."""
        self.plots.insert(0, plot)
        # a non-negative frame index counts from the front, so it must shift
        if getattr(self, "frame", None) is not None and self.frame >= 0:
            self.frame += 1

    def __repr__(self):
        if getattr(self, "frame", None) is not None:
            return "<Overlay %d items (frame=%d) at 0x%x>" % (len(self.plots), self.frame, id(self))
        else:
            return "<Overlay %d items at 0x%x>" % (len(self.plots), id(self))

    def _frameargs(self):
        # When a frame-defining plot is selected, its members form the base
        # frame arguments and this Overlay's own members override them.
        if getattr(self, "frame", None) is not None:
            if self.frame >= len(self.plots):
                raise ContainerException, "Overlay.frame points to a non-existent plot (%d <= %d)" % (self.frame, len(self.plots))
            output = dict(self.plots[self.frame].__dict__)
            output.update(self.__dict__)
        else:
            output = dict(self.__dict__)
        for i in self._not_frameargs:
            if i in output: del output[i]
        for i in output.keys():
            if i[0] == "_": del output[i]
        return output

    def ranges(self, xlog=False, ylog=False):
        """Return a data-space bounding box of all contents as `xmin, ymin, xmax, ymax`.

        Arguments:
           xlog (bool): requesting a logarithmic x axis (negative
           and zero-valued contents are ignored)

           ylog (bool): requesting a logarithmic y axis
        """
        if getattr(self, "frame", None) is not None:
            if self.frame >= len(self.plots):
                raise ContainerException, "Overlay.frame points to a non-existent plot (%d <= %d)" % (self.frame, len(self.plots))
            return self.plots[self.frame].ranges(xlog, ylog)
        # no frame plot selected: take the union of all bounding boxes
        xmins, ymins, xmaxs, ymaxs = [], [], [], []
        for plot in self.plots:
            xmin, ymin, xmax, ymax = plot.ranges(xlog, ylog)
            xmins.append(xmin)
            ymins.append(ymin)
            xmaxs.append(xmax)
            ymaxs.append(ymax)
        return min(xmins), min(ymins), max(xmaxs), max(ymaxs)
return min(xmins), min(ymins), max(xmaxs), max(ymaxs)
######################################################### Histograms, bar charts, pie charts
class Stack(Frame):
"""Represents a stack of histograms.
Signature::
Stack(plot1[, plot2[, ...]] [linewidths=list,] [linestyles=list,] [linecolors=list,] [**frameargs])
Arguments:
plots (list of `HistogramAbstract` instances): histograms to be stacked
linewidths (list): list of linewidths with the same length as
the number of histograms
linestyles (list): list of styles
linecolors (list): list of colors
fillcolors (list): list of fill colors (most commonly used to
distinguish between stacked histograms
Public members:
`plots`, `linewidths`, `linestyles`, `linecolors`, `fillcolors`
Behavior:
It is *not* possible to create an empty Stack (no plots).
If `linewidths`, `linestyles`, `linecolors`, or `fillcolors`
are not specified, the input histograms' own styles will be
used.
"""
_not_frameargs = ["plots", "linewidths", "linestyles", "linecolors", "fillcolors"]
def __init__(self, first, *others, **frameargs):
self.plots = [first] + list(others)
Frame.__init__(self, **frameargs)
def __repr__(self):
return "<Stack %d at 0x%x>" % (len(self.plots), id(self))
def bins(self):
"""Returns a list of histogram (low, high) bin edges.
Exceptions:
Raises `ContainerException` if any of the histogram bins
differ (ignoring small numerical errors).
"""
bins = None
for hold in self.plots:
if bins is None:
bins = hold.bins[:]
else:
same = (len(hold.bins) == len(bins))
if same:
for oldbin, refbin in zip(hold.bins, bins):
if HistogramAbstract._numeric(hold, oldbin) and HistogramAbstract._numeric(hold, refbin):
xepsilon = mathtools.epsilon * abs(refbin[1] - refbin[0])
if abs(oldbin[0] - refbin[0]) > xepsilon or abs(oldbin[1] - refbin[1]) > xepsilon:
same = False
break
else:
if oldbin != refbin:
same = False
break
if not same:
raise ContainerException, "Bins in stacked histograms must be the same"
return bins
def stack(self):
"""Returns a list of new histograms, obtained by stacking the inputs.
Exceptions:
Raises `ContainerException` if any of the histogram bins
differ (ignoring small numerical errors).
"""
if len(self.plots) == 0:
raise ContainerException, "Stack must contain at least one histogram"
for styles in "linewidths", "linestyles", "linecolors", "fillcolors":
if getattr(self, styles, None) is not None:
if len(getattr(self, styles)) != len(self.plots):
raise ContainerException, "There must be as many %s as plots" % styles
bins = self.bins()
gap = max([i.gap for i in self.plots])
output = []
for i in xrange(len(self.plots)):
if getattr(self, "linewidths", None) is not None:
linewidth = self.linewidths[i]
else:
linewidth = self.plots[i].linewidth
if getattr(self, "linestyles", None) is not None:
linestyle = self.linestyles[i]
else:
linestyle = self.plots[i].linestyle
if getattr(self, "linecolors", None) is not None:
linecolor = self.linecolors[i]
else:
linecolor = self.plots[i].linecolor
if getattr(self, "fillcolors", None) is not None:
fillcolor = self.fillcolors[i]
else:
fillcolor = self.plots[i].fillcolor
if isinstance(self.plots[i], HistogramCategorical):
hnew = HistogramCategorical(bins, None, None, 0, linewidth, linestyle, linecolor, fillcolor, gap)
else:
hnew = HistogramAbstract(bins, 0, linewidth, linestyle, linecolor, fillcolor, gap)
for j in xrange(i+1):
for bin in xrange(len(hnew.values)):
hnew.values[bin] += self.plots[j].values[bin]
output.append(hnew)
return output
def overlay(self):
self._stack = self.stack()
self._stack.reverse()
self._overlay = Overlay(*self._stack, frame=0)
self._overlay.plots[0].__dict__.update(self._frameargs())
return self._overlay
def ranges(self, xlog=False, ylog=False):
"""Return a data-space bounding box of all contents as `xmin, ymin, xmax, ymax`.
Arguments:
xlog (bool): requesting a logarithmic x axis (negative
and zero-valued contents are ignored)
ylog (bool): requesting a logarithmic y axis
"""
self.overlay()
if ylog:
ymin = min(filter(lambda y: y > 0., self._stack[-1].values))
ymax = max(filter(lambda y: y > 0., self._stack[0].values))
else:
ymin = min(list(self._stack[-1].values) + [0.])
ymax = max(self._stack[0].values)
if ymin == ymax:
if ylog:
ymin, ymax = ymin / 2., ymax * 2.
else:
ymin, ymax = ymin - 0.5, ymax + 0.5
return self.plots[0].low(), ymin, self.plots[0].high(), ymax
    def _prepare(self, xmin=None, ymin=None, xmax=None, ymax=None, xlog=None, ylog=None):
        # rebuilding the overlay is the only preparation needed before drawing;
        # the range arguments are accepted for interface compatibility but unused
        self.overlay()
class HistogramAbstract(Frame):
    """Abstract class for histograms: use concrete classes (Histogram, HistogramNonUniform, and HistogramCategorical) instead."""
    # histogram-specific members that must not be forwarded to the coordinate frame
    _not_frameargs = ["bins", "storelimit", "entries", "linewidth", "linestyle", "linecolor", "fillcolor", "gap", "values", "underflow", "overflow", "inflow"]
    def __init__(self, bins, storelimit, linewidth, linestyle, linecolor, fillcolor, gap, **frameargs):
        """Set up common histogram state; called only by the concrete subclasses."""
        self.bins, self.storelimit = bins, storelimit
        self.entries = 0
        self.linewidth, self.linestyle, self.linecolor, self.fillcolor, self.gap = linewidth, linestyle, linecolor, fillcolor, gap
        # per-bin weighted contents and weighted sum of x values (used for centroids)
        self.values = numpy.zeros(len(self.bins), numpy.float)
        self._sumx = numpy.zeros(len(self.bins), numpy.float)
        self.underflow, self.overflow, self.inflow = 0., 0., 0.
        if storelimit is None:
            # unlimited store: plain Python lists that grow as values arrive
            self._store = []
            self._weights = []
            self._lenstore = None
        else:
            # bounded store: preallocated arrays with an explicit fill pointer
            self._store = numpy.empty(storelimit, numpy.float)
            self._weights = numpy.empty(storelimit, numpy.float)
            self._lenstore = 0
        Frame.__init__(self, **frameargs)
    def __repr__(self):
        return "<HistogramAbstract at 0x%x>" % id(self)
    def _numeric(self, bin):
        # True if `bin` is a (low, high) pair of numbers rather than a category label
        return len(bin) == 2 and isinstance(bin[0], (numbers.Number, numpy.number)) and isinstance(bin[1], (numbers.Number, numpy.number))
    def __str__(self):
        # render as a two-column table: bin interval (or category name) and contents
        output = []
        output.append("%-30s %s" % ("bin", "value"))
        output.append("="*40)
        if self.underflow > 0: output.append("%-30s %g" % ("underflow", self.underflow))
        for i in xrange(len(self.bins)):
            if self._numeric(self.bins[i]):
                category = "[%g, %g)" % self.bins[i]
            else:
                category = "\"%s\"" % self.bins[i]
            output.append("%-30s %g" % (category, self.values[i]))
        if self.overflow > 0: output.append("%-30s %g" % ("overflow", self.overflow))
        if self.inflow > 0: output.append("%-30s %g" % ("inflow", self.inflow))
        return "\n".join(output)
    def binedges(self):
        """Return numerical values for the edges of bins."""
        # categorical bins are mapped to unit-width intervals centered on 0, 1, 2, ...
        categorical = False
        for bin in self.bins:
            if not self._numeric(bin):
                categorical = True
                break
        if categorical:
            lows = map(lambda x: x - 0.5, xrange(len(self.bins)))
            highs = map(lambda x: x + 0.5, xrange(len(self.bins)))
            return zip(lows, highs)
        else:
            return self.bins[:]
    def center(self, i):
        """Return the center (x value) of bin `i`."""
        if self._numeric(self.bins[i]):
            return (self.bins[i][0] + self.bins[i][1])/2.
        else:
            # categorical bin: the "center" is the category name itself
            return self.bins[i]
    def centers(self):
        """Return the centers of all bins."""
        return [self.center(i) for i in range(len(self.bins))]
    def centroid(self, i):
        """Return the centroid (average data x value) of bin `i`."""
        if self.values[i] == 0.:
            # empty bin: fall back to the geometric center
            return self.center(i)
        else:
            return self._sumx[i] / self.values[i]
    def centroids(self):
        """Return the centroids of all bins."""
        return [self.centroid(i) for i in range(len(self.bins))]
    def mean(self, decimals=Auto, sigfigs=Auto, string=False):
        """Calculate the mean of the distribution, using bin contents.
        Keyword arguments:
           decimals (int or `Auto`): number of digits after the decimal
              point to round the result, if not `Auto`
           sigfigs (int or `Auto`): number of significant digits to round
              the result, if not `Auto`
           string (bool): return output as a string (forces number of digits)
        """
        # width-weighted first moment: sum(width*value*center) / sum(width*value)
        numer = 0.
        denom = 0.
        for bin, value in zip(self.bins, self.values):
            if self._numeric(bin):
                width = bin[1] - bin[0]
                center = (bin[0] + bin[1])/2.
            else:
                raise ContainerException, "The mean of a categorical histogram is not meaningful"
            numer += width * value * center
            denom += width * value
        output = numer/denom
        if decimals is not Auto:
            if string:
                return mathtools.str_round(output, decimals)
            else:
                return round(output, decimals)
        elif sigfigs is not Auto:
            if string:
                return mathtools.str_sigfigs(output, sigfigs)
            else:
                return mathtools.round_sigfigs(output, sigfigs)
        else:
            if string:
                return str(output)
            else:
                return output
    def rms(self, decimals=Auto, sigfigs=Auto, string=False):
        """Calculate the root-mean-square of the distribution, using bin contents.
        Keyword arguments:
           decimals (int or `Auto`): number of digits after the decimal
              point to round the result, if not `Auto`
           sigfigs (int or `Auto`): number of significant digits to round
              the result, if not `Auto`
           string (bool): return output as a string (forces number of digits)
        """
        # width-weighted second moment: sqrt(sum(w*v*x**2) / sum(w*v))
        numer = 0.
        denom = 0.
        for bin, value in zip(self.bins, self.values):
            if self._numeric(bin):
                width = bin[1] - bin[0]
                center = (bin[0] + bin[1])/2.
            else:
                raise ContainerException, "The RMS of a categorical histogram is not meaningful"
            numer += width * value * center**2
            denom += width * value
        output = math.sqrt(numer/denom)
        if decimals is not Auto:
            if string:
                return mathtools.str_round(output, decimals)
            else:
                return round(output, decimals)
        elif sigfigs is not Auto:
            if string:
                return mathtools.str_sigfigs(output, sigfigs)
            else:
                return mathtools.round_sigfigs(output, sigfigs)
        else:
            if string:
                return str(output)
            else:
                return output
    def stdev(self, decimals=Auto, sigfigs=Auto, string=False):
        """Calculate the standard deviation of the distribution, using bin contents.
        Keyword arguments:
           decimals (int or `Auto`): number of digits after the decimal
              point to round the result, if not `Auto`
           sigfigs (int or `Auto`): number of significant digits to round
              the result, if not `Auto`
           string (bool): return output as a string (forces number of digits)
        """
        # sqrt(E[x**2] - E[x]**2) with width-weighted moments
        numer1 = 0.
        numer2 = 0.
        denom = 0.
        for bin, value in zip(self.bins, self.values):
            if self._numeric(bin):
                width = bin[1] - bin[0]
                center = (bin[0] + bin[1])/2.
            else:
                raise ContainerException, "The standard deviation of a categorical histogram is not meaningful"
            numer1 += width * value * center
            numer2 += width * value * center**2
            denom += width * value
        output = math.sqrt(numer2/denom - (numer1/denom)**2)
        if decimals is not Auto:
            if string:
                return mathtools.str_round(output, decimals)
            else:
                return round(output, decimals)
        elif sigfigs is not Auto:
            if string:
                return mathtools.str_sigfigs(output, sigfigs)
            else:
                return mathtools.round_sigfigs(output, sigfigs)
        else:
            if string:
                return str(output)
            else:
                return output
    def store(self):
        """Return a _copy_ of the histogram's stored values (if any)."""
        if self._lenstore is None:
            return self._store[:]
        else:
            # bounded store: only the filled prefix is meaningful
            return self._store[0:self._lenstore]
    def weights(self):
        """Return a _copy_ of the histogram's stored weights (if any)."""
        if self._lenstore is None:
            return self._weights[:]
        else:
            return self._weights[0:self._lenstore]
    def clearbins(self):
        """Clear all bin values, including `underflow`, `overflow`, and `inflow`, and set `entries` to zero."""
        self.entries = 0
        self.values = numpy.zeros(len(self.bins), self.values.dtype)
        self._sumx = numpy.zeros(len(self.bins), self._sumx.dtype)
        self.underflow, self.overflow, self.inflow = 0., 0., 0.
    def clearstore(self):
        """Clear the histogram's stored values (if any)."""
        if self._lenstore is None:
            self._store = []
            self._weights = []
        else:
            # bounded store: resetting the fill pointer is enough
            self._lenstore = 0
    def refill(self):
        """Clear and refill all bin values using the stored values (if any)."""
        self.clearbins()
        # fillstore=False: the store already holds exactly these values
        self.fill(self._store, self._weights, self._lenstore, fillstore=False)
    def support(self):
        """Return the widest interval of bin values with non-zero contents."""
        all_numeric = True
        for bin in self.bins:
            if not self._numeric(bin):
                all_numeric = False
                break
        xmin, xmax = None, None
        output = []
        for bin, value in zip(self.bins, self.values):
            if value > 0.:
                if all_numeric:
                    x1, x2 = bin
                    if xmin is None or x1 < xmin: xmin = x1
                    if xmax is None or x2 > xmax: xmax = x2
                else:
                    # categorical: collect the names of occupied bins instead
                    output.append(bin)
        if all_numeric: return xmin, xmax
        else: return output
    def scatter(self, centroids=False, poisson=False, **frameargs):
        """Return the bins and values of the histogram as a Scatter plot.
        Arguments:
           centroids (bool): if `False`, use bin centers; if `True`,
              use centroids
           poisson (bool): if `False`, do not create error bars; if
              `True`, create error bars assuming the bin contents to
              belong to Poisson distributions
        Note:
           Asymmetric Poisson tail-probability is used for error bars
           on quantities up to 20 (using a pre-calculated table);
           for 20 and above, a symmetric square root is used
           (approximating Poisson(x) ~ Gaussian(x) for x >> 1).
        """
        kwds = {"linewidth": self.linewidth,
                "linestyle": self.linestyle,
                "linecolor": self.linecolor}
        kwds.update(frameargs)
        def poisson_errorbars(value):
            # pre-calculated (lower, upper) tail-probability offsets for small counts;
            # see the commented-out generation code below this method
            if value < 20:
                return {0: (0, 1.1475924708896912),
                        1: (-1, 1.3593357241843194),
                        2: (-2, 1.5187126521158518),
                        3: (-2.1423687562878797, 1.7239415816257235),
                        4: (-2.2961052720689565, 1.9815257924746845),
                        5: (-2.4893042928478337, 2.2102901353154891),
                        6: (-2.6785495948620621, 2.418184093020642),
                        7: (-2.8588433484599989, 2.6100604797946687),
                        8: (-3.0300038654056323, 2.7891396571794473),
                        9: (-3.1927880092968906, 2.9576883353481378),
                        10: (-3.348085587280849, 3.1173735938098446),
                        11: (-3.4967228532132424, 3.2694639669834089),
                        12: (-3.639421017629985, 3.4149513337692667),
                        13: (-3.7767979638286704, 3.5546286916146812),
                        14: (-3.9093811537390764, 3.6891418894420838),
                        15: (-4.0376219573077776, 3.8190252444691453),
                        16: (-4.1619085382943979, 3.9447267851063259),
                        17: (-4.2825766762666433, 4.0666265902382577),
                        18: (-4.3999186228618044, 4.185050401352413),
                        19: (-4.5141902851535463, 4.3002799167131514)}[value]
            else:
                # symmetric Gaussian approximation for large counts
                return -math.sqrt(value), math.sqrt(value)
        # columns: x, y and optionally the lower/upper y error bars
        if poisson: values = numpy.empty((len(self.bins), 4), dtype=numpy.float)
        else: values = numpy.empty((len(self.bins), 2), dtype=numpy.float)
        if centroids: values[:,0] = self.centroids()
        else: values[:,0] = self.centers()
        values[:,1] = self.values
        if poisson:
            for i in range(len(self.bins)):
                values[i,2:4] = poisson_errorbars(self.values[i])
            return Scatter(values=values, sig=("x", "y", "eyl", "ey"), **kwds)
        else:
            return Scatter(values=values, sig=("x", "y"), **kwds)
    ### to reproduce the table:
    # from scipy.stats import poisson
    # from scipy.optimize import bisect
    # from math import sqrt
    # def calculate_entry(value):
    #     def down(x):
    #         if x < 1e-5:
    #             return down(1e-5) - x
    #         else:
    #             if value in (0, 1, 2):
    #                 return poisson.cdf(value, x) - 1. - 2.*0.3413
    #             else:
    #                 return poisson.cdf(value, x) - poisson.cdf(value, value) - 0.3413
    #     def up(x):
    #         if x < 1e-5:
    #             return up(1e-5) - x
    #         else:
    #             if value in (0, 1, 2):
    #                 return poisson.cdf(value, x) - 1. + 2.*0.3413
    #             else:
    #                 return poisson.cdf(value, x) - poisson.cdf(value, value) + 0.3413
    #     table[value] = bisect(down, -100., 100.) - value, bisect(up, -100., 100.) - value
    #     if table[value][0] + value < 0.:
    #         table[value] = -value, table[value][1]
    # table = {}
    # for i in range(20):
    #     calculate_entry(i)
    def ranges(self, xlog=False, ylog=False):
        """Return a data-space bounding box as `xmin, ymin, xmax, ymax`.
        Arguments:
           xlog (bool): requesting a logarithmic x axis (negative
              and zero-valued contents are ignored)
           ylog (bool): requesting a logarithmic y axis
        """
        xmin, ymin, xmax, ymax = None, None, None, None
        all_numeric = True
        for bin, value in zip(self.bins, self.values):
            if self._numeric(bin):
                x1, x2 = bin
                if (not xlog or x1 > 0.) and (xmin is None or x1 < xmin): xmin = x1
                if (not xlog or x2 > 0.) and (xmax is None or x2 > xmax): xmax = x2
            else:
                all_numeric = False
            if (not ylog or value > 0.) and (ymin is None or value < ymin): ymin = value
            if (not ylog or value > 0.) and (ymax is None or value > ymax): ymax = value
        # categorical bins occupy integer positions 0 .. N-1
        if not all_numeric:
            xmin, xmax = -0.5, len(self.bins) - 0.5
        # fall back to fixed defaults when nothing constrained a range
        if xmin is None and xmax is None:
            if xlog:
                xmin, xmax = 0.1, 1.
            else:
                xmin, xmax = 0., 1.
        if ymin is None and ymax is None:
            if ylog:
                ymin, ymax = 0.1, 1.
            else:
                ymin, ymax = 0., 1.
        # pad degenerate (zero-width) ranges
        if xmin == xmax:
            if xlog:
                xmin, xmax = xmin/2., xmax*2.
            else:
                xmin, xmax = xmin - 0.5, xmax + 0.5
        if ymin == ymax:
            if ylog:
                ymin, ymax = ymin/2., ymax*2.
            else:
                ymin, ymax = ymin - 0.5, ymax + 0.5
        return xmin, ymin, xmax, ymax
class Histogram(HistogramAbstract):
    """Represent a 1-D histogram with uniform bins.
    Arguments:
       numbins (int): number of bins
       low (float): low edge of first bin
       high (float): high edge of last bin
       storelimit (int or `None`): maximum number of values to store,
          so that the histogram bins can be redrawn; `None` means no
          limit
       linewidth (float): scale factor for the line used to draw the
          histogram border
       linestyle (tuple or string): "solid", "dashed", "dotted", or a
          tuple of numbers representing a dash-pattern
       linecolor (string, color, or `None`): color of the boundary
          line of the histogram area; no line if `None`
       fillcolor (string, color, or `None`): fill color of the
          histogram area; hollow if `None`
       gap (float): space drawn between bins, as a fraction of the bin
          width
       `**frameargs`: keyword arguments for the coordinate frame
    Public members:
       bins (list of `(low, high)` pairs): bin intervals (x axis)
       values (numpy array of floats): contents of each bin (y axis),
          has the same length as `bins`
       entries (int): unweighted number of entries accumulated so far
       underflow (float): number of values encountered that are less
          than all bin ranges
       overflow (float): number of values encountered that are greater
          than all bin ranges
       `storelimit`, `linewidth`, `linestyle`, `linecolor`,
       `fillcolor`, and frame arguments.
    Behavior:
       The histogram bins are initially fixed, but can be 'reshaped'
       if `entries <= storelimit`.
       After construction, do not set the bins directly; use `reshape`
       instead.
       Setting `linecolor = None` is the only proper way to direct the
       graphics backend to not draw a line around the histogram
       border.
    """
    def __init__(self, numbins, low, high, data=None, weights=None, storelimit=0, linewidth=1., linestyle="solid", linecolor="black", fillcolor=None, gap=0, **frameargs):
        # reshape defines self.bins, self._low, self._high, self._factor
        # before the base-class constructor reads self.bins
        self.reshape(numbins, low, high, refill=False, warnings=False)
        HistogramAbstract.__init__(self, self.bins, storelimit, linewidth, linestyle, linecolor, fillcolor, gap, **frameargs)
        if data is not None:
            if weights is not None:
                self.fill(data, weights)
            else:
                self.fill(data)
    def low(self):
        """Return the low edge of the lowest bin."""
        return self._low
    def high(self):
        """Return the high edge of the highest bin."""
        return self._high
    def __repr__(self):
        try:
            xlabel = " \"%s\"" % self.xlabel
        except AttributeError:
            xlabel = ""
        return "<Histogram %d %g %g%s at 0x%x>" % (len(self.bins), self.low(), self.high(), xlabel, id(self))
    def reshape(self, numbins, low=None, high=None, refill=True, warnings=True):
        """Change the bin structure of the histogram and refill its contents.
        Arguments:
           numbins (int): new number of bins
           low (float or `None`): new low edge, or `None` to keep the
              old one
           high (float or `None`): new high edge, or `None` to keep
              the old one
           refill (bool): call `refill` after setting the bins
           warnings (bool): raise `ContainerException` if `storelimit
              < entries`: that is, if the reshaping cannot be performed
              without losing data
        """
        if low is None: low = self.low()
        if high is None: high = self.high()
        if warnings:
            # reshaping is lossy unless every filled entry is in the store
            if self._lenstore is None:
                if len(self._store) < self.entries: raise ContainerException, "Cannot reshape a histogram without a full set of stored data"
            else:
                if self._lenstore < self.entries: raise ContainerException, "Cannot reshape a histogram without a full set of stored data"
        # _factor maps a data value to a (fractional) bin index in fill()
        self._low, self._high, self._factor = low, high, numbins/float(high - low)
        self._binwidth = (high-low)/float(numbins)
        lows = numpy.arange(low, high, self._binwidth)
        highs = lows + self._binwidth
        self.bins = zip(lows, highs)
        if refill: self.refill()
    def optimize(self, numbins=utilities.binning, ranges=utilities.calcrange_quartile):
        """Optimize the number of bins and/or range of the histogram.
        Arguments:
           numbins (function, int, or `None`): function that returns
              an optimal number of bins, given a dataset, or a simple
              number of bins, or `None` to leave the number of bins as it is
           ranges (function, (low, high), or `None`): function that
              returns an optimal low, high range, given a dataset, or an
              explicit low, high tuple, or `None` to leave the ranges as
              they are
        """
        if numbins is Auto: numbins = utilities.binning
        if ranges is Auto: ranges = utilities.calcrange_quartile
        # first do the ranges
        if ranges is None:
            low, high = self.low(), self.high()
        elif isinstance(ranges, (tuple, list)) and len(ranges) == 2 and isinstance(ranges[0], (numbers.Number, numpy.number)) and isinstance(ranges[1], (numbers.Number, numpy.number)):
            low, high = ranges
        elif callable(ranges):
            # range-optimization functions need the complete dataset
            if self._lenstore is None:
                if len(self._store) < self.entries: raise ContainerException, "Cannot optimize a histogram without a full set of stored data"
            else:
                if self._lenstore < self.entries: raise ContainerException, "Cannot optimize a histogram without a full set of stored data"
            low, high = ranges(self._store, self.__dict__.get("xlog", False))
        else:
            raise ContainerException, "The 'ranges' argument must be a function, (low, high), or `None`."
        # then do the binning
        if numbins is None:
            numbins = len(self.bins)
        elif isinstance(numbins, (int, long)):
            pass
        elif callable(numbins):
            # binning-optimization functions see only the data inside [low, high)
            if self._lenstore is None:
                if len(self._store) < self.entries: raise ContainerException, "Cannot optimize a histogram without a full set of stored data"
                storecopy = numpy.array(filter(lambda x: low <= x < high, self._store))
            else:
                if self._lenstore < self.entries: raise ContainerException, "Cannot optimize a histogram without a full set of stored data"
                storecopy = self._store[0:self._lenstore]
            numbins = numbins(storecopy, low, high)
        else:
            raise ContainerException, "The 'numbins' argument must be a function, int, or `None`."
        self.reshape(numbins, low, high)
    def fill(self, values, weights=None, limit=None, fillstore=True):
        """Put one or many values into the histogram.
        Arguments:
           values (float or list of floats): value or values to put
              into the histogram
           weights (float, list of floats, or `None`): weights for
              each value; all have equal weight if `weights = None`.
           limit (int or `None`): maximum number of values, weights to
              put into the histogram
           fillstore (bool): also fill the histogram's store (if any)
        Behavior:
           `itertools.izip` is used to loop over values and weights,
           filling the histogram.  If values and weights have
           different lengths, the filling operation would be truncated
           to the shorter list.
           Histogram weights are usually either 1 or 1/(value uncertainty)**2.
        """
        # handle the case of being given only one value
        if isinstance(values, (numbers.Number, numpy.number)):
            values = [values]
        if weights is None:
            weights = numpy.ones(len(values), numpy.float)
        for counter, (value, weight) in enumerate(itertools.izip(values, weights)):
            if limit is not None and counter >= limit: break
            if fillstore:
                if self._lenstore is None:
                    self._store.append(value)
                    self._weights.append(weight)
                elif self._lenstore < self.storelimit:
                    # bounded store: silently stop recording once full
                    self._store[self._lenstore] = value
                    self._weights[self._lenstore] = weight
                    self._lenstore += 1
            # uniform bins permit direct index computation (no search)
            index = int(math.floor((value - self._low)*self._factor))
            if index < 0:
                self.underflow += weight
            elif index >= len(self.bins):
                self.overflow += weight
            else:
                self.values[index] += weight
                self._sumx[index] += weight * value
            self.entries += 1
class HistogramNonUniform(HistogramAbstract):
    """Represent a 1-D histogram with non-uniform, user-defined bins.
    Arguments:
       bins (list of `(low, high)` pairs): user-defined bin intervals
       storelimit (int or `None`): maximum number of values to store,
          so that the histogram bins can be redrawn; `None` means no
          limit
       linewidth (float): scale factor for the line used to draw the
          histogram border
       linestyle (tuple or string): "solid", "dashed", "dotted", or a
          tuple of numbers representing a dash-pattern
       linecolor (string, color, or `None`): color of the boundary
          line of the histogram area; no line if `None`
       fillcolor (string, color, or `None`): fill color of the
          histogram area; hollow if `None`
       gap (float): space drawn between bins, as a fraction of the bin
          width
       `**frameargs`: keyword arguments for the coordinate frame
    Public members:
       values (numpy array of floats): contents of each bin (y axis),
          has the same length as `bins`
       entries (int): unweighted number of entries accumulated so far
       underflow (float): number of values encountered that are less
          than all bin ranges
       overflow (float): number of values encountered that are greater
          than all bin ranges
       inflow (float): number of values encountered that are between
          bins (if there are any gaps between user-defined bin intervals)
       `bins`, `storelimit`, `linewidth`, `linestyle`, `linecolor`,
       `fillcolor`, and frame arguments.
    Behavior:
       If any bin intervals overlap, values will be entered into the
       first of the two overlapping bins.
       After construction, do not set the bins directly; use `reshape`
       instead.
       Setting `linecolor = None` is the only proper way to direct the
       graphics backend to not draw a line around the histogram
       border.
    """
    def __init__(self, bins, data=None, weights=None, storelimit=0, linewidth=1., linestyle="solid", linecolor="black", fillcolor=None, gap=0, **frameargs):
        HistogramAbstract.__init__(self, bins, storelimit, linewidth, linestyle, linecolor, fillcolor, gap, **frameargs)
        # cache the extreme edges over all (possibly disjoint) bins
        self._low, self._high = None, None
        for low, high in self.bins:
            if self._low is None or low < self._low:
                self._low = low
            if self._high is None or high > self._high:
                self._high = high
        if data is not None:
            if weights is not None:
                self.fill(data, weights)
            else:
                self.fill(data)
    def low(self):
        """Return the low edge of the lowest bin."""
        return self._low
    def high(self):
        """Return the high edge of the highest bin."""
        return self._high
    def __repr__(self):
        try:
            xlabel = " \"%s\"" % self.xlabel
        except AttributeError:
            xlabel = ""
        return "<HistogramNonUniform %d%s at 0x%x>" % (len(self.bins), xlabel, id(self))
    def reshape(self, bins, refill=True, warnings=True):
        """Change the bin structure of the histogram and refill its contents.
        Arguments:
           bins (list of `(low, high)` pairs): user-defined bin intervals
           refill (bool): call `refill` after setting the bins
           warnings (bool): raise `ContainerException` if `storelimit
              < entries`: that is, if the reshaping cannot be performed
              without losing data
        """
        if warnings:
            # reshaping is lossy unless every filled entry is in the store
            if self._lenstore is None:
                if len(self._store) < self.entries: raise ContainerException, "Cannot reshape a histogram without a full set of stored data"
            else:
                if self._lenstore < self.entries: raise ContainerException, "Cannot reshape a histogram without a full set of stored data"
        self.bins = bins
        if refill: self.refill()
    def fill(self, values, weights=None, limit=None, fillstore=True):
        """Put one or many values into the histogram.
        Arguments:
           values (float or list of floats): value or values to put
              into the histogram
           weights (float, list of floats, or `None`): weights for
              each value; all have equal weight if `weights = None`.
           limit (int or `None`): maximum number of values, weights to
              put into the histogram
           fillstore (bool): also fill the histogram's store (if any)
        Behavior:
           `itertools.izip` is used to loop over values and weights,
           filling the histogram.  If values and weights have
           different lengths, the filling operation would be truncated
           to the shorter list.
           Histogram weights are usually either 1 or 1/(value uncertainty)**2.
        """
        # handle the case of being given only one value
        if isinstance(values, (numbers.Number, numpy.number)):
            values = [values]
        if weights is None:
            weights = numpy.ones(len(values), numpy.float)
        for counter, (value, weight) in enumerate(itertools.izip(values, weights)):
            if limit is not None and counter >= limit: break
            if fillstore:
                if self._lenstore is None:
                    self._store.append(value)
                    self._weights.append(weight)
                elif self._lenstore < self.storelimit:
                    # bounded store: silently stop recording once full
                    self._store[self._lenstore] = value
                    self._weights[self._lenstore] = weight
                    self._lenstore += 1
            # linear search: the first bin containing the value wins (overlaps allowed)
            filled = False
            less_than_all = True
            greater_than_all = True
            for i, (low, high) in enumerate(self.bins):
                if low <= value < high:
                    self.values[i] += weight
                    self._sumx[i] += weight * value
                    filled = True
                    break
                elif not (value < low): less_than_all = False
                elif not (value >= high): greater_than_all = False
            if not filled:
                # classify the miss: below all bins, above all bins, or in a gap
                if less_than_all: self.underflow += weight
                elif greater_than_all: self.overflow += weight
                else: self.inflow += weight
            self.entries += 1
class HistogramCategorical(HistogramAbstract):
    """Represent a 1-D histogram with categorical bins (a bar chart).
    Arguments:
       bins (list of strings): names of the categories
       storelimit (int or `None`): maximum number of values to store,
          so that the histogram bins can be redrawn; `None` means no
          limit
       linewidth (float): scale factor for the line used to draw the
          histogram border
       linestyle (tuple or string): "solid", "dashed", "dotted", or a
          tuple of numbers representing a dash-pattern
       linecolor (string, color, or `None`): color of the boundary
          line of the histogram area; no line if `None`
       fillcolor (string, color, or `None`): fill color of the
          histogram area; hollow if `None`
       gap (float): space drawn between bins, as a fraction of the bin
          width
       `**frameargs`: keyword arguments for the coordinate frame
    Public members:
       values (numpy array of floats): contents of each bin (y axis),
          has the same length as `bins`
       entries (int): unweighted number of entries accumulated so far
       inflow (float): number of values encountered that do not belong
          to any bins
       `bins`, `storelimit`, `linewidth`, `linestyle`, `linecolor`,
       `fillcolor`, and frame arguments.
    Behavior:
       After construction, never change the bins.
       Setting `linecolor = None` is the only proper way to direct the
       graphics backend to not draw a line around the histogram
       border.
    """
    def __init__(self, bins, data=None, weights=None, storelimit=0, linewidth=1., linestyle="solid", linecolor="black", fillcolor=None, gap=0.1, **frameargs):
        # _catalog maps each category name to its bin index for O(1) filling
        self._catalog = dict(map(lambda (x, y): (y, x), enumerate(bins)))
        HistogramAbstract.__init__(self, bins, storelimit, linewidth, linestyle, linecolor, fillcolor, gap, **frameargs)
        if data is not None:
            if weights is not None:
                self.fill(data, weights)
            else:
                self.fill(data)
    def __repr__(self):
        try:
            xlabel = " \"%s\"" % self.xlabel
        except AttributeError:
            xlabel = ""
        return "<HistogramCategorical %d%s at 0x%x>" % (len(self.bins), xlabel, id(self))
    def low(self):
        """Return the effective low edge, with all categories treated as integers (-0.5)."""
        return -0.5
    def high(self):
        """Return the effective high edge, with all categories treated as integers (numbins - 0.5)."""
        return len(self.bins) - 0.5
    def top(self, N):
        """Return a simplified histogram containing only the top N values (sorted)."""
        pairs = zip(self.bins, self.values)
        pairs.sort(lambda a, b: cmp(b[1], a[1]))  # descending by bin value
        # everything below the top N is merged into a single "other" bin
        othervalue = sum([values for bins, values in pairs[N:]])
        bins, values = zip(*pairs[:N])
        h = HistogramCategorical(list(bins) + ["other"])
        h.values = numpy.array(list(values) + [othervalue])
        # copy all remaining attributes (styling, frame arguments) to the new histogram
        for name, value in self.__dict__.items():
            if name not in ("bins", "values"):
                h.__dict__[name] = value
        return h
    def binorder(self, *neworder):
        """Specify a new order for the bins with a list of string arguments (updating bin values).
        All arguments must be the names of existing bins.
        If a bin name is missing, it will be deleted!
        """
        # map bin name -> current index
        reverse = dict(map(lambda (x, y): (y, x), enumerate(self.bins)))
        indicies = []
        for name in neworder:
            if name not in self.bins:
                raise ContainerException, "Not a recognized bin name: \"%s\"." % name
            indicies.append(reverse[name])
        # bins dropped from the new order are folded into `inflow`
        newinflow = 0.
        for i, name in enumerate(self.bins):
            if name not in neworder:
                newinflow += self.values[i]
        self.bins = [self.bins[i] for i in indicies]
        indicies = numpy.array(indicies)
        self.values = self.values[indicies]
        self._sumx = self._sumx[indicies]
        self.inflow += newinflow
    def fill(self, values, weights=None, limit=None, fillstore=True):
        """Put one or many values into the histogram.
        Arguments:
           values (float or list of floats): value or values to put
              into the histogram
           weights (float, list of floats, or `None`): weights for
              each value; all have equal weight if `weights = None`.
           limit (int or `None`): maximum number of values, weights to
              put into the histogram
           fillstore (bool): also fill the histogram's store (if any)
        Behavior:
           `itertools.izip` is used to loop over values and weights,
           filling the histogram.  If values and weights have
           different lengths, the filling operation would be truncated
           to the shorter list.
           Histogram weights are usually either 1 or 1/(value uncertainty)**2.
        """
        # handle the case of being given only one value
        if isinstance(values, basestring):
            values = [values]
        if weights is None:
            weights = numpy.ones(len(values), numpy.float)
        for counter, (value, weight) in enumerate(itertools.izip(values, weights)):
            if limit is not None and counter >= limit: break
            try:
                # translate the category name into its bin index
                value = self._catalog[value]
                self.values[value] += weight
                self._sumx[value] += weight * value
            except KeyError:
                # unknown category: count it as inflow; the store records index -1
                value = -1
                self.inflow += weight
            self.entries += 1
            if fillstore:
                if self._lenstore is None:
                    self._store.append(value)
                    self._weights.append(weight)
                elif self._lenstore < self.storelimit:
                    self._store[self._lenstore] = value
                    self._weights[self._lenstore] = weight
                    self._lenstore += 1
######################################################### Scatter plots, with and without error bars, and timeseries
class Scatter(Frame):
"""Represents a scatter of X-Y points, a line graph, and error bars.
Signatures::
Scatter(values, sig, ...)
Scatter(x, y, [ex,] [ey,] [exl,] [eyl,] ...)
Arguments for signature 1:
values (numpy array of N-dimensional points): X-Y points to
draw (with possible error bars)
sig (list of strings): how to interpret each N-dimensional
point, e.g. `('x', 'y', 'ey')` for triplets of x, y, and y
error bars
Arguments for signature 2:
x (list of floats): x values
y (list of floats): y values
ex (list of floats or `None`): symmetric or upper errors in x;
`None` for no x error bars
ey (list of floats or `None`): symmetric or upper errors in y
exl (list of floats or `None`): asymmetric lower errors in x
eyl (list of floats or `None`): asymmetric lower errors in y
Arguments for both signatures:
limit (int or `None`): maximum number of points to draw
(randomly selected if less than total number of points)
calcrange (function): a function that chooses a reasonable range
to plot, based on the data (overruled by `xmin`, `xmax`, etc.)
marker (string or `None`): symbol to draw at each point; `None`
for no markers (e.g. just lines)
markersize (float): scale factor to resize marker points
markercolor (string, color, or `None`): color of the marker
points; hollow markers if `None`
markeroutline (string, color, or `None`): color of the outline
of each marker; no outline if `None`
linewidth (float): scale factor to resize line width
linestyle (tuple or string): "solid", "dashed", "dotted", or a
tuple of numbers representing a dash-pattern
linecolor (string, color, or `None`): color of a line
connecting all points; no line if `None`
`**frameargs`: keyword arguments for the coordinate frame
Public members:
`values`, `sig`, `limit`, `calcrange`, `marker`, `markersize`,
`markercolor`, `markeroutline`, `lines`, `linewidth`,
`linestyle`, `linecolor`, and frame arguments.
Behavior:
Points are stored internally as an N-dimensional numpy array of
`values`, with meanings specified by `sig`.
Input points are _copied_, not set by reference, with both
input methods. The set-by-signature method is likely to be
faster for large datasets.
Setting `limit` to a value other than `None` restricts the
number of points to draw in the graphical backend, something
that may be necessary if the number of points is very large. A
random subset is selected when the scatter plot is drawn.
The numerical `limit` refers to the number of points drawn
*within a coordinate frame,* so zooming in will reveal more
points.
Since the input set of points is not guaranteed to be
monatonically increasing in x, a line connecting all points
might cross itself.
Setting `marker = None` is the only proper way to direct the
graphics backend to not draw a marker at each visible point
Setting `linecolor = None` is the only proper way to direct the
graphics backend to not draw a line connecting all visible points
Exceptions:
At least `x` and `y` are required.
"""
_not_frameargs = ["sig", "values", "limit", "calcrange", "marker", "markersize", "markercolor", "markeroutline", "linewidth", "linestyle", "linecolor"]
def __init__(self, values=[], sig=None, x=None, y=None, ex=None, ey=None, exl=None, eyl=None, limit=None, calcrange=utilities.calcrange, marker="circle", markersize=1., markercolor="black", markeroutline=None, linewidth=1., linestyle="solid", linecolor=None, **frameargs):
self.limit, self.calcrange = limit, calcrange
self.marker, self.markersize, self.markercolor, self.markeroutline, self.linewidth, self.linestyle, self.linecolor = marker, markersize, markercolor, markeroutline, linewidth, linestyle, linecolor
if sig is None:
self.setvalues(x, y, ex, ey, exl, eyl)
else:
self.setbysig(values, sig)
Frame.__init__(self, **frameargs)
def __repr__(self):
if self.limit is None:
return "<Scatter %d (draw all) at 0x%x>" % (len(self.values), id(self))
else:
return "<Scatter %d (draw %d) at 0x%x>" % (len(self.values), self.limit, id(self))
def index(self):
"""Returns a dictionary of sig values ("x", "y", etc.) to `values` index.
Example usage::
scatter.values[0:1000,scatter.index()["ex"]]
returns the first thousand x error bars.
"""
return dict(zip(self.sig, range(len(self.sig))))
def sort(self, key="x"):
"""Sorts the data so that lines do not intersect themselves."""
self.values = self.values[self.values[:,self.index()[key]].argsort(),]
    def _prepare(self, xmin=None, ymin=None, xmax=None, ymax=None, xlog=None, ylog=None):
        # Build the point subsets the graphics backend draws:
        #   self._xlimited_values: rows passing the x-window cuts only
        #   self._limited_values:  rows passing both x and y cuts, thinned to
        #                          at most self.limit randomly chosen rows.
        # NOTE(review): with xmin/xmax/ymin/ymax left as None, the comparisons
        # below rely on Python 2's ability to order None against numbers --
        # confirm callers always pass concrete bounds.
        # NOTE(review): numpy.float is a removed alias in NumPy >= 1.24;
        # migration to plain `float` needed eventually.
        if len(self.values) == 0:
            self._xlimited_values = numpy.array([], dtype=numpy.float)
            self._limited_values = numpy.array([], dtype=numpy.float)
            return
        index = self.index()
        # select elements within the given ranges
        mask = numpy.ones(len(self.values), dtype="bool")
        x = self.values[:,index["x"]]
        y = self.values[:,index["y"]]
        # x cuts: a point survives if it (or the tip of its error bar) reaches
        # into the window; logical_and accumulates in place into `mask`
        if "ex" in index:
            numpy.logical_and(mask, (x + abs(self.values[:,index["ex"]]) > xmin), mask)
        else:
            numpy.logical_and(mask, (x > xmin), mask)
        if "exl" in index:
            numpy.logical_and(mask, (x - abs(self.values[:,index["exl"]]) < xmax), mask)
        elif "ex" in index:
            numpy.logical_and(mask, (x - abs(self.values[:,index["ex"]]) < xmax), mask)
        else:
            numpy.logical_and(mask, (x < xmax), mask)
        # snapshot after the x cuts only; the SAME mask keeps accumulating the
        # y cuts below, so statement order here is significant
        self._xlimited_values = self.values[mask]
        if "ey" in index:
            numpy.logical_and(mask, (y + abs(self.values[:,index["ey"]]) > ymin), mask)
        else:
            numpy.logical_and(mask, (y > ymin), mask)
        if "eyl" in index:
            numpy.logical_and(mask, (y - abs(self.values[:,index["eyl"]]) < ymax), mask)
        elif "ey" in index:
            numpy.logical_and(mask, (y - abs(self.values[:,index["ey"]]) < ymax), mask)
        else:
            numpy.logical_and(mask, (y < ymax), mask)
        inrange = self.values[mask]
        # select an unbiased subset
        if self.limit is not None and self.limit < len(inrange):
            ### Sometimes returns only the lower half of the range???
            # index = numpy.array([], dtype=numpy.integer)
            # while len(index) < self.limit:
            #     index = numpy.concatenate((index, numpy.random.random_integers(0, len(inrange) - 1, self.limit)))
            #     index = numpy.unique(index)
            # index = numpy.resize(index, self.limit)
            # self._limited_values = inrange[index]
            ### Simpler way to calculate the same thing
            # random.sample draws self.limit distinct row indices uniformly
            self._limited_values = inrange[random.sample(xrange(len(inrange)), self.limit)]
        else:
            self._limited_values = inrange
def setbysig(self, values, sig=("x", "y")):
"""Sets the values using a signature.
Arguments:
values (numpy array of N-dimensional points): X-Y points to
draw (with possible error bars)
sig (list of strings): how to interpret each N-dimensional
point, e.g. `('x', 'y', 'ey')` for triplets of x, y, and y
error bars
Exceptions:
At least `x` and `y` are required.
"""
if "x" not in sig or "y" not in sig:
raise ContainerException, "Signature must contain \"x\" and \"y\""
self.sig = sig
self.values = numpy.array(values, dtype=numpy.float)
def setvalues(self, x=None, y=None, ex=None, ey=None, exl=None, eyl=None):
"""Sets the values with separate lists.
Arguments:
x (list of floats or strings): x values
y (list of floats or strings): y values
ex (list of floats or `None`): symmetric or upper errors in x;
`None` for no x error bars
ey (list of floats or `None`): symmetric or upper errors in y
exl (list of floats or `None`): asymmetric lower errors in x
eyl (list of floats or `None`): asymmetric lower errors in y
Exceptions:
At least `x` and `y` are required.
"""
if x is None and y is None:
raise ContainerException, "Signature must contain \"x\" and \"y\""
longdim = 0
shortdim = 0
if x is not None:
longdim = max(longdim, len(x))
shortdim += 1
if y is not None:
longdim = max(longdim, len(y))
shortdim += 1
if ex is not None:
longdim = max(longdim, len(ex))
shortdim += 1
if ey is not None:
longdim = max(longdim, len(ey))
shortdim += 1
if exl is not None:
longdim = max(longdim, len(exl))
shortdim += 1
if eyl is not None:
longdim = max(longdim, len(eyl))
shortdim += 1
self.sig = []
self.values = numpy.empty((longdim, shortdim), dtype=numpy.float)
if x is not None:
x = numpy.array(x)
if x.dtype.char == "?":
x = numpy.array(x, dtype=numpy.string_)
if x.dtype.char in numpy.typecodes["Character"] + "Sa":
if len(x) > 0:
unique = numpy.unique(x)
self._xticks = dict(map(lambda (i, val): (float(i+1), val), enumerate(unique)))
strtoval = dict(map(lambda (i, val): (val, float(i+1)), enumerate(unique)))
x = numpy.apply_along_axis(numpy.vectorize(lambda s: strtoval[s]), 0, x)
else:
x = numpy.array([], dtype=numpy.float)
self.values[:,len(self.sig)] = x
self.sig.append("x")
if y is not None:
y = numpy.array(y)
if y.dtype.char == "?":
y = numpy.array(y, dtype=numpy.string_)
if y.dtype.char in numpy.typecodes["Character"] + "Sa":
if len(y) > 0:
unique = numpy.unique(y)
self._yticks = dict(map(lambda (i, val): (float(i+1), val), enumerate(unique)))
strtoval = dict(map(lambda (i, val): (val, float(i+1)), enumerate(unique)))
y = numpy.apply_along_axis(numpy.vectorize(lambda s: strtoval[s]), 0, y)
else:
y = numpy.array([], dtype=numpy.float)
self.values[:,len(self.sig)] = y
self.sig.append("y")
if ex is not None:
self.values[:,len(self.sig)] = ex
self.sig.append("ex")
if ey is not None:
self.values[:,len(self.sig)] = ey
self.sig.append("ey")
if exl is not None:
self.values[:,len(self.sig)] = exl
self.sig.append("exl")
if eyl is not None:
self.values[:,len(self.sig)] = eyl
self.sig.append("eyl")
def append(self, x, y, ex=None, ey=None, exl=None, eyl=None):
"""Append one point to the dataset.
Arguments:
x (float): x value
y (float): y value
ex (float or `None`): symmetric or upper error in x
ey (list of floats or `None`): symmetric or upper error in y
exl (list of floats or `None`): asymmetric lower error in x
eyl (list of floats or `None`): asymmetric lower error in y
Exceptions:
Input arguments must match the signature of the dataset
(`sig`).
Considerations:
This method is provided for convenience; it is more
efficient to input all points at once during
construction.
"""
index = self.index()
oldlen = self.values.shape[0]
oldwidth = self.values.shape[1]
for i in self.sig:
if eval(i) is None:
raise ContainerException, "This %s instance requires %s" % (self.__class__.__name__, i)
newvalues = [0.]*oldwidth
if x is not None: newvalues[index["x"]] = x
if y is not None: newvalues[index["y"]] = y
if ex is not None: newvalues[index["ex"]] = ex
if ey is not None: newvalues[index["ey"]] = ey
if exl is not None: newvalues[index["exl"]] = exl
if eyl is not None: newvalues[index["eyl"]] = eyl
self.values.resize((oldlen+1, oldwidth), refcheck=False)
self.values[oldlen,:] = newvalues
def _strip(self, which, limited=False):
try:
index = self.index()[which]
except KeyError:
raise ContainerException, "The signature doesn't have any \"%s\" variable" % which
if limited: return self._limited_values[:,index]
else: return self.values[:,index]
    def x(self, limited=False):
        """Return a 1-D numpy array of x values.
        Arguments:
            limited (bool): if True, only return the randomly selected
                subset chosen by the last `_prepare()` call (must be
                called after `_prepare()`)
        """
        return self._strip("x", limited)
    def y(self, limited=False):
        """Return a 1-D numpy array of y values.
        Arguments:
            limited (bool): if True, only return the randomly selected
                subset chosen by the last `_prepare()` call (must be
                called after `_prepare()`)
        """
        return self._strip("y", limited)
    def ex(self, limited=False):
        """Return a 1-D numpy array of x error bars (symmetric or upper).
        Arguments:
            limited (bool): if True, only return the randomly selected
                subset chosen by the last `_prepare()` call (must be
                called after `_prepare()`)
        """
        return self._strip("ex", limited)
    def ey(self, limited=False):
        """Return a 1-D numpy array of y error bars (symmetric or upper).
        Arguments:
            limited (bool): if True, only return the randomly selected
                subset chosen by the last `_prepare()` call (must be
                called after `_prepare()`)
        """
        return self._strip("ey", limited)
    def exl(self, limited=False):
        """Return a 1-D numpy array of asymmetric lower x error bars.
        Arguments:
            limited (bool): if True, only return the randomly selected
                subset chosen by the last `_prepare()` call (must be
                called after `_prepare()`)
        """
        return self._strip("exl", limited)
    def eyl(self, limited=False):
        """Return a 1-D numpy array of asymmetric lower y error bars.
        Arguments:
            limited (bool): if True, only return the randomly selected
                subset chosen by the last `_prepare()` call (must be
                called after `_prepare()`)
        """
        return self._strip("eyl", limited)
def ranges(self, xlog=False, ylog=False):
"""Return a data-space bounding box as `xmin, ymin, xmax, ymax`.
Arguments:
xlog (bool): requesting a logarithmic x axis (negative
and zero-valued contents are ignored)
ylog (bool): requesting a logarithmic y axis
"""
x = self.x()
y = self.y()
# if we're plotting logarithmically, only the positive values are relevant for ranges
if xlog or ylog:
mask = numpy.ones(len(self.values), dtype="bool")
if xlog:
numpy.logical_and(mask, (x > 0.), mask)
if ylog:
numpy.logical_and(mask, (y > 0.), mask)
x = x[mask]
y = y[mask]
if len(x) < 2:
if xlog:
xmin, xmax = 0.1, 1.
else:
xmin, xmax = 0., 1.
if ylog:
ymin, ymax = 0.1, 1.
else:
ymin, ymax = 0., 1.
elif callable(self.calcrange):
xmin, xmax = self.calcrange(x, xlog)
ymin, ymax = self.calcrange(y, ylog)
else:
raise ContainerException, "Scatter.calcrange must be a function."
if xmin == xmax:
if xlog:
xmin, xmax = xmin/2., xmax*2.
else:
xmin, xmax = xmin - 0.5, xmax + 0.5
if ymin == ymax:
if ylog:
ymin, ymax = ymin/2., ymax*2.
else:
ymin, ymax = ymin - 0.5, ymax + 0.5
return xmin, ymin, xmax, ymax
class TimeSeries(Scatter):
    """A scatter-plot in which the x axis is interpreted as time strings.
    Arguments:
        informat (string or `None`): time formatting string for
            interpreting x data (see `time documentation
            <http://docs.python.org/library/time.html#time.strftime>`_)
        outformat (string): time formatting string for plotting
        subseconds (bool): if True, interpret ".xxx" at the end of
            the string as fractions of a second
        t0 (number or time-string): the time from which to start
            counting; zero is equivalent to Jan 1, 1970
        x (list of strings): time strings for the x axis
        y (list of floats): y values
        ex (list of floats or `None`): symmetric or upper errors in x
            (in seconds); `None` for no x error bars
        ey (list of floats or `None`): symmetric or upper errors in y
        exl (list of floats or `None`): asymmetric lower errors in x
        eyl (list of floats or `None`): asymmetric lower errors in y
        limit (int or `None`): maximum number of points to draw
            (randomly selected if less than total number of points)
        sortbytime (bool): if True, sort the data in increasing
            temporal order
        calcrange (function): a function that chooses a reasonable range
            to plot, based on the data (overruled by `xmin`, `xmax`, etc.)
        marker (string or `None`): symbol to draw at each point; `None`
            for no markers (e.g. just lines)
        markersize (float): scale factor to resize marker points
        markercolor (string, color, or `None`): color of the marker
            points; hollow markers if `None`
        markeroutline (string, color, or `None`): color of the outline
            of each marker; no outline if `None`
        linewidth (float): scale factor to resize line width
        linestyle (tuple or string): "solid", "dashed", "dotted", or a
            tuple of numbers representing a dash-pattern
        linecolor (string, color, or `None`): color of a line
            connecting all points; no line if `None`
        `**frameargs`: keyword arguments for the coordinate frame
    Public members:
        `informat`, `outformat`, `values`, `sig`, `limit`, `calcrange`,
        `marker`, `markersize`, `markercolor`, `markeroutline`,
        `lines`, `linewidth`, `linestyle`, `linecolor`, and frame
        arguments.
    Behavior:
        Points are stored internally as an N-dimensional numpy array of
        `values`, with meanings specified by `sig`; input points are
        _copied_, not set by reference.
        Setting `limit` to a value other than `None` restricts the
        number of points to draw in the graphical backend; a random
        subset is selected when the plot is drawn.  The limit applies
        *within a coordinate frame,* so zooming in reveals more points.
        Since the input set of points is not guaranteed to be
        monotonically increasing in x, a line connecting all points
        might cross itself (use `sortbytime` to avoid that).
        Setting `marker = None` is the only proper way to suppress the
        per-point markers; setting `linecolor = None` is the only
        proper way to suppress the connecting line.
    Exceptions:
        At least `x` and `y` are required.
    """
    _not_frameargs = Scatter._not_frameargs + ["informat", "outformat"]

    def __init__(self, informat="%Y-%m-%d %H:%M:%S", outformat="%Y-%m-%d %H:%M:%S", subseconds=False, t0=0., x=None, y=None, ex=None, ey=None, exl=None, eyl=None, limit=None, sortbytime=True, calcrange=utilities.calcrange, marker=None, markersize=1., markercolor="black", markeroutline=None, linewidth=1., linestyle="solid", linecolor="black", **frameargs):
        # set through __dict__ to bypass any attribute machinery in the
        # Frame hierarchy (matches the original's behavior)
        self.__dict__["informat"] = informat
        self.__dict__["outformat"] = outformat
        self._subseconds, self._t0 = subseconds, t0
        # x time-strings are converted to seconds before storage
        Scatter.__init__(self, x=utilities.fromtimestring(x, informat, subseconds, t0), y=y, ex=ex, ey=ey, exl=exl, eyl=eyl, limit=limit, calcrange=calcrange, marker=marker, markersize=markersize, markercolor=markercolor, markeroutline=markeroutline, linewidth=linewidth, linestyle=linestyle, linecolor=linecolor, **frameargs)
        if sortbytime: self.sort("x")

    def __repr__(self):
        if self.limit is None:
            return "<TimeSeries %d (draw all) at 0x%x>" % (len(self.values), id(self))
        else:
            return "<TimeSeries %d (draw %d) at 0x%x>" % (len(self.values), self.limit, id(self))

    def append(self, x, y, ex=None, ey=None, exl=None, eyl=None):
        """Append one point to the dataset.
        Arguments:
            x (string): x value (a time-string)
            y (float): y value
            ex (float or `None`): symmetric or upper error in x
            ey (float or `None`): symmetric or upper error in y
            exl (float or `None`): asymmetric lower error in x
            eyl (float or `None`): asymmetric lower error in y
        Exceptions:
            Input arguments must match the signature of the dataset
            (`sig`).
        Considerations:
            This method is provided for convenience; it is more
            efficient to input all points at once during
            construction.
        """
        Scatter.append(self, utilities.fromtimestring(x, self.informat, self._subseconds, self._t0), y, ex, ey, exl, eyl)

    def totimestring(self, timenumbers):
        """Convert a number of seconds or a list of numbers into time string(s).
        Arguments:
            timenumbers (number or list of numbers): time(s) to be
                converted
        Behavior:
            If only one `timenumbers` is passed, the return value is a
            single string; if a list of numbers is passed, the return
            value is a list of strings.
            Uses this timeseries's `outformat` and `t0` for the conversion.
        """
        return utilities.totimestring(timenumbers, self.outformat, self._subseconds, self._t0)

    def fromtimestring(self, timestrings):
        """Convert a time string or many time strings into a number(s) of seconds.
        Arguments:
            timestrings (string or list of strings): time string(s) to be
                converted
        Behavior:
            If only one `timestrings` is passed, the return value is a
            single number; if a list of strings is passed, the return
            value is a list of numbers.
            Uses this timeseries's `informat` and `t0` for the
            conversion.
        """
        return utilities.fromtimestring(timestrings, self.informat, self._subseconds, self._t0)

    def timeticks(self, major, minor, start=None):
        """Set x tick-marks to temporally meaningful values.
        Arguments:
            major (number): number of seconds interval (may use combinations
                of SECOND, MINUTE, HOUR, DAY, WEEK, MONTH, or YEAR constants)
                for major ticks (ticks with labels)
            minor (number): same for minor ticks (shorter ticks without labels)
            start (number, string, or `None`): a time to set the offset
                of the tick-marks (use `t0` if `None`)
        Behavior:
            A "month" is taken to be exactly 31 days and a "year" is
            taken to be exactly 365 days.  Week markers will only line
            up with month markers at `start`.
        """
        # BUGFIX: the original called the bare name `fromtimestring(start)`,
        # which is not defined at module scope here (NameError); the instance
        # method -- which applies this object's informat/subseconds/t0 --
        # was clearly intended.
        if isinstance(start, basestring): start = self.fromtimestring(start)
        return utilities.timeticks(major, minor, self.outformat, self._subseconds, self._t0, start)
######################################################### Colorfield
class ColorField(Frame):
    """A binned 2-D field of z values, drawn by mapping each cell to a color.
    Arguments:
        xbins, ybins (int): number of grid cells along x and y
        xmin, xmax, ymin, ymax (numbers): data-space edges of the grid
        zmin, zmax (numbers or `Auto`): limits of the color scale
        zlog (bool): request a logarithmic color scale
        components (int): number of values stored per cell; 1 for a
            scalar field, more for vector-valued cells
        tocolor (function): maps cell values to colors
        smooth (bool): if True, the backend may interpolate between cells
        `**frameargs`: keyword arguments for the coordinate frame
    Public members:
        `values` (numpy array of shape (xbins, ybins) or
        (xbins, ybins, components)), plus the constructor arguments.
    """
    _not_frameargs = ["values", "zmin", "zmax", "zlog", "components", "tocolor", "smooth"]

    def __init__(self, xbins, xmin, xmax, ybins, ymin, ymax, zmin=Auto, zmax=Auto, zlog=False, components=1, tocolor=color.gradients["rainbow"], smooth=False, **frameargs):
        self.xmin, self.xmax, self.ymin, self.ymax, self.zmin, self.zmax, self.tocolor, self.smooth = xmin, xmax, ymin, ymax, zmin, zmax, tocolor, smooth
        # BUGFIX: zlog was accepted (and is listed in _not_frameargs) but was
        # never stored, so self.zlog did not exist after construction.
        self.zlog = zlog
        # `float` replaces the numpy.float alias removed in NumPy >= 1.24
        if components == 1:
            self.values = numpy.zeros((xbins, ybins), float)
        else:
            self.values = numpy.zeros((xbins, ybins, components), float)
        Frame.__init__(self, **frameargs)

    def __repr__(self):
        if self.components() == 1:
            return "<ColorField [%d][%d] x=(%g, %g) y=(%g, %g) at 0x%x>" % (self.xbins(), self.ybins(), self.xmin, self.xmax, self.ymin, self.ymax, id(self))
        else:
            return "<ColorField [%d][%d][%d] x=(%g, %g) y=(%g, %g) at 0x%x>" % (self.xbins(), self.ybins(), self.components(), self.xmin, self.xmax, self.ymin, self.ymax, id(self))

    def xbins(self):
        """Number of grid cells along x."""
        return self.values.shape[0]

    def ybins(self):
        """Number of grid cells along y."""
        return self.values.shape[1]

    def components(self):
        """Number of values per cell (1 for a scalar field)."""
        if len(self.values.shape) > 2:
            return self.values.shape[2]
        else:
            return 1

    def index(self, x, y):
        """Map a data-space point (x, y) to cell indices (i, j); raises
        ContainerException if the point lies outside the grid."""
        xindex = int(math.floor((x - self.xmin)*self.values.shape[0]/(self.xmax - self.xmin)))
        if not (0 <= xindex < self.values.shape[0]):
            raise ContainerException("The value %g is not between xmin=%g and xmax=%g." % (x, self.xmin, self.xmax))
        yindex = int(math.floor((y - self.ymin)*self.values.shape[1]/(self.ymax - self.ymin)))
        if not (0 <= yindex < self.values.shape[1]):
            raise ContainerException("The value %g is not between ymin=%g and ymax=%g." % (y, self.ymin, self.ymax))
        return xindex, yindex

    def center(self, i, j):
        """Map cell indices (i, j) to the data-space center of that cell;
        raises ContainerException for out-of-grid indices."""
        x = (i + 0.5)*(self.xmax - self.xmin)/float(self.values.shape[0]) + self.xmin
        if not (self.xmin <= x <= self.xmax):
            raise ContainerException("The index %d is not between 0 and xbins=%d" % (i, self.values.shape[0]))
        y = (j + 0.5)*(self.ymax - self.ymin)/float(self.values.shape[1]) + self.ymin
        if not (self.ymin <= y <= self.ymax):
            raise ContainerException("The index %d is not between 0 and ybins=%d" % (j, self.values.shape[1]))
        return x, y

    def map(self, func):
        """Fill every cell with func(x, y) evaluated at the cell center."""
        ybins = self.ybins()
        for i in xrange(self.xbins()):
            for j in xrange(ybins):
                self.values[i,j] = func(*self.center(i, j))

    def remap(self, func):
        """Refill every cell with func(x, y, old=current_value)."""
        ybins = self.ybins()
        for i in xrange(self.xbins()):
            for j in xrange(ybins):
                self.values[i,j] = func(*self.center(i, j), old=self.values[i,j])

    def zranges(self):
        """Return (zmin, zmax) observed in the data: scalars for a
        one-component field, per-component lists otherwise."""
        ybins = self.ybins()
        components = self.components()
        if components == 1:
            zmin, zmax = None, None
        else:
            zmin, zmax = [None]*self.components(), [None]*self.components()
        for i in xrange(self.xbins()):
            for j in xrange(ybins):
                if components == 1:
                    if zmin is None or self.values[i,j] < zmin: zmin = self.values[i,j]
                    if zmax is None or self.values[i,j] > zmax: zmax = self.values[i,j]
                else:
                    for k in xrange(components):
                        if zmin[k] is None or self.values[i,j,k] < zmin[k]: zmin[k] = self.values[i,j,k]
                        if zmax[k] is None or self.values[i,j,k] > zmax[k]: zmax[k] = self.values[i,j,k]
        return zmin, zmax

    def ranges(self, xlog=False, ylog=False):
        """Return a data-space bounding box as `xmin, ymin, xmax, ymax`.
        Arguments:
            xlog (bool): requesting a logarithmic x axis (negative
                and zero-valued contents are ignored)
            ylog (bool): requesting a logarithmic y axis
        """
        return self.xmin, self.ymin, self.xmax, self.ymax
######################################################### Subregions of the plane
class Region(Frame):
    """Represents an enclosed region of the plane.
    Signature::
        Region([command1[, command2[, ...]]], [fillcolor=color,] [**frameargs])
    Arguments:
        commands (list of RegionCommands): a list of `MoveTo`, `EdgeTo`,
            or `ClosePolygon` commands; commands have the same structure as
            SVG path data, but may have infinite arguments (to enclose an
            unbounded region of the plane)
        fillcolor (string or color): fill color of the enclosed region
        `**frameargs`: keyword arguments for the coordinate frame
    Public members:
        `commands`, `fillcolor`, and frame arguments.
    """
    _not_frameargs = ["commands", "fillcolor"]

    def __init__(self, *commands, **kwds):
        self.commands = list(commands)
        # default fill color unless the caller overrides it
        params = {"fillcolor": "lightblue"}
        params.update(kwds)
        Frame.__init__(self, **params)

    def __repr__(self):
        return "<Region (%s commands) at 0x%x>" % (len(self.commands), id(self))

    def ranges(self, xlog=False, ylog=False):
        """Return a data-space bounding box as `xmin, ymin, xmax, ymax`.
        Arguments:
            xlog (bool): requesting a logarithmic x axis (negative
                and zero-valued contents are ignored)
            ylog (bool): requesting a logarithmic y axis
        """
        xmin, ymin, xmax, ymax = None, None, None, None
        for command in self.commands:
            if not isinstance(command, RegionCommand):
                raise ContainerException("Commands passed to Region must all be RegionCommands (MoveTo, EdgeTo, ClosePolygon)")
            for x, y in command.points():
                # BUGFIX: the original condition
                #   `not isinstance(...) and not xlog or x > 0.`
                # grouped as `(A and B) or C`, which let infinite coordinates
                # into the bounding box whenever `x > 0.` evaluated true.
                # Infinite points must never contribute; on a log axis only
                # positive finite values may contribute.
                if not isinstance(x, mathtools.InfiniteType) and (not xlog or x > 0.):
                    if xmin is None or x < xmin: xmin = x
                    if xmax is None or x > xmax: xmax = x
                if not isinstance(y, mathtools.InfiniteType) and (not ylog or y > 0.):
                    if ymin is None or y < ymin: ymin = y
                    if ymax is None or y > ymax: ymax = y
        # fall back to unit windows when no finite point constrained an axis
        if xmin is None:
            if xlog:
                xmin, xmax = 0.1, 1.
            else:
                xmin, xmax = 0., 1.
        if ymin is None:
            if ylog:
                ymin, ymax = 0.1, 1.
            else:
                ymin, ymax = 0., 1.
        # widen degenerate (zero-width) windows
        if xmin == xmax:
            if xlog:
                xmin, xmax = xmin/2., xmax*2.
            else:
                xmin, xmax = xmin - 0.5, xmax + 0.5
        if ymin == ymax:
            if ylog:
                ymin, ymax = ymin/2., ymax*2.
            else:
                ymin, ymax = ymin - 0.5, ymax + 0.5
        return xmin, ymin, xmax, ymax
class RegionCommand:
    """Abstract base for Region path directives; contributes no points itself."""
    def points(self):
        return []
class MoveTo(RegionCommand):
    """Represents a directive to move the pen to a specified point."""
    def __init__(self, x, y):
        self.x, self.y = x, y
    def __repr__(self):
        def fmt(value):
            # finite numbers render compactly with %g; anything else
            # (e.g. Infinite markers) falls back to repr
            if isinstance(value, (numbers.Number, numpy.number)):
                return "%g" % value
            return repr(value)
        return "MoveTo(%s, %s)" % (fmt(self.x), fmt(self.y))
    def points(self):
        return [(self.x, self.y)]
class EdgeTo(RegionCommand):
    """Represents a directive to draw an edge to a specified point."""
    def __init__(self, x, y):
        self.x, self.y = x, y
    def __repr__(self):
        def fmt(value):
            # finite numbers render compactly with %g; anything else
            # (e.g. Infinite markers) falls back to repr
            if isinstance(value, (numbers.Number, numpy.number)):
                return "%g" % value
            return repr(value)
        return "EdgeTo(%s, %s)" % (fmt(self.x), fmt(self.y))
    def points(self):
        return [(self.x, self.y)]
class ClosePolygon(RegionCommand):
    """Represents a directive to close the current polygon."""
    # points() is inherited from RegionCommand: closing contributes no
    # coordinates of its own to the bounding box.
    def __repr__(self):
        return "ClosePolygon()"
class RegionMap(Frame):
    """A grid of cells, each assigned one of a fixed set of categories and
    drawn as a color (with optional borders between category patches).

    The `categorizer` may be a callable (x, y) -> category, a 2-D numpy
    array of category indices, or a Python expression string in x and y.
    """
    # attribute names consumed by RegionMap itself; everything else is a
    # frame argument
    _not_frameargs = ["xbins", "ybins", "categories", "categorizer", "colors", "bordercolor"]
    def __init__(self, xbins, xmin, xmax, ybins, ymin, ymax, categories, categorizer, colors=Auto, bordercolor=None, **frameargs):
        self.xbins, self.xmin, self.xmax, self.ybins, self.ymin, self.ymax, self.categories, self.categorizer, self.colors, self.bordercolor = xbins, xmin, xmax, ybins, ymin, ymax, categories, categorizer, colors, bordercolor
        Frame.__init__(self, **frameargs)
    def __repr__(self):
        return "<RegionMap [%d][%d] x=(%g, %g) y=(%g, %g) at 0x%x>" % (self.xbins, self.ybins, self.xmin, self.xmax, self.ymin, self.ymax, id(self))
    def index(self, x, y):
        # Map a data-space point to cell indices; raise if outside the grid.
        xindex = int(math.floor((x - self.xmin)*self.xbins/(self.xmax - self.xmin)))
        if not (0 <= xindex < self.xbins):
            raise ContainerException, "The value %g is not between xmin=%g and xmax=%g." % (x, self.xmin, self.xmax)
        yindex = int(math.floor((y - self.ymin)*self.ybins/(self.ymax - self.ymin)))
        if not (0 <= yindex < self.ybins):
            raise ContainerException, "The value %g is not between ymin=%g and ymax=%g." % (y, self.ymin, self.ymax)
        return xindex, yindex
    def center(self, i, j):
        # Map cell indices to the data-space center of that cell.
        x = (i + 0.5)*(self.xmax - self.xmin)/float(self.xbins) + self.xmin
        if not (self.xmin <= x <= self.xmax):
            raise ContainerException, "The index %d is not between 0 and xbins=%d" % (i, self.xbins)
        y = (j + 0.5)*(self.ymax - self.ymin)/float(self.ybins) + self.ymin
        if not (self.ymin <= y <= self.ymax):
            raise ContainerException, "The index %d is not between 0 and ybins=%d" % (j, self.ybins)
        return x, y
    def ranges(self, xlog=False, ylog=False):
        """Return a data-space bounding box as `xmin, ymin, xmax, ymax`.
        Arguments:
            xlog (bool): requesting a logarithmic x axis (negative
                and zero-valued contents are ignored)
            ylog (bool): requesting a logarithmic y axis
        """
        return self.xmin, self.ymin, self.xmax, self.ymax
    def _compile(self):
        # Normalize self.categorizer into a callable/array in self._categorizer.
        if isinstance(self.categorizer, numpy.ndarray) or callable(self.categorizer):
            self._categorizer = self.categorizer
        else:
            # SECURITY NOTE: the string form is eval'ed as Python code --
            # never pass untrusted input as a categorizer expression.
            self._categorizer = eval("lambda x, y: (%s)" % self.categorizer)
    def _prepare(self, xmin=None, ymin=None, xmax=None, ymax=None, xlog=False, ylog=False):
        # Build self._values, a 2-D list of RGB int triples for the backend,
        # optionally overwriting category-boundary cells with bordercolor.
        self._compile()
        if self.colors is Auto:
            cols = color.lightseries(len(self.categories), alternating=False)
        else:
            cols = self.colors
        self._colors = {}
        ints = {}
        counter = 0
        # assign each category a color and a small integer id (for borders)
        for category, col in zip(self.categories, cols):
            self._colors[category] = color.RGB(col).ints()
            ints[category] = counter
            counter += 1
        if self.bordercolor is not None:
            # NOTE(review): numpy.int is a removed alias in NumPy >= 1.24;
            # this line needs migration to plain `int` eventually.
            asarray = numpy.zeros((self.xbins, self.ybins), dtype=numpy.int)
        self._values = []
        for i in xrange(self.xbins):
            row = []
            for j in xrange(self.ybins):
                # array categorizers are indexed directly; callables are
                # evaluated at the cell center
                if isinstance(self._categorizer, numpy.ndarray):
                    category = self.categories[self._categorizer[i,j]]
                else:
                    category = self._categorizer(*self.center(i, j))
                row.append(self._colors[category])
                if self.bordercolor is not None:
                    asarray[i,j] = ints[category]
            self._values.append(row)
        if self.bordercolor is not None:
            # a cell is a border cell if any 4-neighbor has a different
            # category; note numpy.roll wraps around, so opposite edges of
            # the grid are compared against each other
            roll1 = numpy.roll(asarray, 1, 0)
            roll2 = numpy.roll(asarray, -1, 0)
            roll3 = numpy.roll(asarray, 1, 1)
            roll4 = numpy.roll(asarray, -1, 1)
            mask = numpy.equal(asarray, roll1)
            numpy.logical_and(mask, numpy.equal(asarray, roll2), mask)
            numpy.logical_and(mask, numpy.equal(asarray, roll3), mask)
            numpy.logical_and(mask, numpy.equal(asarray, roll4), mask)
            thecolor = color.RGB(self.bordercolor).ints()
            for i in xrange(self.xbins):
                for j in xrange(self.ybins):
                    if not mask[i,j]:
                        self._values[i][j] = thecolor
######################################################### Curves and functions
class Curve(Frame):
"""Represents a parameterized function.
Arguments:
func (function or string): the function to plot; if callable,
it should take one argument and accept parameters as keywords;
if a string, it should be valid Python code, accepting a
variable name specified by `var`, parameter names to be passed
through `parameters`, and any function in the `math` library
(`cmath` if complex).
xmin, xmax (numbers or `Auto`): nominal range of function input
parameters (dict): parameter name, value pairs to be passed
before plotting
var (string): name of the input variable (string `func` only)
namespace (module, dict, or `None`): names to be used by the
function; for example::
import scipy.special # (sudo apt-get install python-scipy)
curve = Curve("jn(4, x)", namespace=scipy.special)
draw(curve, xmin=-20., xmax=20., fileName="/tmp/tmp.svg")
form (built-in constant): if Curve.FUNCTION, `func` is expected
to input x and output y; if Curve.PARAMETRIC, `func` is expected
to input t and output the tuple (x, y); if Curve.COMPLEX, `func`
is expected to output a 2-D point as a complex number
samples (number or `Auto`): number of sample points or `Auto`
for dynamic sampling (_not yet copied over from SVGFig!_)
linewidth (float): scale factor to resize line width
linestyle (tuple or string): "solid", "dashed", "dotted", or a
tuple of numbers representing a dash-pattern
linecolor (string, color, or `None`): color specification for
the curve
`**frameargs`: keyword arguments for the coordinate frame
    Public members:
        `func`, `xmin`, `xmax`, `parameters`, `var`, `namespace`, `form`,
        `samples`, `linewidth`, `linestyle`, `linecolor`, and
        frame arguments.
Examples::
>>> c = Curve("sin(x + delta)", 0, 6.28)
>>> c
<Curve x -> sin(x + delta) from 0 to 6.28>
>>> c(0., delta=0.1)
0.099833416646828155
>>> c.parameters = {"delta": 0.1}
>>> draw(c, fileName="/tmp/tmp.svg")
>>> def f(x, delta=0.):
... return math.sin(x + delta)
...
>>> c = Curve(f, 0, 6.28)
>>> c
<Curve f from 0 to 6.28>
>>> c(0., delta=0.1)
0.099833416646828155
"""
_not_frameargs = ["func", "parameters", "var", "namespace", "form", "samples", "linewidth", "linestyle", "linecolor", "FUNCTION", "PARAMETRIC", "COMPLEX"]
    # Lightweight enum-like token type; repr() shows e.g. "Curve.FUNCTION".
    class CurveType:
        def __init__(self, name): self.name = "Curve." + name
        def __repr__(self): return self.name
    # Singleton tokens selecting how `func` is interpreted (compared with
    # `is` throughout): plain y=f(x), parametric (x, y)=f(t), or complex
    # output interpreted as a 2-D point.
    FUNCTION = CurveType("FUNCTION")
    PARAMETRIC = CurveType("PARAMETRIC")
    COMPLEX = CurveType("COMPLEX")
def __init__(self, func, xmin=Auto, xmax=Auto, parameters={}, var="x", namespace=None, form=FUNCTION, samples=1000, linewidth=1., linestyle="solid", linecolor="black", **frameargs):
self.func, self.xmin, self.xmax, self.parameters, self.var, self.namespace, self.form, self.samples, self.linewidth, self.linestyle, self.linecolor = func, xmin, xmax, parameters, var, namespace, form, samples, linewidth, linestyle, linecolor
Frame.__init__(self, **frameargs)
    def _compile(self, parameters):
        # Bind `parameters` into self._func, a one-argument callable.
        if callable(self.func):
            # wrap the user callable so the parameters become keywords
            self._func = lambda t: self.func(t, **parameters)
            self._func.func_name = self.func.func_name
        else:
            # string form: build an evaluation namespace from math (or cmath
            # for COMPLEX curves), a few extras, the user namespace, and the
            # current parameters
            if self.form is self.COMPLEX: g = dict(cmath.__dict__)
            else: g = dict(math.__dict__)
            # missing these important functions
            g["erf"] = mathtools.erf
            g["erfc"] = mathtools.erfc
            if self.namespace is not None:
                if isinstance(self.namespace, dict):
                    g.update(self.namespace)
                else:
                    g.update(self.namespace.__dict__)
            g.update(parameters)
            # SECURITY NOTE: the expression string is eval'ed as Python code
            # -- never pass untrusted input as `func`.  The "lambda (%s):"
            # form is Python-2-only syntax.
            self._func = eval("lambda (%s): (%s)" % (self.var, self.func), g)
            self._func.func_name = "%s -> %s" % (self.var, self.func)
def __repr__(self):
if callable(self.func):
func_name = self.func.func_name
else:
func_name = "%s -> %s" % (self.var, self.func)
return "<Curve %s>" % func_name
def __call__(self, values, **parameters):
"""Call the function for a set of values and parameters.
Arguments:
values (number or list of numbers): input(s) to the function
parameters (keyword arguments): parameter values for this
set of evaluations
"""
self._compile(parameters)
if isinstance(values, (numbers.Number, numpy.number)):
singleton = True
values = [values]
else:
singleton = False
if self.form is self.FUNCTION:
output = numpy.empty(len(values), dtype=numpy.float)
elif self.form is self.PARAMETRIC:
output = numpy.empty((len(values), 2), dtype=numpy.float)
elif self.form is self.COMPLEX:
output = numpy.empty(len(values), dtype=numpy.complex)
else:
raise ContainerException, "Curve.form must be one of Curve.FUNCTION, Curve.PARAMETRIC, or Curve.COMPLEX."
try:
for i, value in enumerate(values):
output[i] = self._func(value)
except NameError, err:
raise NameError, "%s: are the Curve's parameters missing (or namespace not set)?" % err
if singleton: output = output[0]
return output
def derivative(self, values, epsilon=mathtools.epsilon, **parameters):
"""Numerically calculate derivative for a set of values and parameters.
Arguments:
values (number or list of numbers): input(s) to the function
parameters (keyword arguments): parameter values for this
set of evaluations
"""
self._compile(parameters)
if isinstance(values, (numbers.Number, numpy.number)):
singleton = True
values = [values]
else:
singleton = False
if self.form is self.FUNCTION:
output = numpy.empty(len(values), dtype=numpy.float)
elif self.form is self.PARAMETRIC:
output = numpy.empty((len(values), 2), dtype=numpy.float)
elif self.form is self.COMPLEX:
raise ContainerException, "Curve.derivative not implemented for COMPLEX functions."
else:
raise ContainerException, "Curve.form must be one of Curve.FUNCTION, Curve.PARAMETRIC, or Curve.COMPLEX."
for i, value in enumerate(values):
up = self._func(value + mathtools.epsilon)
down = self._func(value - mathtools.epsilon)
output[i] = (up - down)/(2. * mathtools.epsilon)
if singleton: output = output[0]
return output
def scatter(self, low, high, samples=Auto, xlog=False, **parameters):
    """Create a `Scatter` object from the evaluated function.

    Arguments:
        low, high (numbers): domain to sample
        samples (number or `Auto`): number of sample points
        xlog (bool): if `form` == `FUNCTION`, distribute the sample
            points logarithmically
        parameters (keyword arguments): parameter values for this
            set of evaluations
    """
    # FIX: copy the stored parameters before merging the call-site
    # overrides; the old code did `tmp = self.parameters` and then
    # mutated tmp, permanently clobbering the Curve's own parameters.
    tmp = dict(self.parameters)
    tmp.update(parameters)
    parameters = tmp
    if samples is Auto: samples = self.samples
    # dtype=float replaces the numpy.float alias removed in NumPy >= 1.20.
    if self.form is self.FUNCTION:
        points = numpy.empty((samples, 2), dtype=float)
        if xlog:
            # equal steps in log-space, with 0.5*step slack so the final
            # endpoint is included despite floating-point round-off
            step = (math.log(high) - math.log(low))/(samples - 1.)
            points[:,0] = numpy.exp(numpy.arange(math.log(low), math.log(high) + 0.5*step, step))
        else:
            step = (high - low)/(samples - 1.)
            points[:,0] = numpy.arange(low, high + 0.5*step, step)
        points[:,1] = self(points[:,0], **parameters)
    elif self.form is self.PARAMETRIC:
        step = (high - low)/(samples - 1.)
        points = self(numpy.arange(low, high + 0.5*step, step), **parameters)
    elif self.form is self.COMPLEX:
        # evaluate once, then split complex results into (real, imag) pairs
        step = (high - low)/(samples - 1.)
        tmp = self(numpy.arange(low, high + 0.5*step, step), **parameters)
        points = numpy.empty((samples, 2), dtype=float)
        for i, value in enumerate(tmp):
            points[i] = value.real, value.imag
    else:
        raise ContainerException("Curve.form must be one of Curve.FUNCTION, Curve.PARAMETRIC, or Curve.COMPLEX.")
    return Scatter(points, ("x", "y"), limit=None, calcrange=utilities.calcrange, marker=None, lines=True, linewidth=self.linewidth, linestyle=self.linestyle, linecolor=self.linecolor, **self._frameargs())
def _prepare(self, xmin=None, ymin=None, xmax=None, ymax=None, xlog=False, ylog=False):
    # Build the temporary Scatter used for drawing, inventing a default x
    # domain when none was given.  (ymin/ymax/ylog are accepted for
    # interface symmetry with other containers but are not used here.)
    if xmin in (None, Auto) and xmax in (None, Auto):
        # no x range at all: fall back to a unit interval
        if xlog:
            xmin, xmax = 0.1, 1.
        else:
            xmin, xmax = 0., 1.
    elif xmin is None:
        # derive the missing endpoint from the one that was given
        # NOTE(review): only None is tested here (not Auto, unlike the
        # branch above) — confirm whether xmin=Auto with a concrete xmax
        # can reach this point unconverted.
        if xlog:
            xmin = xmax / 2.
        else:
            xmin = xmax - 1.
    elif xmax is None:
        if xlog:
            xmax = xmin * 2.
        else:
            xmax = xmin + 1.
    self._scatter = self.scatter(xmin, xmax, self.samples, xlog, **self.parameters)
def ranges(self, xlog=False, ylog=False):
    """Return a data-space bounding box as `xmin, ymin, xmax, ymax`.

    Arguments:
        xlog (bool): requesting a logarithmic x axis (negative
            and zero-valued contents are ignored)
        ylog (bool): requesting a logarithmic y axis
    """
    cached = getattr(self, "_scatter", None)
    if cached is not None:
        return cached.ranges(xlog=xlog, ylog=ylog)
    # No prepared scatter yet: build one just long enough to ask it for
    # its ranges, then discard it again.
    self._prepare(xlog=xlog)
    bounds = self._scatter.ranges(xlog=xlog, ylog=ylog)
    self._scatter = None
    return bounds
def objective(self, data, parnames, method=Auto, exclude=Auto, centroids=False):
    """Return an objective function whose minimum represents a
    best fit to a given dataset.

    Arguments:
        data (`Histogram` or `Scatter`): the data to fit
        parnames (list of strings): names of the parameters
        method (function or `Auto`): a function that will be called
            for each data point to calculate the final value of the
            objective function; examples:
                `lambda f, x, y: (f - y)**2`        chi^2 for data without uncertainties
                `lambda f, x, y, ey: (f - y)**2/ey**2`  chi^2 with uncertainties
            If `method` is `Auto`, an appropriate chi^2 function will
            be used.
        exclude (function, `Auto`, or `None`): a function that will
            be called for each data point to determine whether to
            exclude the point; `Auto` excludes only zero values and
            `None` excludes nothing
        centroids (bool): use centroids of histogram, rather than
            centers

    Raises:
        ContainerException: for categorical histograms or unsupported
            data types.
    """
    if isinstance(data, Histogram):
        if isinstance(data, HistogramCategorical):
            raise ContainerException("A fit to a categorical histogram is not meaningful.")
        # FIX: a user-supplied `exclude` callable used to be silently
        # replaced by a no-op unless BOTH `exclude` and `method` were
        # Auto.  Now (matching the docstring and the Scatter branch):
        # Auto excludes empty bins, None excludes nothing, and a
        # callable is honored as given.
        if exclude is Auto:
            exclude = lambda x, y: y == 0.
        elif exclude is None:
            exclude = lambda x, y: False
        self._exclude = exclude
        if method is Auto:
            # Poisson-motivated chi^2: variance estimated by |y|
            method = lambda f, x, y: (f - y)**2/abs(y)
        # dtype=float replaces the numpy.float alias removed in NumPy >= 1.20
        values = numpy.empty((len(data.bins), 2), dtype=float)
        if centroids: values[:,0] = data.centroids()
        else: values[:,0] = data.centers()
        values[:,1] = data.values
        # build a function whose signature is exactly (parname1, parname2, ...)
        # so that Minuit can introspect the parameter names
        return eval("lambda %s: sum([method(f, x, y) for f, (x, y) in itertools.izip(curve(values[:,0], **{%s}), values) if not exclude(x, y)])" % (", ".join(parnames), ", ".join(["\"%s\": %s" % (x, x) for x in parnames])), {"method": method, "itertools": itertools, "curve": self, "values": values, "exclude": exclude})
    elif isinstance(data, Scatter):
        # pick defaults appropriate to the error bars present in the data
        if "ey" in data.sig and "eyl" in data.sig:
            if method is Auto:
                # asymmetric errors: use the lower error below the curve
                method = lambda f, x, y, ey, eyl: ((f - y)**2/eyl**2 if f < y else (f - y)**2/ey**2)
            if exclude is Auto:
                exclude = lambda x, y, ey, eyl: eyl == 0. or ey == 0.
            elif exclude is None:
                exclude = lambda x, y, ey, eyl: False
        elif "ey" in data.sig:
            if method is Auto:
                method = lambda f, x, y, ey: (f - y)**2/ey**2
            if exclude is Auto:
                exclude = lambda x, y, ey: ey == 0.
            elif exclude is None:
                exclude = lambda x, y, ey: False
        else:
            if method is Auto:
                method = lambda f, x, y: (f - y)**2
            if exclude is Auto or exclude is None:
                exclude = lambda x, y: False
        self._exclude = exclude
        index = data.index()
        if "ey" in data.sig and "eyl" in data.sig:
            values = numpy.empty((len(data.values), 4))
            values[:,0] = data.values[:,index["x"]]
            values[:,1] = data.values[:,index["y"]]
            values[:,2] = data.values[:,index["ey"]]
            values[:,3] = data.values[:,index["eyl"]]
            return eval("lambda %s: sum([method(f, x, y, ey, eyl) for f, (x, y, ey, eyl) in itertools.izip(curve(values[:,0], **{%s}), values) if not exclude(x, y, ey, eyl)])" % (", ".join(parnames), ", ".join(["\"%s\": %s" % (x, x) for x in parnames])), {"method": method, "itertools": itertools, "curve": self, "values": values, "exclude": exclude})
        elif "ey" in data.sig:
            values = numpy.empty((len(data.values), 3))
            values[:,0] = data.values[:,index["x"]]
            values[:,1] = data.values[:,index["y"]]
            values[:,2] = data.values[:,index["ey"]]
            return eval("lambda %s: sum([method(f, x, y, ey) for f, (x, y, ey) in itertools.izip(curve(values[:,0], **{%s}), values) if not exclude(x, y, ey)])" % (", ".join(parnames), ", ".join(["\"%s\": %s" % (x, x) for x in parnames])), {"method": method, "itertools": itertools, "curve": self, "values": values, "exclude": exclude})
        else:
            values = numpy.empty((len(data.values), 2))
            values[:,0] = data.values[:,index["x"]]
            values[:,1] = data.values[:,index["y"]]
            return eval("lambda %s: sum([method(f, x, y) for f, (x, y) in itertools.izip(curve(values[:,0], **{%s}), values) if not exclude(x, y)])" % (", ".join(parnames), ", ".join(["\"%s\": %s" % (x, x) for x in parnames])), {"method": method, "itertools": itertools, "curve": self, "values": values, "exclude": exclude})
    else:
        raise ContainerException("Data for Curve.objective must be a Histogram or a Scatter plot.")
def fit(self, data, parameters=Auto, sequence=[("migrad",)], method=Auto, exclude=Auto, centroids=False, **fitter_arguments):
    """Fit this curve to a given dataset, updating its `parameters` and creating a `minimizer` member.

    Arguments:
        data (`Histogram` or `Scatter`): the data to fit
        parameters (dict of strings -> values): the initial
            parameters for the fit
        sequence (list of (string, arg, arg)): sequence of Minuit
            commands to call, with optional arguments
        method (function or `Auto`): a function that will be called
            for each data point to calculate the final value of the
            objective function; examples:
                `lambda f, x, y: (f - y)**2`        chi^2 for data without uncertainties
                `lambda f, x, y, ey: (f - y)**2/ey**2`  chi^2 with uncertainties
            If `method` is `Auto`, an appropriate chi^2 function will
            be used.
        exclude (function, `Auto`, or `None`): a function that will
            be called for each data point to determine whether to
            exclude the point; `Auto` excludes only zero values and
            `None` excludes nothing
        centroids (bool): use centroids of histogram, rather than
            centers

    Keyword arguments:
        Keyword arguments will be passed to the Minuit object as member data.

    Side effects: sets `self.minimizer`, `self.parameters`, `self.chi2`,
    `self.ndf`, and `self.normalizedChi2` (the latter three even when a
    Minuit command raises, so partial results remain inspectable).
    """
    # NOTE(review): `sequence` has a mutable default list; it is never
    # mutated here, but confirm callers do not mutate it either.
    if parameters is Auto: parameters = self.parameters
    self.minimizer = minuit.Minuit(self.objective(data, parameters.keys(), method=method, exclude=exclude, centroids=centroids))
    # forward arbitrary fitter settings (e.g. tol, strategy) as attributes;
    # values are interpolated into code, so this is for trusted input only
    for name, value in fitter_arguments.items():
        exec("self.minimizer.%s = %s" % (name, str(value)))
    self.minimizer.values = parameters
    # this block is just to set ndf (with all exclusions applied)
    ndf = 0
    if isinstance(data, Histogram):
        if isinstance(data, HistogramCategorical):
            raise ContainerException, "A fit to a categorical histogram is not meaningful."
        # count the histogram bins that survive the exclusion function
        values = numpy.empty((len(data.bins), 2), dtype=numpy.float)
        if centroids: values[:,0] = data.centroids()
        else: values[:,0] = data.centers()
        values[:,1] = data.values
        for x, y in values:
            if not self._exclude(x, y):
                ndf += 1
    elif isinstance(data, Scatter):
        # count surviving scatter points; the arity of self._exclude
        # matches the error-bar columns present in the data signature
        index = data.index()
        if "ey" in data.sig and "eyl" in data.sig:
            values = numpy.empty((len(data.values), 4))
            values[:,0] = data.values[:,index["x"]]
            values[:,1] = data.values[:,index["y"]]
            values[:,2] = data.values[:,index["ey"]]
            values[:,3] = data.values[:,index["eyl"]]
            for x, y, ey, eyl in values:
                if not self._exclude(x, y, ey, eyl):
                    ndf += 1
        elif "ey" in data.sig:
            values = numpy.empty((len(data.values), 3))
            values[:,0] = data.values[:,index["x"]]
            values[:,1] = data.values[:,index["y"]]
            values[:,2] = data.values[:,index["ey"]]
            for x, y, ey in values:
                if not self._exclude(x, y, ey):
                    ndf += 1
        else:
            values = numpy.empty((len(data.values), 2))
            values[:,0] = data.values[:,index["x"]]
            values[:,1] = data.values[:,index["y"]]
            for x, y in values:
                if not self._exclude(x, y):
                    ndf += 1
    else:
        raise ContainerException, "Data for Curve.objective must be a Histogram or a Scatter plot."
    # degrees of freedom = surviving points minus fitted parameters
    ndf -= len(parameters)
    # end block to set ndf
    try:
        # run the user-requested Minuit command sequence, e.g.
        # [("migrad",), ("hesse",)]; string arguments get quoted
        for command in sequence:
            name = command[0]
            args = list(command[1:])
            for i in range(len(args)):
                if isinstance(args[i], basestring): args[i] = "\"%s\"" % args[i]
                else: args[i] = str(args[i])
            eval("self.minimizer.%s(%s)" % (name, ", ".join(args)))
    except Exception as tmp:
        # record whatever the minimizer reached before failing, then re-raise
        self.parameters = self.minimizer.values
        self.chi2 = self.minimizer.fval
        self.ndf = ndf
        self.normalizedChi2 = (self.minimizer.fval / float(self.ndf) if self.ndf > 0 else -1.)
        raise tmp
    self.parameters = self.minimizer.values
    self.chi2 = self.minimizer.fval
    self.ndf = ndf
    self.normalizedChi2 = (self.minimizer.fval / float(self.ndf) if self.ndf > 0 else -1.)
# reporting results after fitting
def round_errpair(self, parname, n=2):
    """Round fitted parameter `parname` and its uncertainty to `n`
    significant figures in the uncertainty (default is two)."""
    if getattr(self, "minimizer", None) is None:
        raise ContainerException("Curve.round_errpair can only be called after fitting.")
    value = self.minimizer.values[parname]
    error = self.minimizer.errors[parname]
    return mathtools.round_errpair(value, error, n=n)
def str_errpair(self, parname, n=2):
    """Round fitted parameter `parname` and its uncertainty to `n`
    significant figures in the uncertainty (default is two) and return
    the result as a string."""
    if getattr(self, "minimizer", None) is None:
        raise ContainerException("Curve.str_errpair can only be called after fitting.")
    value = self.minimizer.values[parname]
    error = self.minimizer.errors[parname]
    return mathtools.str_errpair(value, error, n=n)
def unicode_errpair(self, parname, n=2):
    """Round fitted parameter `parname` and its uncertainty to `n`
    significant figures in the uncertainty (default is two) and return
    the result joined by a unicode plus-minus sign."""
    if getattr(self, "minimizer", None) is None:
        raise ContainerException("Curve.unicode_errpair can only be called after fitting.")
    value = self.minimizer.values[parname]
    error = self.minimizer.errors[parname]
    return mathtools.unicode_errpair(value, error, n=n)
def expr(self, varrepl=None, sigfigs=2):
    """Return the function string with fitted parameter values substituted in.

    Arguments:
        varrepl (string or `None`): if given, also replace the
            independent variable name with this string
        sigfigs (int or `None`): significant figures for the substituted
            values; `None` means plain "%g" formatting

    Only works for string-based functions, and only after fitting.
    """
    if callable(self.func):
        raise ContainerException("Curve.expr only works for string-based functions.")
    if getattr(self, "minimizer", None) is None:
        raise ContainerException("Curve.expr can only be called after fitting.")
    result = self.func[:]
    for name, fitted in self.minimizer.values.items():
        if sigfigs is None:
            formatted = "%g" % fitted
        else:
            formatted = mathtools.str_sigfigs(fitted, sigfigs)
        # whole-word substitution so e.g. parameter "a" does not hit "abs"
        result = re.sub(r"\b%s\b" % name, formatted, result)
    if varrepl is not None:
        result = re.sub(r"\b%s\b" % self.var, varrepl, result)
    return result
######################################################### Grids, horiz/vert lines, annotations
class Line(Frame):
    """Represents a line drawn between two points (one of which may be at infinity).

    Arguments:
        x1, y1 (numbers): a point; either coordinate can be Infinity or
            multiples of Infinity
        x2, y2 (numbers): another point; either coordinate can be
            Infinity or multiples of Infinity
        linewidth (float): scale factor to resize line width
        linestyle (tuple or string): "solid", "dashed", "dotted", or a
            tuple of numbers representing a dash-pattern
        linecolor (string or color): color specification for grid line(s)
        `**frameargs`: keyword arguments for the coordinate frame

    Public members:
        `x1`, `y1`, `x2`, `y2`, `linewidth`, `linestyle`, `linecolor`,
        and frame arguments.
    """
    _not_frameargs = ["x1", "y1", "x2", "y2", "linewidth", "linestyle", "linecolor"]

    def __init__(self, x1, y1, x2, y2, linewidth=1., linestyle="solid", linecolor="black", **frameargs):
        self.x1, self.y1, self.x2, self.y2, self.linewidth, self.linestyle, self.linecolor = x1, y1, x2, y2, linewidth, linestyle, linecolor
        Frame.__init__(self, **frameargs)

    def __repr__(self):
        # finite coordinates render compactly with %g; infinite ones fall
        # back to their own repr
        if isinstance(self.x1, mathtools.InfiniteType): x1 = repr(self.x1)
        else: x1 = "%g" % self.x1
        if isinstance(self.y1, mathtools.InfiniteType): y1 = repr(self.y1)
        else: y1 = "%g" % self.y1
        if isinstance(self.x2, mathtools.InfiniteType): x2 = repr(self.x2)
        else: x2 = "%g" % self.x2
        if isinstance(self.y2, mathtools.InfiniteType): y2 = repr(self.y2)
        else: y2 = "%g" % self.y2
        return "<Line %s %s %s %s at 0x%x>" % (x1, y1, x2, y2, id(self))

    def ranges(self, xlog=False, ylog=False):
        # An endpoint is "unusable" for range calculation when either
        # coordinate is infinite, or non-positive on a log-scaled axis.
        if (isinstance(self.x1, mathtools.InfiniteType) or isinstance(self.y1, mathtools.InfiniteType) or (getattr(self, "xlog", False) and self.x1 <= 0.) or (getattr(self, "ylog", False) and self.y1 <= 0.)) and \
           (isinstance(self.x2, mathtools.InfiniteType) or isinstance(self.y2, mathtools.InfiniteType) or (getattr(self, "xlog", False) and self.x2 <= 0.) or (getattr(self, "ylog", False) and self.y2 <= 0.)):
            # both endpoints unusable: fall back to a unit box
            if getattr(self, "xlog", False):
                xmin, xmax = 0.1, 1.
            else:
                xmin, xmax = 0., 1.
            if getattr(self, "ylog", False):
                ymin, ymax = 0.1, 1.
            else:
                ymin, ymax = 0.1, 1. if False else 1.  # see note below
            return xmin, ymin, xmax, ymax
        elif isinstance(self.x1, mathtools.InfiniteType) or isinstance(self.y1, mathtools.InfiniteType) or (getattr(self, "xlog", False) and self.x1 <= 0.) or (getattr(self, "ylog", False) and self.y1 <= 0.):
            # only point 1 unusable: build a box around point 2
            singlepoint = (self.x2, self.y2)
        elif isinstance(self.x2, mathtools.InfiniteType) or isinstance(self.y2, mathtools.InfiniteType) or (getattr(self, "xlog", False) and self.x2 <= 0.) or (getattr(self, "ylog", False) and self.y2 <= 0.):
            singlepoint = (self.x1, self.y1)
        else:
            # both endpoints finite and valid: tight bounding box
            return min(self.x1, self.x2), min(self.y1, self.y2), max(self.x1, self.x2), max(self.y1, self.y2)
        # handle singlepoint: surround the one usable point with margin
        # (factor of 2 on log axes, +-1 on linear axes)
        if getattr(self, "xlog", False):
            xmin, xmax = singlepoint[0]/2., singlepoint[0]*2.
        else:
            xmin, xmax = singlepoint[0] - 1., singlepoint[0] + 1.
        if getattr(self, "ylog", False):
            ymin, ymax = singlepoint[1]/2., singlepoint[1]*2.
        else:
            ymin, ymax = singlepoint[1] - 1., singlepoint[1] + 1.
        return xmin, ymin, xmax, ymax
class Grid(Frame):
    """Represents one or more horizontal/vertical lines or a whole grid.

    Arguments:
        horiz (list of numbers, function, or `None`): a list of values
            at which to draw horizontal lines, a function `f(a, b)` taking
            an interval and providing such a list, or `None` for no
            horizontal lines.
        vert (list of numbers, function, or `None`): same for vertical
            lines
        linewidth (float): scale factor to resize line width
        linestyle (tuple or string): "solid", "dashed", "dotted", or a
            tuple of numbers representing a dash-pattern
        linecolor (string or color): color specification for grid line(s)
        `**frameargs`: keyword arguments for the coordinate frame

    Public members:
        `horiz`, `vert`, `linewidth`, `linestyle`, `linecolor`, and
        frame arguments.

    Considerations:
        The `regular` utility provides functions suitable for `horiz`
        and `vert`.
    """
    _not_frameargs = ["horiz", "vert", "linewidth", "linestyle", "linecolor"]

    def __init__(self, horiz=None, vert=None, linewidth=1., linestyle="dotted", linecolor="grey", **frameargs):
        self.horiz = horiz
        self.vert = vert
        self.linewidth = linewidth
        self.linestyle = linestyle
        self.linecolor = linecolor
        Frame.__init__(self, **frameargs)

    def __repr__(self):
        return "<Grid %s %s at 0x%x>" % (repr(self.horiz), repr(self.vert), id(self))

    def _prepare(self, xmin=None, ymin=None, xmax=None, ymax=None, xlog=None, ylog=None):
        # Horizontal line positions come from the y interval.
        try:
            self._horiz = [value for value in self.horiz]
        except TypeError:
            # not directly iterable: may be a generator function or None
            if callable(self.horiz):
                try:
                    self._horiz = self.horiz(ymin, ymax)
                except TypeError:
                    raise ContainerException("If Grid.horiz is a function, it must take two endpoints and return a list of values")
            elif self.horiz is None:
                self._horiz = []
            else:
                raise ContainerException("Grid.horiz must be None, a list of values, or a function returning a list of values (given endpoints)")
        # Vertical line positions come from the x interval.
        try:
            self._vert = [value for value in self.vert]
        except TypeError:
            if callable(self.vert):
                try:
                    self._vert = self.vert(xmin, xmax)
                except TypeError:
                    raise ContainerException("If Grid.vert is a function, it must take two endpoints and return a list of values")
            elif self.vert is None:
                self._vert = []
            else:
                raise ContainerException("Grid.vert must be None, a list of values, or a function returning a list of values (given endpoints)")
######################################################### User-defined plot legend
class Legend(Frame):
    """Represents a table of information to overlay on a plot.

    Arguments:
        fields (list of lists): table data; may include text, numbers,
            and objects with line, fill, or marker styles
        colwid (list of numbers): column widths as fractions of the
            whole width (minus padding); e.g. [0.5, 0.25, 0.25]
        justify (list of "l", "m", "r"): column justification: "l" for
            left, "m" or "c" for middle, and "r" for right
        x, y (numbers): position of the legend box (use with
            `anchor`) in units of frame width; e.g. (1, 1) is the
            top-right corner, (0, 0) is the bottom-left corner
        width (number): width of the legend box in units of frame width
        height (number or `Auto`): height of the legend box in units of
            frame width or `Auto` to calculate from the number of rows,
            `baselineskip`, and `padding`
        anchor (2-character string): placement of the legend box
            relative to `x`, `y`; one character is "t"/"m"("c")/"b" for
            top/middle/bottom, the other "l"/"m"("c")/"r" for
            left/middle/right (in either order)
        textscale (number): scale factor for text (1 is normal)
        padding (number): extra space between the legend box and its
            contents, as a fraction of the whole SVG document
        baselineskip (number): space to skip between rows of the table,
            as a fraction of the whole SVG document
        linewidth (float): scale factor to resize legend box line width
        linestyle (tuple or string): "solid", "dashed", "dotted", or a
            tuple of numbers representing a dash-pattern
        linecolor (string, color, or `None`): color of the boundary
            around the legend box; no line if `None`
        fillcolor (string, color, or `None`): fill color of the legend
            box; hollow if `None`
        `**frameargs`: keyword arguments for the coordinate frame

    Public members:
        `fields`, `colwid`, `justify`, `x`, `y`, `width`, `height`,
        `anchor`, `textscale`, `padding`, `baselineskip`, `linewidth`,
        `linestyle`, `linecolor`, `fillcolor`, and frame arguments.

    Considerations:
        `Legend` is a drawable data container on its own, not attached
        to any histogram or scatter plot. To overlay a `Legend` on
        another plot, use the `Overlay` command, and be sure to point
        `Overlay.frame` to the desired plot::

            Overlay(plot, legend, frame=0)

        Legends will always be drawn _above_ the frame (and therefore
        also above all other plots in an overlay).
    """
    _not_frameargs = ["colwid", "justify", "x", "y", "width", "height", "anchor", "textscale", "padding", "baselineskip", "linewidth", "linestyle", "linecolor", "fillcolor"]

    def __init__(self, fields, colwid=Auto, justify="l", x=1., y=1., width=0.4, height=Auto, anchor="tr", textscale=1., padding=0.01, baselineskip=0.035, linewidth=1., linestyle="solid", linecolor="black", fillcolor="white", **frameargs):
        self.fields, self.colwid, self.justify, self.x, self.y, self.width, self.height, self.anchor, self.textscale, self.padding, self.baselineskip, self.linewidth, self.linestyle, self.linecolor, self.fillcolor = fields, colwid, justify, x, y, width, height, anchor, textscale, padding, baselineskip, linewidth, linestyle, linecolor, fillcolor
        # FIX: accept **frameargs and initialize the Frame base class, as
        # the docstring promises and as sibling containers (Line, Grid)
        # do; previously frame arguments were silently rejected and
        # Frame.__init__ was never called.
        Frame.__init__(self, **frameargs)

    def __repr__(self):
        return "<Legend %dx%d>" % self.dimensions()

    def dimensions(self):
        """Determine the number of rows and columns in `fields`."""
        rows = 1
        columns = 1
        if not isinstance(self.fields, basestring):
            # a non-iterable object counts as a single 1x1 cell
            iterable = False
            try:
                iter(self.fields)
                iterable = True
            except TypeError: pass
            if iterable:
                rows -= 1
                for line in self.fields:
                    if not isinstance(line, basestring):
                        # column count is the widest iterable row
                        length = 0
                        try:
                            for cell in line:
                                length += 1
                        except TypeError: pass
                        if length > columns: columns = length
                    rows += 1
        return rows, columns

    def _prepare(self, xmin=None, ymin=None, xmax=None, ymax=None, xlog=None, ylog=None):
        self._rows, self._columns = self.dimensions()
        # make _fields a rectangular array with None in missing fields
        self._fields = [[None for j in range(self._columns)] for i in range(self._rows)]
        if isinstance(self.fields, basestring):
            self._fields[0][0] = self.fields
        else:
            iterable = False
            try:
                iter(self.fields)
                iterable = True
            except TypeError: pass
            if not iterable:
                self._fields[0][0] = self.fields
            else:
                for i, line in enumerate(self.fields):
                    if isinstance(line, basestring):
                        self._fields[i][0] = line
                    else:
                        lineiterable = False
                        try:
                            iter(line)
                            lineiterable = True
                        except TypeError: pass
                        if not lineiterable:
                            self._fields[i][0] = line
                        else:
                            for j, cell in enumerate(line):
                                self._fields[i][j] = cell
        # take user input if available, fill in what's remaining by evenly
        # splitting the difference
        if self.colwid is Auto:
            self._colwid = [1./self._columns]*self._columns
        else:
            self._colwid = list(self.colwid[:self._columns])
            if len(self._colwid) < self._columns:
                if sum(self._colwid) < 1.:
                    width = (1. - sum(self._colwid)) / (self._columns - len(self._colwid))
                    self._colwid.extend([width]*(self._columns - len(self._colwid)))
                else:
                    # or put in typical values if we have to normalize anyway
                    average = float(sum(self._colwid))/len(self._colwid)
                    self._colwid.extend([average]*(self._columns - len(self._colwid)))
        # normalize: sum of colwid = 1
        total = 1.*sum(self._colwid)
        for i in range(len(self._colwid)):
            self._colwid[i] /= total
        # if we only get one directive, repeat it for all columns
        if self.justify is Auto or self.justify == "l":
            self._justify = ["l"]*self._columns
        elif self.justify == "m" or self.justify == "c":
            self._justify = ["m"]*self._columns
        elif self.justify == "r":
            self._justify = ["r"]*self._columns
        else:
            # take all user input and fill in whatever's missing with "l"
            self._justify = list(self.justify[:self._columns])
            if len(self._justify) < self._columns:
                self._justify.extend(["l"]*(self._columns - len(self._justify)))
        # anchor characters may be given in either order ("tr" == "rt")
        self._anchor = [None, None]
        if len(self.anchor) == 2:
            if self.anchor[0] == "t": self._anchor[0] = "t"
            if self.anchor[0] in ("m", "c"): self._anchor[0] = "m"
            if self.anchor[0] == "b": self._anchor[0] = "b"
            if self.anchor[1] == "l": self._anchor[1] = "l"
            if self.anchor[1] in ("m", "c"): self._anchor[1] = "m"
            if self.anchor[1] == "r": self._anchor[1] = "r"
            # try the letters backward
            if self._anchor[0] is None or self._anchor[1] is None:
                self._anchor = [None, None]
                if self.anchor[1] == "t": self._anchor[0] = "t"
                if self.anchor[1] in ("m", "c"): self._anchor[0] = "m"
                if self.anchor[1] == "b": self._anchor[0] = "b"
                if self.anchor[0] == "l": self._anchor[1] = "l"
                if self.anchor[0] in ("m", "c"): self._anchor[1] = "m"
                if self.anchor[0] == "r": self._anchor[1] = "r"
        if self._anchor[0] is None or self._anchor[1] is None:
            raise ContainerException("Legend.anchor not recognized: \"%s\"" % self.anchor)
class Style:
    """A bundle of line, fill, and marker styling; not drawable by itself.

    Arguments:
        linewidth (float): scale factor to resize line width
        linestyle (tuple or string): "solid", "dashed", "dotted", or a
            tuple of numbers representing a dash-pattern
        linecolor (string, color, or `None`): stroke color
        fillcolor (string, color, or `None`): fill color
        marker (string or `None`): symbol at each point
        markersize (float): scale factor to resize marker points
        markercolor (string, color, or `None`): fill color for markers
        markeroutline (string, color, or `None`): stroke color for markers

    Public members:
        `linewidth`, `linestyle`, `linecolor`, `fillcolor`, `marker`,
        `markersize`, `markercolor`, and `markeroutline`.

    Purpose:
        Can be used in place of a real Histogram/Scatter/etc. in Legend.
    """
    def __init__(self, linewidth=1., linestyle="solid", linecolor=None, fillcolor=None, marker=None, markersize=1., markercolor="black", markeroutline=None):
        self.linewidth = linewidth
        self.linestyle = linestyle
        self.linecolor = linecolor
        self.fillcolor = fillcolor
        self.marker = marker
        self.markersize = markersize
        self.markercolor = markercolor
        self.markeroutline = markeroutline

    def __repr__(self):
        # start with one empty entry so that joining yields a leading
        # space only when at least one attribute is shown
        parts = [""]
        if self.linecolor is not None:
            parts += ["linewidth=%g" % self.linewidth,
                      "linestyle=%s" % str(self.linestyle),
                      "linecolor=%s" % str(self.linecolor)]
        if self.fillcolor is not None:
            parts.append("fillcolor=%s" % str(self.fillcolor))
        if self.marker is not None:
            parts += ["marker=%s" % str(self.marker),
                      "markersize=%g" % self.markersize,
                      "markercolor=%s" % str(self.markercolor)]
        return "<Style%s>" % " ".join(parts)
######################################################### Interactive table for a PAW-style analysis
class InspectTable(UniTable):
"""Load, manipulate, and plot data quickly and interactively.
Class members:
cache_limit (int or `None`): a maximum number of preselected
subtables to cache
"""
# Maximum number of preselected subtables to keep cached (None = unbounded).
cache_limit = 10
# Splitter for comma-separated expression lists.  FIX: raw string — the
# old "\s*,\s*" relies on Python passing unknown escapes through, which
# is a DeprecationWarning/SyntaxWarning on modern interpreters; the
# compiled pattern is identical.
_comma = re.compile(r"\s*,\s*")
def __repr__(self):
    """Summarize the table as its column (key) and row counts."""
    nkeys = len(self.keys())
    nrows = len(self)
    return "<InspectTable %d keys %d rows>" % (nkeys, nrows)
def _setup_cache(self):
    """Create the subtable cache structures on first use; no-op afterward."""
    if getattr(self, "_cache_subtables", None) is not None:
        return
    self._cache_subtables = {}
    self._cache_order = []
def __call__(self, expr, cuts=None, use_cache=True):
    """Select and return a subtable based on the expression and cuts.

    Arguments:
        expr (string): expression to evaluate in the namespace of
            the table and plot
        cuts (string): expression for filtering out unwanted data
        use_cache (bool): if True, keep track of all preselected
            subtables (it is likely that the user will want them again)
    """
    if cuts is None or cuts == "":
        subtable = self
    else:
        if use_cache:
            self._setup_cache()
            # reuse the cached subtable only if the cut string matches AND
            # the table's column set has not changed since it was cached
            if cuts in self._cache_subtables and set(self.keys()) == set(self._cache_subtables[cuts].keys()):
                subtable = self._cache_subtables[cuts]
                # move-to-front: most recently used cut string goes first
                self._cache_order = [cuts] + filter(lambda x: x != cuts, self._cache_order)
            else:
                subtable = self.compress(self.eval(cuts))
                self._cache_subtables[cuts] = subtable
                self._cache_order = [cuts] + filter(lambda x: x != cuts, self._cache_order)
                # evict least-recently-used entries from the back of the list
                if self.cache_limit is not None:
                    while len(self._cache_order) > self.cache_limit:
                        del self._cache_subtables[self._cache_order.pop()]
        else:
            subtable = self.compress(self.eval(cuts))
    return subtable.eval(expr)
def unique(self, expr=None, cuts=None, use_cache=True):
    """Return the set of distinct values (or value-tuples) of `expr`.

    Arguments:
        expr (string or `None`): comma-separated expressions; `None`
            means all fields
        cuts (string): expression for filtering out unwanted data
        use_cache (bool): if True, keep track of all preselected
            subtables (it is likely that the user will want them again)
    """
    if expr is None:
        expr = ",".join(self.keys())
    result = self(expr, cuts, use_cache)
    if isinstance(result, tuple):
        # multiple columns may be heterogeneous, so collect row-tuples
        # in a plain Python set instead of using numpy
        return set(zip(*result))
    return set(numpy.unique(result))
def scan(self, expr=None, cuts=None, subset=slice(0, 10), use_cache=True, width=12):
    """Print a table or subtable of values on the screen.

    Arguments:
        expr (string): comma-separated set of expressions to print
            (if `None`, print all fields)
        cuts (string): expression for filtering out unwanted data
        subset (slice): slice applied to all fields, so that the
            output is manageable
        use_cache (bool): if True, keep track of all preselected
            subtables (it is likely that the user will want them again)
        width (int): character width of each printed column
    """
    if expr is None:
        keys = self.keys()
        expr = ",".join(keys)
    subtable = self(expr, cuts, use_cache)
    fields = re.split(self._comma, expr)
    # build per-column header formats, separators, value formats, and a
    # one-character type tag driving how each value is rendered
    format_fields = []
    separator = []
    format_line = []
    typechar = []
    for field, array in zip(fields, subtable):
        format_fields.append("%%%d.%ds" % (width, width))
        separator.append("=" * width)
        if array.dtype.char in numpy.typecodes["Float"]:
            format_line.append("%%%dg" % width)
            typechar.append("f")
        elif array.dtype.char in numpy.typecodes["AllInteger"]:
            format_line.append("%%%dd" % width)
            typechar.append("i")
        elif array.dtype.char == "?":
            format_line.append("%%%ds" % width)
            typechar.append("?")
        elif array.dtype.char in numpy.typecodes["Complex"]:
            # split the width between real and imaginary parts
            format_line.append("%%%dg+%%%dgj" % ((width-2)//2, (width-2)//2))
            typechar.append("F")
        elif array.dtype.char in numpy.typecodes["Character"] + "Sa":
            format_line.append("%%%d.%ds" % (width, width))
            typechar.append("S")
        # NOTE(review): dtypes outside these groups get no format entry —
        # confirm such columns cannot reach this point.
    format_fields = " ".join(format_fields)
    separator = "=".join(separator)
    print format_fields % tuple(fields)
    print separator
    if isinstance(subtable, tuple):
        # multi-column case: print one formatted row per zipped record
        for records in zip(*[i[subset] for i in subtable]):
            for r, f, c in zip(records, format_line, typechar):
                if c == "F":
                    print f % (r.real, r.imag),
                elif c == "?":
                    if r: print f % "True",
                    else: print f % "False",
                elif c == "S":
                    print f % ("'%s'" % r),
                else:
                    print f % r,
            print
    else:
        # single-column case: subtable is one array
        for record in subtable[subset]:
            if typechar[0] == "F":
                print format_line[0] % (record.real, record.imag)
            elif typechar[0] == "?":
                if record: print format_line[0] % "True"
                else: print format_line[0] % "False"
            elif typechar[0] == "S":
                print format_line[0] % ("'%s'" % record)
            else:
                print format_line[0] % record
def histogram(self, expr, cuts=None, weights=None, numbins=utilities.binning, lowhigh=utilities.calcrange_quartile, use_cache=True, **kwds):
    """Draw and return a histogram based on the expression and cuts.

    Arguments:
        expr (string): expression to evaluate in the namespace of
            the table and plot
        cuts (string): expression for filtering out unwanted data
        weights (string): optional expression for the weight of
            each data entry
        numbins (int or function): number of bins or a function
            that returns an optimized number of bins, given data, low,
            and high
        lowhigh ((low, high) or function): range of the histogram or
            a function that returns an optimized range given the data
        use_cache (bool): if True, keep track of all preselected
            subtables (it is likely that the user will want them again)
        `**kwds`: any other arguments are passed to the Histogram
            constructor

    Raises:
        ContainerException: if expr/weights yield multiple columns, if
            `lowhigh`/`numbins` have unusable types, or if the data's
            dtype is not categorical, integral, or floating-point.
    """
    if numbins is Auto: numbins = utilities.binning
    if lowhigh is Auto: lowhigh = utilities.calcrange_quartile
    data = self(expr, cuts)
    if isinstance(data, tuple):
        raise ContainerException("The expr must return one-dimensional data (no commas!)")
    if weights is not None:
        dataweight = self(weights, cuts)
        # FIX: validate the weights result; the old code re-tested `data`
        # here (copy-paste error), so a comma in `weights` slipped through
        # and failed later with a confusing error.
        if isinstance(dataweight, tuple):
            raise ContainerException("The weights must return one-dimensional data (no commas!)")
    else:
        # unweighted: every entry counts once (float dtype replaces the
        # numpy.float alias removed in NumPy >= 1.20)
        dataweight = numpy.ones(len(data), float)
    if len(data) > 0 and data.dtype.char in numpy.typecodes["Character"] + "SU":
        # string-valued data: one category per distinct value
        bins = numpy.unique(data)
        bins.sort()
        kwds2 = {"xlabel": expr}
        kwds2.update(kwds)
        output = HistogramCategorical(bins, data, dataweight, **kwds2)
    elif len(data) == 0 or data.dtype.char in numpy.typecodes["Float"] + numpy.typecodes["AllInteger"]:
        # numeric data: resolve the range first, then the bin count
        if isinstance(lowhigh, (tuple, list)) and len(lowhigh) == 2 and isinstance(lowhigh[0], (numbers.Number, numpy.number)) and isinstance(lowhigh[1], (numbers.Number, numpy.number)):
            low, high = lowhigh
        elif callable(lowhigh):
            low, high = lowhigh(data, kwds.get("xlog", False))
        else:
            raise ContainerException("The 'lowhigh' argument must be a function or (low, high) tuple.")
        if isinstance(numbins, (int, long)):
            pass
        elif callable(numbins):
            numbins = numbins(data, low, high)
        else:
            raise ContainerException("The 'numbins' argument must be a function or an int.")
        # guard against degenerate results from the optimizers
        if numbins < 1: numbins = 1
        if low >= high: low, high = 0., 1.
        kwds2 = {"xlabel": expr}
        kwds2.update(kwds)
        output = Histogram(numbins, low, high, data, dataweight, **kwds2)
    else:
        raise ContainerException("Unrecognized data type: %s (%s)" % (data.dtype.name, data.dtype.char))
    return output
def timeseries(self, expr, cuts=None, ex=None, ey=None, exl=None, eyl=None, limit=1000, use_cache=True, **kwds):
    """Draw and return a time-series plot based on the expression and cuts.

    Thin convenience wrapper: delegates to ``scatter`` with
    ``timeseries=True``, so x values are interpreted as times.

    Arguments:
        expr (string): expression to evaluate in the namespace of
            the table and plot
        cuts (string): expression for filtering out unwanted data
        ex (string): optional expression for x error bars (in seconds)
        ey (string): optional expression for y error bars
        exl (string): optional expression for x lower error bars (in seconds)
        eyl (string): optional expression for y lower error bars
        limit (int or `None`): set an upper limit on the number of
            points that will be drawn
        use_cache (bool): if True, keep track of all preselected
            subtables (it is likely that the user will want them again)
        `**kwds`: any other arguments are passed to the TimeSeries
            constructor (via ``scatter``)
    """
    return self.scatter(expr, cuts, ex, ey, exl, eyl, limit=limit, timeseries=True, use_cache=use_cache, **kwds)
def scatter(self, expr, cuts=None, ex=None, ey=None, exl=None, eyl=None, limit=1000, timeseries=False, use_cache=True, **kwds):
    """Draw and return a scatter-plot based on the expression and cuts.

    Arguments:
        expr (string): expression to evaluate in the namespace of
            the table and plot; must yield two comma-separated values
        cuts (string): expression for filtering out unwanted data
        ex (string): optional expression for x error bars
        ey (string): optional expression for y error bars
        exl (string): optional expression for x lower error bars
        eyl (string): optional expression for y lower error bars
        limit (int or `None`): set an upper limit on the number of
            points that will be drawn
        timeseries (bool): if True, produce a TimeSeries, rather
            than a Scatter
        use_cache (bool): if True, keep track of all preselected
            subtables (it is likely that the user will want them again)
        `**kwds`: any other arguments are passed to the Scatter
            constructor
    """
    # Axis labels come from splitting the expression on the class's
    # comma pattern (one label per sub-expression).
    fields = re.split(self._comma, expr)
    data = self(expr, cuts)
    # convert one-dimensional complex data into two-dimensional real data
    if not isinstance(data, tuple) and data.dtype.char in numpy.typecodes["Complex"]:
        data = numpy.real(data), numpy.imag(data)
    if not isinstance(data, tuple) or len(data) != 2:
        raise ContainerException, "The expr must return two-dimensional data (include a comma!)"
    xdata, ydata = data
    # Each error-bar expression, when given, is evaluated with the same
    # cuts and must itself be one-dimensional.
    if ex is not None:
        ex = self(ex, cuts)
        if isinstance(ex, tuple):
            raise ContainerException, "The ex must return one-dimensional data"
    if ey is not None:
        ey = self(ey, cuts)
        if isinstance(ey, tuple):
            raise ContainerException, "The ey must return one-dimensional data"
    if exl is not None:
        exl = self(exl, cuts)
        if isinstance(exl, tuple):
            raise ContainerException, "The exl must return one-dimensional data"
    if eyl is not None:
        eyl = self(eyl, cuts)
        if isinstance(eyl, tuple):
            raise ContainerException, "The eyl must return one-dimensional data"
    if timeseries:
        # Numeric x values are passed with informat=None (already numbers);
        # string x values are left for TimeSeries to parse.
        if xdata.dtype.char in numpy.typecodes["Float"] + numpy.typecodes["AllInteger"]:
            kwds2 = {"xlabel": fields[0], "ylabel": fields[1]}
            kwds2.update(kwds)
            output = TimeSeries(x=xdata, y=ydata, ex=ex, ey=ey, exl=exl, eyl=eyl, informat=None, limit=limit, **kwds2)
        elif xdata.dtype.char in numpy.typecodes["Character"] + "Sa":
            kwds2 = {"xlabel": fields[0], "ylabel": fields[1]}
            kwds2.update(kwds)
            output = TimeSeries(x=xdata, y=ydata, ex=ex, ey=ey, exl=exl, eyl=eyl, limit=limit, **kwds2)
        else:
            raise ContainerException, "Unsupported data type for x of TimeSeries: %s" % xdata.dtype.name
    else:
        kwds2 = {"xlabel": fields[0], "ylabel": fields[1]}
        kwds2.update(kwds)
        output = Scatter(x=xdata, y=ydata, ex=ex, ey=ey, exl=exl, eyl=eyl, limit=limit, **kwds2)
    return output
def inspect(*files, **kwds):
    """Load one or more files into a single InspectTable.

    The first file is loaded directly; each subsequent file is loaded
    into a fresh InspectTable and appended, so the result is one
    concatenated table.  Keyword arguments are forwarded to ``load``.
    """
    table = InspectTable()
    for index, filename in enumerate(files):
        if index == 0:
            table.load(filename, **kwds)
        else:
            table.extend(InspectTable().load(filename, **kwds))
    return table
| apache-2.0 |
cloudtools/awacs | awacs/es.py | 1 | 3882 | # Copyright (c) 2012-2021, Mark Peek <mark@peek.org>
# All rights reserved.
#
# See LICENSE file for full license.
from typing import Optional

from .aws import Action as BaseAction
from .aws import BaseARN
# IAM identifiers for this service: human-readable name and the action
# prefix used in policy statements ("es:<Action>").
service_name = "Amazon Elasticsearch Service"
prefix = "es"
class Action(BaseAction):
    """An IAM policy action in the ``es`` namespace.

    Instances render as ``es:<action>`` in policy documents.
    """

    # `action` may legitimately be omitted, so the annotation must be
    # Optional[str]; the original `action: str = None` is an implicit
    # Optional, which PEP 484 disallows and modern type checkers reject.
    def __init__(self, action: Optional[str] = None) -> None:
        super().__init__(prefix, action)
class ARN(BaseARN):
    """ARN builder for resources in the ``es`` service namespace.

    The service component is fixed to the module-level ``prefix``;
    resource, region, and account default to empty strings.
    """

    def __init__(self, resource: str = "", region: str = "", account: str = "") -> None:
        super().__init__(
            service=prefix, resource=resource, region=region, account=account
        )
# One module-level constant per IAM action in the "es" namespace.
# Multi-line entries are wrapped only for line length; each is a plain
# Action("<name>") instance.
AcceptInboundCrossClusterSearchConnection = Action(
    "AcceptInboundCrossClusterSearchConnection"
)
AddTags = Action("AddTags")
AssociatePackage = Action("AssociatePackage")
CancelElasticsearchServiceSoftwareUpdate = Action(
    "CancelElasticsearchServiceSoftwareUpdate"
)
CreateElasticsearchDomain = Action("CreateElasticsearchDomain")
CreateElasticsearchServiceRole = Action("CreateElasticsearchServiceRole")
CreateOutboundCrossClusterSearchConnection = Action(
    "CreateOutboundCrossClusterSearchConnection"
)
CreatePackage = Action("CreatePackage")
DeleteElasticsearchDomain = Action("DeleteElasticsearchDomain")
DeleteElasticsearchServiceRole = Action("DeleteElasticsearchServiceRole")
DeleteInboundCrossClusterSearchConnection = Action(
    "DeleteInboundCrossClusterSearchConnection"
)
DeleteOutboundCrossClusterSearchConnection = Action(
    "DeleteOutboundCrossClusterSearchConnection"
)
DeletePackage = Action("DeletePackage")
DescribeElasticsearchDomain = Action("DescribeElasticsearchDomain")
DescribeElasticsearchDomainConfig = Action("DescribeElasticsearchDomainConfig")
DescribeElasticsearchDomains = Action("DescribeElasticsearchDomains")
DescribeElasticsearchInstanceTypeLimits = Action(
    "DescribeElasticsearchInstanceTypeLimits"
)
DescribeInboundCrossClusterSearchConnections = Action(
    "DescribeInboundCrossClusterSearchConnections"
)
DescribeOutboundCrossClusterSearchConnections = Action(
    "DescribeOutboundCrossClusterSearchConnections"
)
DescribePackages = Action("DescribePackages")
DescribeReservedElasticsearchInstanceOfferings = Action(
    "DescribeReservedElasticsearchInstanceOfferings"
)
DescribeReservedElasticsearchInstances = Action(
    "DescribeReservedElasticsearchInstances"
)
DissociatePackage = Action("DissociatePackage")
ESCrossClusterGet = Action("ESCrossClusterGet")
ESHttpDelete = Action("ESHttpDelete")
ESHttpGet = Action("ESHttpGet")
ESHttpHead = Action("ESHttpHead")
ESHttpPatch = Action("ESHttpPatch")
ESHttpPost = Action("ESHttpPost")
ESHttpPut = Action("ESHttpPut")
GetCompatibleElasticsearchVersions = Action("GetCompatibleElasticsearchVersions")
GetPackageVersionHistory = Action("GetPackageVersionHistory")
GetUpgradeHistory = Action("GetUpgradeHistory")
GetUpgradeStatus = Action("GetUpgradeStatus")
ListDomainNames = Action("ListDomainNames")
ListDomainsForPackage = Action("ListDomainsForPackage")
ListElasticsearchInstanceTypeDetails = Action("ListElasticsearchInstanceTypeDetails")
ListElasticsearchInstanceTypes = Action("ListElasticsearchInstanceTypes")
ListElasticsearchVersions = Action("ListElasticsearchVersions")
ListPackagesForDomain = Action("ListPackagesForDomain")
ListTags = Action("ListTags")
PurchaseReservedElasticsearchInstance = Action("PurchaseReservedElasticsearchInstance")
PurchaseReservedElasticsearchInstanceOffering = Action(
    "PurchaseReservedElasticsearchInstanceOffering"
)
RejectInboundCrossClusterSearchConnection = Action(
    "RejectInboundCrossClusterSearchConnection"
)
RemoveTags = Action("RemoveTags")
StartElasticsearchServiceSoftwareUpdate = Action(
    "StartElasticsearchServiceSoftwareUpdate"
)
UpdateElasticsearchDomainConfig = Action("UpdateElasticsearchDomainConfig")
UpdatePackage = Action("UpdatePackage")
UpgradeElasticsearchDomain = Action("UpgradeElasticsearchDomain")
| bsd-2-clause |
dakcarto/QGIS | python/plugins/processing/algs/qgis/VectorLayerScatterplot.py | 15 | 3160 | # -*- coding: utf-8 -*-
"""
***************************************************************************
EquivalentNumField.py
---------------------
Date : January 2013
Copyright : (C) 2013 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'January 2013'
__copyright__ = '(C) 2013, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import matplotlib.pyplot as plt
import matplotlib.pylab as lab
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterTableField
from processing.core.outputs import OutputHTML
from processing.tools import vector
from processing.tools import dataobjects
class VectorLayerScatterplot(GeoAlgorithm):
    """Processing algorithm that draws a scatterplot of two numeric
    attributes of a vector layer and writes it to an HTML report
    (the plot itself is saved as a sibling PNG file).
    """

    INPUT = 'INPUT'
    OUTPUT = 'OUTPUT'
    XFIELD = 'XFIELD'
    YFIELD = 'YFIELD'

    def defineCharacteristics(self):
        """Declare the algorithm's name, group, parameters and output."""
        self.name, self.i18n_name = self.trAlgorithm('Vector layer scatterplot')
        self.group, self.i18n_group = self.trAlgorithm('Graphics')
        self.addParameter(ParameterVector(self.INPUT,
                                          self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_ANY]))
        self.addParameter(ParameterTableField(self.XFIELD,
                                              self.tr('X attribute'), self.INPUT,
                                              ParameterTableField.DATA_TYPE_NUMBER))
        self.addParameter(ParameterTableField(self.YFIELD,
                                              self.tr('Y attribute'), self.INPUT,
                                              ParameterTableField.DATA_TYPE_NUMBER))
        self.addOutput(OutputHTML(self.OUTPUT, self.tr('Scatterplot')))

    def processAlgorithm(self, progress):
        """Render the scatterplot PNG and write the HTML wrapper file."""
        layer = dataobjects.getObjectFromUri(
            self.getParameterValue(self.INPUT))
        xfieldname = self.getParameterValue(self.XFIELD)
        yfieldname = self.getParameterValue(self.YFIELD)
        output = self.getOutputValue(self.OUTPUT)
        values = vector.values(layer, xfieldname, yfieldname)
        plt.close()  # discard any figure left over from a previous run
        plt.scatter(values[xfieldname], values[yfieldname])
        plt.ylabel(yfieldname)
        plt.xlabel(xfieldname)
        plotFilename = output + '.png'
        lab.savefig(plotFilename)
        # Bug fix: use a context manager so the report file is closed even
        # if the write fails (the original leaked the handle on error).
        with open(output, 'w') as f:
            f.write('<html><img src="' + plotFilename + '"/></html>')
| gpl-2.0 |
spidercensus/py-junos-eznc | lib/jnpr/junos/__init__.py | 2 | 1451 | from jnpr.junos.device import Device
from jnpr.junos.console import Console
from jnpr.junos.factory.to_json import PyEzJSONEncoder
from jnpr.junos.facts.swver import version_info
from jnpr.junos.facts.swver import version_yaml_representer
from . import jxml
from . import jxml as JXML
from . import version
from . import exception
import json
import yaml
import logging
import sys
import warnings
# Warn (once, at import time) on the oldest interpreter still accepted.
if sys.version_info[:2] == (2, 6):
    warnings.warn(
        "Python 2.6 is no longer supported by the Python core team, please "
        "upgrade your Python. A future version of PyEZ will drop "
        "support for Python 2.6",
        DeprecationWarning
    )

__version__ = version.VERSION
__date__ = version.DATE

# import time
# __date__ = time.strftime("%Y-%b-%d")

# Set default JSON encoder.
# NOTE(review): this monkey-patches the stdlib json module's private
# default encoder, so it affects every json.dumps call process-wide.
json._default_encoder = PyEzJSONEncoder()

# Disable ignore_aliases for YAML dumper
# To support version_info
yaml.dumper.SafeDumper.ignore_aliases = lambda self, data: True
yaml.dumper.Dumper.ignore_aliases = lambda self, data: True

# Add YAML representer for version_info (both safe and unsafe dumpers).
yaml.Dumper.add_multi_representer(version_info, version_yaml_representer)
yaml.SafeDumper.add_multi_representer(version_info, version_yaml_representer)

# Suppress Paramiko logger warnings by installing a do-nothing handler
# (only if the application has not configured paramiko logging itself).
plog = logging.getLogger('paramiko')
if not plog.handlers:
    class NullHandler(logging.Handler):
        # Hand-rolled equivalent of logging.NullHandler, presumably kept
        # for Python 2.6 compatibility — TODO confirm.
        def emit(self, record):
            pass
    plog.addHandler(NullHandler())
| apache-2.0 |
IntersectAustralia/asvo-tao | web/tao/tests/workflow_api_tests.py | 1 | 3318 | import json
from django.core.urlresolvers import reverse
from django.test import Client, TestCase
from django.test.utils import override_settings
from tao.tests.support.factories import JobFactory, UserFactory, WorkflowCommandFactory
class WorkflowApiTest(TestCase):
    """IP-allowlist access tests for the v1 workflowcommand REST API."""

    def setUp(self):
        super(WorkflowApiTest, self).setUp()
        self.client = Client()
        # All requests appear to come from this address; tests toggle the
        # API_ALLOWED_IPS setting around it.
        self.client.defaults = {'REMOTE_ADDR': '123.2.3.4'}
        user = UserFactory.create()
        self.job = JobFactory.create(user=user)
        self.wfcommand = WorkflowCommandFactory.create(job_id=self.job, submitted_by=user, execution_status='QUEUED')
        self.url_all_wfcommand = reverse('api_dispatch_list', kwargs={'resource_name': 'workflowcommand', 'api_name': 'v1'})
        # print self.url_all_wfcommand
        self.url_by_wf_id = reverse('api_dispatch_detail', kwargs={'resource_name': 'workflowcommand', 'api_name': 'v1', 'pk': self.wfcommand.id})
        # self.url_by_job_id = reverse('api_dipatch_detail', kwargs={'resource_name': 'workflowcommand', 'api_name': 'v1'}, job_id = job.id)
        # print self.url_by_job_id
        self.data = {'format': 'json'}

    def tearDown(self):
        super(WorkflowApiTest, self).tearDown()

    @override_settings(API_ALLOWED_IPS=['123.2.3.4'])
    def test_allowed_user_can_read_api(self):
        """GETs succeed (200) when the client IP is allow-listed."""
        resp = self.client.get(self.url_all_wfcommand, data=self.data)
        self.assertEqual(200, resp.status_code)
        resp = self.client.get(self.url_by_wf_id, data=self.data)
        self.assertEqual(200, resp.status_code)
        url_by_job_id = self.url_all_wfcommand + '?job_id=' + str(self.job.id)
        resp = self.client.get(url_by_job_id, data=self.data)
        self.assertEqual(200, resp.status_code)
        url_by_status = self.url_all_wfcommand + '?execution_status=QUEUED'
        resp = self.client.get(url_by_status, data=self.data)
        self.assertEqual(200, resp.status_code)

    @override_settings(API_ALLOWED_IPS=['122.1.1.1'])
    def test_unauthorised_user_cannot_access_api(self):
        """GETs are rejected (401) when the client IP is not allow-listed."""
        resp = self.client.get(self.url_all_wfcommand, data=self.data)
        self.assertEqual(401, resp.status_code)
        resp = self.client.get(self.url_by_wf_id, data=self.data)
        self.assertEqual(401, resp.status_code)
        url_by_job_id = self.url_all_wfcommand + '?job_id=' + str(self.job.id)
        resp = self.client.get(url_by_job_id, data=self.data)
        self.assertEqual(401, resp.status_code)
        url_by_status = self.url_all_wfcommand + '?execution_status=QUEUED'
        resp = self.client.get(url_by_status, data=self.data)
        self.assertEqual(401, resp.status_code)

    @override_settings(API_ALLOWED_IPS=['123.2.3.4'])
    def test_allowed_user_can_update_api(self):
        """PUT succeeds (204 No Content) from an allow-listed IP."""
        self.data = {'execution_comment': 'unit testing PUT'}
        resp = self.client.put(self.url_by_wf_id, data=json.dumps(self.data), content_type='application/json')
        self.assertEqual(204, resp.status_code)

    @override_settings(API_ALLOWED_IPS=['122.1.1.1'])
    def test_unauthorised_user_cannot_update_api(self):
        """PUT is rejected (401) from a non-allow-listed IP."""
        self.data = {'execution_comment': 'unit testing PUT'}
        resp = self.client.put(self.url_by_wf_id, data=json.dumps(self.data), content_type='application/json')
        self.assertEqual(401, resp.status_code)
miguelpalacio/python-for-android | python3-alpha/python-libs/gdata/Crypto/Hash/HMAC.py | 3 | 3293 | """HMAC (Keyed-Hashing for Message Authentication) Python module.
Implements the HMAC algorithm as described by RFC 2104.
This is just a copy of the Python 2.2 HMAC module, modified to work when
used on versions of Python before 2.2.
"""
__revision__ = "$Id$"
import string
def _strxor(s1, s2):
"""Utility method. XOR the two strings s1 and s2 (must have same length).
"""
return "".join(map(lambda x, y: chr(ord(x) ^ ord(y)), s1, s2))
# The size of the digests returned by HMAC depends on the underlying
# hashing module used, so the module-level value required by the
# PEP 247 interface cannot be known up front.
digest_size = None
class HMAC:
    """RFC2104 HMAC class.

    This supports the API for Cryptographic Hash Functions (PEP 247).
    """

    def __init__(self, key, msg = None, digestmod = None):
        """Create a new HMAC object.

        key:       key for the keyed hash object.
        msg:       Initial input for the hash, if provided.
        digestmod: A module supporting PEP 247. Defaults to the md5 module.
        """
        if digestmod == None:
            from . import md5
            digestmod = md5
        self.digestmod = digestmod
        # Two running hashes per RFC 2104: result = H(K^opad || H(K^ipad || msg)).
        self.outer = digestmod.new()
        self.inner = digestmod.new()
        try:
            self.digest_size = digestmod.digest_size
        except AttributeError:
            # Module doesn't advertise digest_size; measure it empirically.
            self.digest_size = len(self.outer.digest())
        # RFC 2104 block size, hard-coded (valid for MD5/SHA-1 family).
        blocksize = 64
        ipad = "\x36" * blocksize
        opad = "\x5C" * blocksize
        # Keys longer than one block are first hashed down, then the key
        # is zero-padded to exactly one block (RFC 2104).
        if len(key) > blocksize:
            key = digestmod.new(key).digest()
        key = key + chr(0) * (blocksize - len(key))
        self.outer.update(_strxor(key, opad))
        self.inner.update(_strxor(key, ipad))
        if (msg):
            self.update(msg)

    ## def clear(self):
    ##     raise NotImplementedError, "clear() method not available in HMAC."

    def update(self, msg):
        """Update this hashing object with the string msg."""
        self.inner.update(msg)

    def copy(self):
        """Return a separate copy of this hashing object.

        An update to this copy won't affect the original object.
        """
        # HMAC("") builds a throwaway instance whose state is then
        # overwritten wholesale with copies of ours.
        other = HMAC("")
        other.digestmod = self.digestmod
        other.inner = self.inner.copy()
        other.outer = self.outer.copy()
        return other

    def digest(self):
        """Return the hash value of this hashing object.

        This returns a string containing 8-bit data. The object is
        not altered in any way by this function; you can continue
        updating the object after calling this function.
        """
        # Finalize on a copy so the running outer state stays reusable.
        h = self.outer.copy()
        h.update(self.inner.digest())
        return h.digest()

    def hexdigest(self):
        """Like digest(), but returns a string of hexadecimal digits instead."""
        # NOTE(review): string.zfill was removed in Python 3; under Py3 this
        # would need hex(...)[2:].zfill(2) — confirm the target runtime.
        return "".join([string.zfill(hex(ord(x))[2:], 2)
                        for x in tuple(self.digest())])
def new(key, msg = None, digestmod = None):
    """Create a new hashing object and return it.

    key: The starting key for the hash.
    msg: if available, will immediately be hashed into the object's starting
        state.

    You can now feed arbitrary strings into the object using its update()
    method, and can ask for the hash value at any time by calling its digest()
    method.
    """
    # PEP 247 module-level constructor: thin wrapper over the HMAC class.
    return HMAC(key, msg, digestmod)
| apache-2.0 |
pradeep-aradhya/security_monkey | security_monkey/views/account.py | 6 | 12443 | # Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from security_monkey.views import AuthenticatedService
from security_monkey.views import __check_auth__
from security_monkey.views import ACCOUNT_FIELDS
from security_monkey.datastore import Account
from security_monkey.datastore import User
from security_monkey import db
from flask.ext.restful import marshal, reqparse
class AccountGetPutDelete(AuthenticatedService):
    """REST resource for a single Account: GET, PUT (update), DELETE."""

    def __init__(self):
        self.reqparse = reqparse.RequestParser()
        super(AccountGetPutDelete, self).__init__()

    def get(self, account_id):
        """
        .. http:get:: /api/1/account/<int:id>

            Get a list of Accounts matching the given criteria

            **Example Request**:

            .. sourcecode:: http

                GET /api/1/account/1 HTTP/1.1
                Host: example.com
                Accept: application/json, text/javascript

            **Example Response**:

            .. sourcecode:: http

                HTTP/1.1 200 OK
                Vary: Accept
                Content-Type: application/json

                {
                    third_party: false,
                    name: "example_name",
                    notes: null,
                    role_name: null,
                    number: "111111111111",
                    active: true,
                    id: 1,
                    s3_name: "example_name",
                    auth: {
                        authenticated: true,
                        user: "user@example.com"
                    }
                }

            :statuscode 200: no error
            :statuscode 401: Authentication failure. Please login.
        """
        auth, retval = __check_auth__(self.auth_dict)
        if auth:
            return retval

        result = Account.query.filter(Account.id == account_id).first()

        # NOTE(review): an unknown account_id leaves `result` as None and
        # `result.__dict__` raises — no 404 path here; confirm intended.
        account_marshaled = marshal(result.__dict__, ACCOUNT_FIELDS)
        account_marshaled['auth'] = self.auth_dict
        return account_marshaled, 200

    def put(self, account_id):
        """
        .. http:put:: /api/1/account/1

            Edit an existing account.

            **Example Request**:

            .. sourcecode:: http

                PUT /api/1/account/1 HTTP/1.1
                Host: example.com
                Accept: application/json

                {
                    'name': 'edited_account'
                    's3_name': 'edited_account',
                    'number': '0123456789',
                    'notes': 'this account is for ...',
                    'role_name': 'CustomRole',
                    'active': true,
                    'third_party': false
                }

            **Example Response**:

            .. sourcecode:: http

                HTTP/1.1 200 OK
                Vary: Accept
                Content-Type: application/json

                {
                    'name': 'edited_account'
                    's3_name': 'edited_account',
                    'number': '0123456789',
                    'notes': 'this account is for ...',
                    'role_name': 'CustomRole',
                    'active': true,
                    'third_party': false
                }

            :statuscode 200: no error
            :statuscode 401: Authentication Error. Please Login.
        """
        auth, retval = __check_auth__(self.auth_dict)
        if auth:
            return retval

        # All fields are optional on PUT; missing JSON keys parse as None.
        self.reqparse.add_argument('name', required=False, type=unicode, help='Must provide account name', location='json')
        self.reqparse.add_argument('s3_name', required=False, type=unicode, help='Will use name if s3_name not provided.', location='json')
        self.reqparse.add_argument('number', required=False, type=unicode, help='Add the account number if available.', location='json')
        self.reqparse.add_argument('notes', required=False, type=unicode, help='Add context.', location='json')
        self.reqparse.add_argument('role_name', required=False, type=unicode, help='Custom role name.', location='json')
        self.reqparse.add_argument('active', required=False, type=bool, help='Determines whether this account should be interrogated by security monkey.', location='json')
        self.reqparse.add_argument('third_party', required=False, type=bool, help='Determines whether this account is a known friendly third party account.', location='json')
        args = self.reqparse.parse_args()

        account = Account.query.filter(Account.id == account_id).first()
        if not account:
            return {'status': 'error. Account ID not found.'}, 404

        account.name = args['name']
        account.s3_name = args['s3_name']
        account.number = args['number']
        account.notes = args['notes']
        account.role_name = args['role_name']
        account.active = args['active']
        account.third_party = args['third_party']
        db.session.add(account)
        db.session.commit()
        # Refresh so the marshaled response reflects DB-side state.
        db.session.refresh(account)

        marshaled_account = marshal(account.__dict__, ACCOUNT_FIELDS)
        marshaled_account['auth'] = self.auth_dict
        return marshaled_account, 200

    def delete(self, account_id):
        """
        .. http:delete:: /api/1/account/1

            Delete an account.

            **Example Request**:

            .. sourcecode:: http

                DELETE /api/1/account/1 HTTP/1.1
                Host: example.com
                Accept: application/json

            **Example Response**:

            .. sourcecode:: http

                HTTP/1.1 202 Accepted
                Vary: Accept
                Content-Type: application/json

                {
                    'status': 'deleted'
                }

            :statuscode 202: accepted
            :statuscode 401: Authentication Error. Please Login.
        """
        auth, retval = __check_auth__(self.auth_dict)
        if auth:
            return retval

        # Need to unsubscribe any users first:
        users = User.query.filter(User.accounts.any(Account.id == account_id)).all()
        for user in users:
            user.accounts = [account for account in user.accounts if not account.id == account_id]
            db.session.add(user)
        db.session.commit()

        account = Account.query.filter(Account.id == account_id).first()
        db.session.delete(account)
        db.session.commit()
        return {'status': 'deleted'}, 202
class AccountPostList(AuthenticatedService):
    """REST resource for the account collection: POST (create), GET (list)."""

    def __init__(self):
        super(AccountPostList, self).__init__()
        self.reqparse = reqparse.RequestParser()

    def post(self):
        """
        .. http:post:: /api/1/account/

            Create a new account.

            **Example Request**:

            .. sourcecode:: http

                POST /api/1/account/ HTTP/1.1
                Host: example.com
                Accept: application/json

                {
                    'name': 'new_account'
                    's3_name': 'new_account',
                    'number': '0123456789',
                    'notes': 'this account is for ...',
                    'role_name': 'CustomRole',
                    'active': true,
                    'third_party': false
                }

            **Example Response**:

            .. sourcecode:: http

                HTTP/1.1 201 Created
                Vary: Accept
                Content-Type: application/json

                {
                    'name': 'new_account'
                    's3_name': 'new_account',
                    'number': '0123456789',
                    'notes': 'this account is for ...',
                    'role_name': 'CustomRole',
                    'active': true,
                    'third_party': false
                }

            :statuscode 201: created
            :statuscode 401: Authentication Error. Please Login.
        """
        auth, retval = __check_auth__(self.auth_dict)
        if auth:
            return retval

        self.reqparse.add_argument('name', required=True, type=unicode, help='Must provide account name', location='json')
        self.reqparse.add_argument('s3_name', required=False, type=unicode, help='Will use name if s3_name not provided.', location='json')
        self.reqparse.add_argument('number', required=False, type=unicode, help='Add the account number if available.', location='json')
        self.reqparse.add_argument('notes', required=False, type=unicode, help='Add context.', location='json')
        self.reqparse.add_argument('role_name', required=False, type=unicode, help='Custom role name.', location='json')
        self.reqparse.add_argument('active', required=False, type=bool, help='Determines whether this account should be interrogated by security monkey.', location='json')
        self.reqparse.add_argument('third_party', required=False, type=bool, help='Determines whether this account is a known friendly third party account.', location='json')
        args = self.reqparse.parse_args()

        account = Account()
        account.name = args['name']
        account.s3_name = args.get('s3_name', args['name'])
        account.number = args['number']
        account.notes = args['notes']
        # Bug fix: role_name was parsed above but never stored, so accounts
        # created via POST silently lost their custom role (PUT does set it).
        account.role_name = args['role_name']
        account.active = args['active']
        account.third_party = args['third_party']
        db.session.add(account)
        db.session.commit()
        # Refresh so the marshaled response reflects DB-side state (id, etc.).
        db.session.refresh(account)

        marshaled_account = marshal(account.__dict__, ACCOUNT_FIELDS)
        marshaled_account['auth'] = self.auth_dict
        return marshaled_account, 201

    def get(self):
        """
        .. http:get:: /api/1/accounts

            Get a list of Accounts matching the given criteria

            **Example Request**:

            .. sourcecode:: http

                GET /api/1/accounts HTTP/1.1
                Host: example.com
                Accept: application/json, text/javascript

            **Example Response**:

            .. sourcecode:: http

                HTTP/1.1 200 OK
                Vary: Accept
                Content-Type: application/json

                {
                    count: 1,
                    items: [
                        {
                            third_party: false,
                            name: "example_name",
                            notes: null,
                            role_name: null,
                            number: "111111111111",
                            active: true,
                            id: 1,
                            s3_name: "example_name"
                        },
                    ],
                    total: 1,
                    page: 1,
                    auth: {
                        authenticated: true,
                        user: "user@example.com"
                    }
                }

            :statuscode 200: no error
            :statuscode 401: Authentication failure. Please login.
        """
        auth, retval = __check_auth__(self.auth_dict)
        if auth:
            return retval

        self.reqparse.add_argument('count', type=int, default=30, location='args')
        self.reqparse.add_argument('page', type=int, default=1, location='args')
        args = self.reqparse.parse_args()
        # Pop pagination args so only filter criteria would remain in `args`.
        page = args.pop('page', None)
        count = args.pop('count', None)

        result = Account.query.order_by(Account.id).paginate(page, count, error_out=False)

        items = []
        for account in result.items:
            account_marshaled = marshal(account.__dict__, ACCOUNT_FIELDS)
            items.append(account_marshaled)

        marshaled_dict = {
            'total': result.total,
            'count': len(items),
            'page': result.page,
            'items': items,
            'auth': self.auth_dict
        }
        return marshaled_dict, 200
| apache-2.0 |
ojengwa/oh-mainline | vendor/packages/docutils/docutils/transforms/universal.py | 106 | 10355 | # $Id: universal.py 7668 2013-06-04 12:46:30Z milde $
# -*- coding: utf-8 -*-
# Authors: David Goodger <goodger@python.org>; Ueli Schlaepfer; Günter Milde
# Maintainer: docutils-develop@lists.sourceforge.net
# Copyright: This module has been placed in the public domain.
"""
Transforms needed by most or all documents:
- `Decorations`: Generate a document's header & footer.
- `Messages`: Placement of system messages stored in
`nodes.document.transform_messages`.
- `TestMessages`: Like `Messages`, used on test runs.
- `FinalReferences`: Resolve remaining references.
"""
__docformat__ = 'reStructuredText'
import re
import sys
import time
from docutils import nodes, utils
from docutils.transforms import TransformError, Transform
from docutils.utils import smartquotes
class Decorations(Transform):

    """
    Populate a document's decoration element (header, footer).
    """

    default_priority = 820

    def apply(self):
        # Only touch the document's decoration element when a subclass
        # actually produced header/footer content.
        header_nodes = self.generate_header()
        if header_nodes:
            decoration = self.document.get_decoration()
            header = decoration.get_header()
            header.extend(header_nodes)
        footer_nodes = self.generate_footer()
        if footer_nodes:
            decoration = self.document.get_decoration()
            footer = decoration.get_footer()
            footer.extend(footer_nodes)

    def generate_header(self):
        # Hook for subclasses; the base transform adds no header content.
        return None

    def generate_footer(self):
        """Build footer nodes (source link, datestamp, generator credit)
        from the document settings; return None when nothing is enabled."""
        # @@@ Text is hard-coded for now.
        # Should be made dynamic (language-dependent).
        settings = self.document.settings
        if settings.generator or settings.datestamp or settings.source_link \
               or settings.source_url:
            text = []
            if settings.source_link and settings._source \
                   or settings.source_url:
                # An explicit source_url wins over a computed relative link.
                if settings.source_url:
                    source = settings.source_url
                else:
                    source = utils.relative_path(settings._destination,
                                                 settings._source)
                text.extend([
                    nodes.reference('', 'View document source',
                                    refuri=source),
                    nodes.Text('.\n')])
            if settings.datestamp:
                # strftime format comes straight from the setting value.
                datestamp = time.strftime(settings.datestamp, time.gmtime())
                text.append(nodes.Text('Generated on: ' + datestamp + '.\n'))
            if settings.generator:
                text.extend([
                    nodes.Text('Generated by '),
                    nodes.reference('', 'Docutils', refuri=
                                    'http://docutils.sourceforge.net/'),
                    nodes.Text(' from '),
                    nodes.reference('', 'reStructuredText', refuri='http://'
                                    'docutils.sourceforge.net/rst.html'),
                    nodes.Text(' source.\n')])
            return [nodes.paragraph('', '', *text)]
        else:
            return None
class ExposeInternals(Transform):

    """
    Copy selected internal attributes onto document nodes as visible
    ``internal:<name>`` attributes, when the ``expose_internals`` setting
    lists attribute names.
    """

    default_priority = 840

    def not_Text(self, node):
        # Traversal filter: every node except plain text leaves.
        return not isinstance(node, nodes.Text)

    def apply(self):
        attribute_names = self.document.settings.expose_internals
        if not attribute_names:
            return
        for node in self.document.traverse(self.not_Text):
            for name in attribute_names:
                value = getattr(node, name, None)
                if value is not None:
                    node['internal:' + name] = value
class Messages(Transform):

    """
    Collect loose post-parse system messages into a dedicated
    "Docutils System Messages" section appended to the document.
    """

    default_priority = 860

    def apply(self):
        threshold = self.document.reporter.report_level
        loose = [message for message in self.document.transform_messages
                 if message['level'] >= threshold and not message.parent]
        if loose:
            section = nodes.section(classes=['system-messages'])
            # @@@ get this from the language module?
            section += nodes.title('', 'Docutils System Messages')
            section += loose
            self.document.transform_messages[:] = []
            self.document += section
class FilterMessages(Transform):

    """
    Drop every system message whose level falls below the reporter's
    verbosity threshold.
    """

    default_priority = 870

    def apply(self):
        threshold = self.document.reporter.report_level
        for message in self.document.traverse(nodes.system_message):
            if message['level'] < threshold:
                message.parent.remove(message)
class TestMessages(Transform):

    """
    Append every unattached post-parse system message to the end of the
    document.  Used for testing purposes.
    """

    default_priority = 880

    def apply(self):
        for message in self.document.transform_messages:
            if not message.parent:
                self.document += message
class StripComments(Transform):

    """
    Remove all comment elements from the document tree, but only when the
    ``strip_comments`` setting is enabled.
    """

    default_priority = 740

    def apply(self):
        if not self.document.settings.strip_comments:
            return
        for comment in self.document.traverse(nodes.comment):
            comment.parent.remove(comment)
class StripClassesAndElements(Transform):

    """
    Remove from the document tree all elements with classes in
    `self.document.settings.strip_elements_with_classes` and all "classes"
    attribute values in `self.document.settings.strip_classes`.
    """

    default_priority = 420

    def apply(self):
        if not (self.document.settings.strip_elements_with_classes
                or self.document.settings.strip_classes):
            return
        # prepare dicts for lookup (not sets, for Python 2.2 compatibility):
        self.strip_elements = dict(
            [(key, None)
             for key in (self.document.settings.strip_elements_with_classes
                         or [])])
        self.strip_classes = dict(
            [(key, None) for key in (self.document.settings.strip_classes
                                     or [])])
        # check_classes doubles as the traversal filter AND strips class
        # values as a side effect while testing each node.
        for node in self.document.traverse(self.check_classes):
            node.parent.remove(node)

    def check_classes(self, node):
        """Strip matching class values in place; return truthy (1) when
        the node itself should be removed from the tree."""
        if isinstance(node, nodes.Element):
            for class_value in node['classes'][:]:
                if class_value in self.strip_classes:
                    node['classes'].remove(class_value)
                if class_value in self.strip_elements:
                    return 1
class SmartQuotes(Transform):
    """
    Replace ASCII quotation marks with typographic form.
    Also replace multiple dashes with em-dash/en-dash characters.
    """
    default_priority = 850
    def __init__(self, document, startnode):
        """Initialize the transform and the per-run set of languages that
        have already triggered a "no smart quotes" warning."""
        Transform.__init__(self, document, startnode=startnode)
        self.unsupported_languages = set()
    def get_tokens(self, txtnodes):
        """Yield ``(texttype, nodetext)`` tuples for a list of Text nodes."""
        # A generator that yields ``(texttype, nodetext)`` tuples for a list
        # of "Text" nodes (interface to ``smartquotes.educate_tokens()``).
        texttype = {True: 'literal', # "literal" text is not changed:
                    False: 'plain'}
        for txtnode in txtnodes:
            nodetype = texttype[isinstance(txtnode.parent,
                                           (nodes.literal,
                                            nodes.math,
                                            nodes.image,
                                            nodes.raw,
                                            nodes.problematic))]
            yield (nodetype, txtnode.astext())
    def apply(self):
        """Educate quotes/dashes in every block-level text element, honouring
        the document language and the ``smart_quotes`` setting."""
        smart_quotes = self.document.settings.smart_quotes
        if not smart_quotes:
            return
        # The setting may be a boolean or a string such as "alt".
        try:
            alternative = smart_quotes.startswith('alt')
        except AttributeError:
            alternative = False
        document_language = self.document.settings.language_code
        # "Educate" quotes in normal text. Handle each block of text
        # (TextElement node) as a unit to keep context around inline nodes:
        for node in self.document.traverse(nodes.TextElement):
            # skip preformatted text blocks and special elements:
            if isinstance(node, (nodes.FixedTextElement, nodes.Special)):
                continue
            # nested TextElements are not "block-level" elements:
            if isinstance(node.parent, nodes.TextElement):
                continue
            # list of text nodes in the "text block":
            txtnodes = [txtnode for txtnode in node.traverse(nodes.Text)
                        if not isinstance(txtnode.parent,
                                          nodes.option_string)]
            # language: use typographical quotes for language "lang"
            lang = node.get_language_code(document_language)
            # use alternative form if `smart-quotes` setting starts with "alt":
            if alternative:
                if '-x-altquot' in lang:
                    lang = lang.replace('-x-altquot', '')
                else:
                    lang += '-x-altquot'
            # drop subtags missing in quotes:
            for tag in utils.normalize_language_tag(lang):
                if tag in smartquotes.smartchars.quotes:
                    lang = tag
                    break
            else: # language not supported: (keep ASCII quotes)
                # Warn only once per language per run.
                if lang not in self.unsupported_languages:
                    self.document.reporter.warning('No smart quotes '
                        'defined for language "%s".'%lang, base_node=node)
                self.unsupported_languages.add(lang)
                lang = ''
            # Iterator educating quotes in plain text:
            # '2': set all, using old school en- and em- dash shortcuts
            teacher = smartquotes.educate_tokens(self.get_tokens(txtnodes),
                                                 attr='2', language=lang)
            for txtnode, newtext in zip(txtnodes, teacher):
                txtnode.parent.replace(txtnode, nodes.Text(newtext))
        self.unsupported_languages = set() # reset
| agpl-3.0 |
sstrigger/Sick-Beard | sickbeard/providers/btn.py | 12 | 13919 | # coding=utf-8
# Author: Daniël Heimans
# URL: http://code.google.com/p/sickbeard
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import sickbeard
import generic
from sickbeard import classes
from sickbeard import scene_exceptions
from sickbeard import logger
from sickbeard import tvcache
from sickbeard.helpers import sanitizeSceneName
from sickbeard.common import Quality
from sickbeard.exceptions import ex, AuthException
from lib import jsonrpclib
from datetime import datetime
import time
import socket
import math
class BTNProvider(generic.TorrentProvider):
def __init__(self):
generic.TorrentProvider.__init__(self, "BTN")
self.supportsBacklog = True
self.cache = BTNCache(self)
self.url = "http://broadcasthe.net"
def isEnabled(self):
return sickbeard.BTN
def imageName(self):
return 'btn.png'
def _checkAuth(self):
if not sickbeard.BTN_API_KEY:
raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")
return True
def _checkAuthFromData(self, parsedJSON):
if parsedJSON is None:
return self._checkAuth()
if 'api-error' in parsedJSON:
logger.log(u"Incorrect authentication credentials for " + self.name + " : " + parsedJSON['api-error'], logger.DEBUG)
raise AuthException("Your authentication credentials for " + self.name + " are incorrect, check your config.")
return True
def _doSearch(self, search_params, show=None, age=0):
self._checkAuth()
params = {}
apikey = sickbeard.BTN_API_KEY
# age in seconds
if age:
params['age'] = "<=" + str(int(age))
if search_params:
params.update(search_params)
parsedJSON = self._api_call(apikey, params)
if not parsedJSON:
logger.log(u"No data returned from " + self.name, logger.ERROR)
return []
if self._checkAuthFromData(parsedJSON):
if 'torrents' in parsedJSON:
found_torrents = parsedJSON['torrents']
else:
found_torrents = {}
# We got something, we know the API sends max 1000 results at a time.
# See if there are more than 1000 results for our query, if not we
# keep requesting until we've got everything.
# max 150 requests per hour so limit at that. Scan every 15 minutes. 60 / 15 = 4.
max_pages = 35
results_per_page = 1000
if 'results' in parsedJSON and int(parsedJSON['results']) >= results_per_page:
pages_needed = int(math.ceil(int(parsedJSON['results']) / results_per_page))
if pages_needed > max_pages:
pages_needed = max_pages
# +1 because range(1,4) = 1, 2, 3
for page in range(1, pages_needed + 1):
parsedJSON = self._api_call(apikey, params, results_per_page, page * results_per_page)
# Note that this these are individual requests and might time out individually. This would result in 'gaps'
# in the results. There is no way to fix this though.
if 'torrents' in parsedJSON:
found_torrents.update(parsedJSON['torrents'])
results = []
for torrentid, torrent_info in found_torrents.iteritems(): # @UnusedVariable
(title, url) = self._get_title_and_url(torrent_info)
if title and url:
results.append(torrent_info)
return results
return []
def _api_call(self, apikey, params={}, results_per_page=1000, offset=0):
server = jsonrpclib.Server('http://api.btnapps.net')
parsedJSON = {}
try:
parsedJSON = server.getTorrents(apikey, params, int(results_per_page), int(offset))
except jsonrpclib.jsonrpc.ProtocolError, error:
logger.log(u"JSON-RPC protocol error while accessing " + self.name + ": " + ex(error), logger.ERROR)
parsedJSON = {'api-error': ex(error)}
return parsedJSON
except socket.timeout:
logger.log(u"Timeout while accessing " + self.name, logger.WARNING)
except socket.error, error:
# Note that sometimes timeouts are thrown as socket errors
logger.log(u"Socket error while accessing " + self.name + ": " + error[1], logger.ERROR)
except Exception, error:
errorstring = str(error)
if(errorstring.startswith('<') and errorstring.endswith('>')):
errorstring = errorstring[1:-1]
logger.log(u"Unknown error while accessing " + self.name + ": " + errorstring, logger.ERROR)
return parsedJSON
def _get_title_and_url(self, parsedJSON):
# The BTN API gives a lot of information in response,
# however Sick Beard is built mostly around Scene or
# release names, which is why we are using them here.
if 'ReleaseName' in parsedJSON and parsedJSON['ReleaseName']:
title = parsedJSON['ReleaseName']
else:
# If we don't have a release name we need to get creative
title = u''
if 'Series' in parsedJSON:
title += parsedJSON['Series']
if 'GroupName' in parsedJSON:
title += '.' + parsedJSON['GroupName'] if title else parsedJSON['GroupName']
if 'Resolution' in parsedJSON:
title += '.' + parsedJSON['Resolution'] if title else parsedJSON['Resolution']
if 'Source' in parsedJSON:
title += '.' + parsedJSON['Source'] if title else parsedJSON['Source']
if 'Codec' in parsedJSON:
title += '.' + parsedJSON['Codec'] if title else parsedJSON['Codec']
if title:
title = title.replace(' ', '.')
url = None
if 'DownloadURL' in parsedJSON:
url = parsedJSON['DownloadURL']
if url:
# unescaped / is valid in JSON, but it can be escaped
url = url.replace("\\/", "/")
return (title, url)
def _get_season_search_strings(self, show, season=None):
if not show:
return [{}]
search_params = []
name_exceptions = scene_exceptions.get_scene_exceptions(show.tvdbid) + [show.name]
for name in name_exceptions:
current_params = {}
if show.tvdbid:
current_params['tvdb'] = show.tvdbid
elif show.tvrid:
current_params['tvrage'] = show.tvrid
else:
# Search by name if we don't have tvdb or tvrage id
current_params['series'] = sanitizeSceneName(name)
if season is not None:
whole_season_params = current_params.copy()
partial_season_params = current_params.copy()
# Search for entire seasons: no need to do special things for air by date shows
whole_season_params['category'] = 'Season'
whole_season_params['name'] = 'Season ' + str(season)
if whole_season_params not in search_params:
search_params.append(whole_season_params)
# Search for episodes in the season
partial_season_params['category'] = 'Episode'
if show.air_by_date:
# Search for the year of the air by date show
partial_season_params['name'] = str(season.split('-')[0]) + "%"
else:
# Search for any result which has Sxx in the name
partial_season_params['name'] = "S" + str(season).zfill(2) + "%"
if partial_season_params not in search_params:
search_params.append(partial_season_params)
else:
if current_params not in search_params:
search_params.append(current_params)
return search_params
def _get_episode_search_strings(self, ep_obj):
if not ep_obj:
return [{}]
search_params = {'category': 'Episode'}
if ep_obj.show.tvdbid:
search_params['tvdb'] = ep_obj.show.tvdbid
elif ep_obj.show.tvrid:
search_params['tvrage'] = ep_obj.show.rid
else:
search_params['series'] = sanitizeSceneName(ep_obj.show_name)
if ep_obj.show.air_by_date:
date_str = str(ep_obj.airdate)
# BTN uses dots in dates, we just search for the date since that
# combined with the series identifier should result in just one episode
search_params['name'] = date_str.replace('-', '.')
else:
# Do a general name search for the episode, formatted like SXXEYY
search_params['name'] = "S%02dE%02d" % (ep_obj.season, ep_obj.episode)
to_return = [search_params]
# only do scene exceptions if we are searching by name
if 'series' in search_params:
# add new query string for every exception
name_exceptions = scene_exceptions.get_scene_exceptions(ep_obj.show.tvdbid)
for cur_exception in name_exceptions:
# don't add duplicates
if cur_exception == ep_obj.show.name:
continue
# copy all other parameters before setting the show name for this exception
cur_return = search_params.copy()
cur_return['series'] = sanitizeSceneName(cur_exception)
to_return.append(cur_return)
return to_return
def _doGeneralSearch(self, search_string):
# 'search' looks as broad is it can find. Can contain episode overview and title for example,
# use with caution!
return self._doSearch({'search': search_string})
def findPropers(self, search_date=None):
results = []
search_terms = ['%.proper.%', '%.repack.%']
for term in search_terms:
for item in self._doSearch({'release': term}, age=4 * 24 * 60 * 60):
if item['Time']:
try:
result_date = datetime.fromtimestamp(float(item['Time']))
except TypeError:
result_date = None
if result_date:
if not search_date or result_date > search_date:
title, url = self._get_title_and_url(item)
results.append(classes.Proper(title, url, result_date))
return results
class BTNCache(tvcache.TVCache):
    """TVCache subclass that periodically polls BTN for new uploads."""
    def __init__(self, provider):
        tvcache.TVCache.__init__(self, provider)
        # At least 15 minutes between queries
        self.minTime = 15
    def updateCache(self):
        """Refresh the cache from the provider if the poll interval elapsed."""
        if not self.shouldUpdate():
            return
        if self._checkAuth(None):
            data = self._getRSSData()
            # As long as we got something from the provider we count it as an update
            if data:
                self.setLastUpdate()
            else:
                return []
            logger.log(u"Clearing " + self.provider.name + " cache and updating with new information")
            self._clearCache()
            if self._checkAuth(data):
                # By now we know we've got data and no auth errors, all we need to do is put it in the database
                for item in data:
                    self._parseItem(item)
            else:
                raise AuthException("Your authentication info for " + self.provider.name + " is incorrect, check your config")
        else:
            return []
    def _getRSSData(self):
        """Search the provider for torrents uploaded since the last check,
        clamped between self.minTime minutes and 24 hours."""
        # Get the torrents uploaded since last check.
        seconds_since_last_update = math.ceil(time.time() - time.mktime(self._getLastUpdate().timetuple()))
        # default to 15 minutes
        seconds_minTime = self.minTime * 60
        if seconds_since_last_update < seconds_minTime:
            seconds_since_last_update = seconds_minTime
        # Set maximum to 24 hours (24 * 60 * 60 = 86400 seconds) of "RSS" data search, older things will need to be done through backlog
        if seconds_since_last_update > 86400:
            logger.log(u"The last known successful update on " + self.provider.name + " was more than 24 hours ago, only trying to fetch the last 24 hours!", logger.WARNING)
            seconds_since_last_update = 86400
        data = self.provider._doSearch(search_params=None, age=seconds_since_last_update)
        return data
    def _parseItem(self, item):
        """Insert one search result into the cache DB; skip unusable ones."""
        (title, url) = self.provider._get_title_and_url(item)
        if title and url:
            logger.log(u"Adding item to results: " + title, logger.DEBUG)
            self._addCacheEntry(title, url)
        else:
            logger.log(u"The data returned from the " + self.provider.name + " is incomplete, this result is unusable", logger.ERROR)
            return
    def _checkAuth(self, data):
        # Delegate to the provider's response-level auth check.
        return self.provider._checkAuthFromData(data)
provider = BTNProvider()
| gpl-3.0 |
benhoff/listenerplugins | listenerplugins/profiling.py | 17 | 3087 | import asyncio
import os
import signal
import threading
import traceback
import sys
PYMPLER_ENABLED = False
if PYMPLER_ENABLED:
try:
import pympler
import pympler.muppy
import pympler.summary
import pympler.tracker
except ImportError:
pympler = None
else:
pympler = None
try:
import objgraph
except ImportError:
objgraph = None
from cloudbot import hook
from cloudbot.util import web
def get_name(thread_id):
    """Return a human-readable label for *thread_id*.

    The label is "<name> (<id>)", with " - Current thread" appended when
    the id belongs to the calling thread, and "Unknown thread (<id>)"
    when no live thread with that id can be found.
    """
    current_thread = threading.current_thread()
    # Use the public Thread.ident attribute instead of the private _ident.
    if thread_id == current_thread.ident:
        is_current = True
        thread = current_thread
    else:
        is_current = False
        # Look the thread up via the public API rather than threading._active.
        thread = next((t for t in threading.enumerate()
                       if t.ident == thread_id), None)
    if thread is not None:
        # Thread.name is a string in practice, but keep the defensive guard.
        if thread.name is not None:
            name = thread.name
        else:
            name = "Unnamed thread"
    else:
        name = "Unknown thread"
    name = "{} ({})".format(name, thread_id)
    if is_current:
        name += " - Current thread"
    return name
def get_thread_dump():
    """Collect a stack trace for every live thread and upload the combined
    text via web.paste, returning the paste URL."""
    code = []
    threads = [(get_name(thread_id), traceback.extract_stack(stack))
               for thread_id, stack in sys._current_frames().items()]
    for thread_name, stack in threads:
        code.append("# {}".format(thread_name))
        for filename, line_num, name, line in stack:
            code.append("{}:{} - {}".format(filename, line_num, name))
            if line:
                code.append(" {}".format(line.strip()))
        code.append("") # new line
    return web.paste("\n".join(code), ext='txt')
@asyncio.coroutine
@hook.command("threaddump", autohelp=False, permissions=["botcontrol"])
def threaddump_command():
    """Bot command: paste a dump of all thread stacks and return the link."""
    return get_thread_dump()
@hook.command("objtypes", autohelp=False, permissions=["botcontrol"])
def show_types():
    """Bot command: print the 20 most common live object types (objgraph)."""
    if objgraph is None:
        return "objgraph not installed"
    objgraph.show_most_common_types(limit=20)
    return "Printed to console"
@hook.command("objgrowth", autohelp=False, permissions=["botcontrol"])
def show_growth():
    """Bot command: print object types that grew since the last call (objgraph)."""
    if objgraph is None:
        return "objgraph not installed"
    objgraph.show_growth(limit=10)
    return "Printed to console"
@hook.command("pymsummary", autohelp=False, permissions=["botcontrol"])
def pympler_summary():
    """Bot command: print a pympler summary of all live objects."""
    if pympler is None:
        return "pympler not installed / not enabled"
    all_objects = pympler.muppy.get_objects()
    summ = pympler.summary.summarize(all_objects)
    pympler.summary.print_(summ)
    return "Printed to console"
@hook.on_start()
def create_tracker():
    """Create the module-level pympler SummaryTracker (`tr`) at bot start.

    Note: `tr` only exists after this hook runs; commands using it rely on
    that startup ordering.
    """
    if pympler is None:
        return
    global tr
    tr = pympler.tracker.SummaryTracker()
@hook.command("pymdiff", autohelp=False, permissions=["botcontrol"])
def pympler_diff():
    """Bot command: print allocations since the tracker's last snapshot.

    Relies on `tr` having been created by the on_start hook.
    """
    if pympler is None:
        return "pympler not installed / not enabled"
    tr.print_diff()
    return "Printed to console"
# # Provide an easy way to get a threaddump, by using SIGUSR1 (only on POSIX systems)
if os.name == "posix":
    def debug(signum=None, frame=None):
        """SIGUSR1 handler that prints a thread dump.

        Signal handlers are invoked as handler(signum, frame); the previous
        zero-argument signature raised TypeError whenever the signal fired.
        """
        print(get_thread_dump())

    signal.signal(signal.SIGUSR1, debug)  # Register handler
| gpl-3.0 |
hashem78/namebench | nb_third_party/dns/dnssec.py | 215 | 2144 | # Copyright (C) 2003-2007, 2009 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Common DNSSEC-related functions and constants."""
RSAMD5 = 1
DH = 2
DSA = 3
ECC = 4
RSASHA1 = 5
DSANSEC3SHA1 = 6
RSASHA1NSEC3SHA1 = 7
RSASHA256 = 8
RSASHA512 = 10
INDIRECT = 252
PRIVATEDNS = 253
PRIVATEOID = 254
_algorithm_by_text = {
'RSAMD5' : RSAMD5,
'DH' : DH,
'DSA' : DSA,
'ECC' : ECC,
'RSASHA1' : RSASHA1,
'DSANSEC3SHA1' : DSANSEC3SHA1,
'RSASHA1NSEC3SHA1' : RSASHA1NSEC3SHA1,
'RSASHA256' : RSASHA256,
'RSASHA512' : RSASHA512,
'INDIRECT' : INDIRECT,
'PRIVATEDNS' : PRIVATEDNS,
'PRIVATEOID' : PRIVATEOID,
}
# We construct the inverse mapping programmatically to ensure that we
# cannot make any mistakes (e.g. omissions, cut-and-paste errors) that
# would cause the mapping not to be true inverse.
_algorithm_by_value = dict([(y, x) for x, y in _algorithm_by_text.iteritems()])
class UnknownAlgorithm(Exception):
"""Raised if an algorithm is unknown."""
pass
def algorithm_from_text(text):
    """Convert text into a DNSSEC algorithm value

    Mnemonics are matched case-insensitively; any other text is parsed
    as a decimal algorithm number.
    @rtype: int"""

    try:
        return _algorithm_by_text[text.upper()]
    except KeyError:
        return int(text)
def algorithm_to_text(value):
    """Convert a DNSSEC algorithm value to text

    Unknown values are rendered as their decimal string form.
    @rtype: string"""

    try:
        return _algorithm_by_value[value]
    except KeyError:
        return str(value)
| apache-2.0 |
amboxer21/scrapy | scrapy/downloadermiddlewares/retry.py | 108 | 3570 | """
An extension to retry failed requests that are potentially caused by temporary
problems such as a connection timeout or HTTP 500 error.
You can change the behaviour of this middleware by modifing the scraping settings:
RETRY_TIMES - how many times to retry a failed page
RETRY_HTTP_CODES - which HTTP response codes to retry
Failed pages are collected on the scraping process and rescheduled at the end,
once the spider has finished crawling all regular (non failed) pages. Once
there is no more failed pages to retry this middleware sends a signal
(retry_complete), so other extensions could connect to that signal.
About HTTP errors to consider:
- You may want to remove 400 from RETRY_HTTP_CODES, if you stick to the HTTP
protocol. It's included by default because it's a common code used to
indicate server overload, which would be something we want to retry
"""
import logging
from twisted.internet import defer
from twisted.internet.error import TimeoutError, DNSLookupError, \
ConnectionRefusedError, ConnectionDone, ConnectError, \
ConnectionLost, TCPTimedOutError
from scrapy.exceptions import NotConfigured
from scrapy.utils.response import response_status_message
from scrapy.xlib.tx import ResponseFailed
logger = logging.getLogger(__name__)
class RetryMiddleware(object):
    """Downloader middleware that retries requests which failed with a
    retryable HTTP status code or a (usually transient) network exception."""
    # IOError is raised by the HttpCompression middleware when trying to
    # decompress an empty response
    EXCEPTIONS_TO_RETRY = (defer.TimeoutError, TimeoutError, DNSLookupError,
                           ConnectionRefusedError, ConnectionDone, ConnectError,
                           ConnectionLost, TCPTimedOutError, ResponseFailed,
                           IOError)
    def __init__(self, settings):
        if not settings.getbool('RETRY_ENABLED'):
            raise NotConfigured
        self.max_retry_times = settings.getint('RETRY_TIMES')
        self.retry_http_codes = set(int(x) for x in settings.getlist('RETRY_HTTP_CODES'))
        self.priority_adjust = settings.getint('RETRY_PRIORITY_ADJUST')
    @classmethod
    def from_crawler(cls, crawler):
        # Standard Scrapy factory hook: build the middleware from settings.
        return cls(crawler.settings)
    def process_response(self, request, response, spider):
        """Reschedule the request when its status is retryable; otherwise
        (or when retries are exhausted) return the response unchanged."""
        if request.meta.get('dont_retry', False):
            return response
        if response.status in self.retry_http_codes:
            reason = response_status_message(response.status)
            # _retry returns None once retries are exhausted; fall back to
            # the original response in that case.
            return self._retry(request, reason, spider) or response
        return response
    def process_exception(self, request, exception, spider):
        """Reschedule the request when a retryable network exception occurred."""
        if isinstance(exception, self.EXCEPTIONS_TO_RETRY) \
                and not request.meta.get('dont_retry', False):
            return self._retry(request, exception, spider)
    def _retry(self, request, reason, spider):
        """Return a retry copy of *request*, or None when the retry budget
        (RETRY_TIMES) has been used up."""
        retries = request.meta.get('retry_times', 0) + 1
        if retries <= self.max_retry_times:
            logger.debug("Retrying %(request)s (failed %(retries)d times): %(reason)s",
                         {'request': request, 'retries': retries, 'reason': reason},
                         extra={'spider': spider})
            retryreq = request.copy()
            retryreq.meta['retry_times'] = retries
            retryreq.dont_filter = True
            retryreq.priority = request.priority + self.priority_adjust
            return retryreq
        else:
            logger.debug("Gave up retrying %(request)s (failed %(retries)d times): %(reason)s",
                         {'request': request, 'retries': retries, 'reason': reason},
                         extra={'spider': spider})
| bsd-3-clause |
arthru/OpenUpgrade | addons/l10n_be_invoice_bba/partner.py | 379 | 2268 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Created by Luc De Meyer
# Copyright (c) 2010 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import time
from openerp.tools.translate import _
class res_partner(osv.osv):
    """ add field to indicate default 'Communication Type' on customer invoices """
    _inherit = 'res.partner'
    def _get_comm_type(self, cr, uid, context=None):
        # Reuse the reference-type selection declared on account.invoice.
        res = self.pool.get('account.invoice')._get_reference_type(cr, uid,context=context)
        return res
    _columns = {
        'out_inv_comm_type': fields.selection(_get_comm_type, 'Communication Type', change_default=True,
            help='Select Default Communication Type for Outgoing Invoices.' ),
        'out_inv_comm_algorithm': fields.selection([
            ('random','Random'),
            ('date','Date'),
            ('partner_ref','Customer Reference'),
            ], 'Communication Algorithm',
            help='Select Algorithm to generate the Structured Communication on Outgoing Invoices.' ),
        }
    def _commercial_fields(self, cr, uid, context=None):
        # Propagate these settings from the commercial partner to its contacts.
        return super(res_partner, self)._commercial_fields(cr, uid, context=context) + \
            ['out_inv_comm_type', 'out_inv_comm_algorithm']
    # NOTE(review): the default 'none' must be one of the selection values
    # returned by _get_comm_type — verify against account.invoice.
    _default = {
        'out_inv_comm_type': 'none',
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jonludlam/xen | tools/python/xen/util/dictio.py | 49 | 1792 | #===========================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2006 International Business Machines Corp.
# Author: Bryan D. Payne <bdpayne@us.ibm.com>
#============================================================================
def dict_read(dictname, filename):
    """Loads <filename> and returns the dictionary named <dictname> from
    the file.  Returns an empty dict when the file does not define
    <dictname>.
    """
    # Execute the config file in a scratch namespace.  exec() as a
    # function call works on both Python 2 and Python 3, unlike the
    # execfile() builtin that was removed in Python 3.
    globs = {}
    locs = {}
    with open(filename) as config_file:
        exec(config_file.read(), globs, locs)
    # Pull the requested binding out of the executed file's locals
    # (avoids shadowing the 'dict' builtin, as the old loop did).
    return locs.get(dictname, {})
def dict_write(dict, dictname, filename):
    """Writes <dict> to <filename> using the name <dictname>. If the file
    contains any other data, it will be overwritten.
    """
    # NOTE(review): the first parameter shadows the 'dict' builtin; the
    # name is kept for backward compatibility with keyword callers.
    prefix = dictname + " = {\n"
    suffix = "}\n"
    # Text mode ("w" instead of "wb") so plain str writes work on both
    # Python 2 and 3; 'with' guarantees the file is closed even when
    # formatting a value raises.
    with open(filename, "w") as fd:
        fd.write(prefix)
        for key in dict:
            fd.write(" '" + str(key) + "': " + str(dict[key]) + ",\n")
        fd.write(suffix)
| gpl-2.0 |
umitproject/site-status | django/contrib/localflavor/br/forms.py | 308 | 5803 | # -*- coding: utf-8 -*-
"""
BR-specific Form helpers
"""
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField, CharField, Select
from django.utils.encoding import smart_unicode
from django.utils.translation import ugettext_lazy as _
import re
phone_digits_re = re.compile(r'^(\d{2})[-\.]?(\d{4})[-\.]?(\d{4})$')
class BRZipCodeField(RegexField):
    """Form field validating Brazilian zip (CEP) codes in XXXXX-XXX form."""
    default_error_messages = {
        'invalid': _('Enter a zip code in the format XXXXX-XXX.'),
    }
    def __init__(self, *args, **kwargs):
        super(BRZipCodeField, self).__init__(r'^\d{5}-\d{3}$',
            max_length=None, min_length=None, *args, **kwargs)
class BRPhoneNumberField(Field):
    """Form field validating Brazilian phone numbers (XX-XXXX-XXXX)."""
    default_error_messages = {
        'invalid': _('Phone numbers must be in XX-XXXX-XXXX format.'),
    }
    def clean(self, value):
        """Normalize to XX-XXXX-XXXX or raise ValidationError."""
        super(BRPhoneNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        # Strip parentheses and whitespace before matching the digit groups.
        value = re.sub('(\(|\)|\s+)', '', smart_unicode(value))
        m = phone_digits_re.search(value)
        if m:
            return u'%s-%s-%s' % (m.group(1), m.group(2), m.group(3))
        raise ValidationError(self.error_messages['invalid'])
class BRStateSelect(Select):
    """
    A Select widget that uses a list of Brazilian states/territories
    as its choices.
    """
    def __init__(self, attrs=None):
        # Imported lazily so the choices are not loaded at module import time.
        from br_states import STATE_CHOICES
        super(BRStateSelect, self).__init__(attrs, choices=STATE_CHOICES)
class BRStateChoiceField(Field):
    """
    A choice field that uses a list of Brazilian states as its choices.
    """
    widget = Select
    default_error_messages = {
        'invalid': _(u'Select a valid brazilian state. That state is not one of the available states.'),
    }
    def __init__(self, required=True, widget=None, label=None,
                 initial=None, help_text=None):
        super(BRStateChoiceField, self).__init__(required, widget, label,
                                                 initial, help_text)
        # Imported lazily so the choices are not loaded at module import time.
        from br_states import STATE_CHOICES
        self.widget.choices = STATE_CHOICES
    def clean(self, value):
        """Return the state code when it is one of the configured choices."""
        value = super(BRStateChoiceField, self).clean(value)
        if value in EMPTY_VALUES:
            value = u''
        value = smart_unicode(value)
        if value == u'':
            return value
        valid_values = set([smart_unicode(k) for k, v in self.widget.choices])
        if value not in valid_values:
            raise ValidationError(self.error_messages['invalid'])
        return value
def DV_maker(v):
    # Map a mod-11 remainder onto a CPF/CNPJ check digit: remainders of
    # 0 and 1 become 0, anything else becomes 11 - remainder.
    return 11 - v if v >= 2 else 0
class BRCPFField(CharField):
    """
    This field validate a CPF number or a CPF string. A CPF number is
    compounded by XXX.XXX.XXX-VD. The two last digits are check digits.
    More information:
    http://en.wikipedia.org/wiki/Cadastro_de_Pessoas_F%C3%ADsicas
    """
    default_error_messages = {
        'invalid': _("Invalid CPF number."),
        'max_digits': _("This field requires at most 11 digits or 14 characters."),
        'digits_only': _("This field requires only numbers."),
    }
    def __init__(self, *args, **kwargs):
        super(BRCPFField, self).__init__(max_length=14, min_length=11, *args, **kwargs)
    def clean(self, value):
        """
        Value can be either a string in the format XXX.XXX.XXX-XX or an
        11-digit number.
        """
        value = super(BRCPFField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        orig_value = value[:]
        if not value.isdigit():
            value = re.sub("[-\.]", "", value)
        try:
            int(value)
        except ValueError:
            raise ValidationError(self.error_messages['digits_only'])
        if len(value) != 11:
            raise ValidationError(self.error_messages['max_digits'])
        orig_dv = value[-2:]
        # First check digit: weighted sum of the first 9 digits (weights 10..2).
        new_1dv = sum([i * int(value[idx]) for idx, i in enumerate(range(10, 1, -1))])
        new_1dv = DV_maker(new_1dv % 11)
        value = value[:-2] + str(new_1dv) + value[-1]
        # Second check digit: weighted sum of the first 10 digits (weights 11..2),
        # including the first check digit just computed.
        new_2dv = sum([i * int(value[idx]) for idx, i in enumerate(range(11, 1, -1))])
        new_2dv = DV_maker(new_2dv % 11)
        value = value[:-1] + str(new_2dv)
        # The recomputed check digits must match the ones supplied.
        if value[-2:] != orig_dv:
            raise ValidationError(self.error_messages['invalid'])
        return orig_value
class BRCNPJField(Field):
    """Form field validating Brazilian CNPJ numbers (XX.XXX.XXX/XXXX-XX)."""
    default_error_messages = {
        'invalid': _("Invalid CNPJ number."),
        'digits_only': _("This field requires only numbers."),
        'max_digits': _("This field requires at least 14 digits"),
    }
    def clean(self, value):
        """
        Value can be either a string in the format XX.XXX.XXX/XXXX-XX or a
        group of 14 characters.
        """
        value = super(BRCNPJField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        orig_value = value[:]
        if not value.isdigit():
            value = re.sub("[-/\.]", "", value)
        try:
            int(value)
        except ValueError:
            raise ValidationError(self.error_messages['digits_only'])
        if len(value) != 14:
            raise ValidationError(self.error_messages['max_digits'])
        orig_dv = value[-2:]
        # NOTE: range(...) + range(...) concatenates lists, so this module is
        # Python 2 only.  The concatenation builds the CNPJ weight sequence.
        new_1dv = sum([i * int(value[idx]) for idx, i in enumerate(range(5, 1, -1) + range(9, 1, -1))])
        new_1dv = DV_maker(new_1dv % 11)
        value = value[:-2] + str(new_1dv) + value[-1]
        new_2dv = sum([i * int(value[idx]) for idx, i in enumerate(range(6, 1, -1) + range(9, 1, -1))])
        new_2dv = DV_maker(new_2dv % 11)
        value = value[:-1] + str(new_2dv)
        # The recomputed check digits must match the ones supplied.
        if value[-2:] != orig_dv:
            raise ValidationError(self.error_messages['invalid'])
        return orig_value
| agpl-3.0 |
pataquets/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/net/unittestresults.py | 155 | 2347 | # Copyright (c) 2012, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import xml.dom.minidom
import xml.parsers.expat
_log = logging.getLogger(__name__)
class UnitTestResults(object):
    """Extracts failing test names from JUnit-style unit test XML output."""

    @classmethod
    def results_from_string(cls, string):
        """Return a list of "classname.testname" strings for every test case
        that reports a <failure>, or None for empty or unparsable input."""
        if not string:
            return None
        try:
            dom = xml.dom.minidom.parseString(string)
            failures = []
            for testcase in dom.getElementsByTagName('testcase'):
                if testcase.getElementsByTagName('failure').length != 0:
                    testname = testcase.getAttribute('name')
                    classname = testcase.getAttribute('classname')
                    failures.append("%s.%s" % (classname, testname))
            return failures
        # 'as' form instead of the Python-2-only 'except X, e' syntax;
        # valid on Python 2.6+ and required on Python 3.
        except xml.parsers.expat.ExpatError as e:
            _log.error("XML error %s parsing unit test output" % str(e))
            return None
| bsd-3-clause |
Alwnikrotikz/pyglet | tests/clock/SCHEDULE_ONCE.py | 33 | 1396 | #!/usr/bin/env python
'''Test that a scheduled function gets called every once with the correct
time delta.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: TICK.py 310 2006-12-23 15:56:35Z Alex.Holkner $'
import time
import unittest
from pyglet import clock
__noninteractive = True
class SCHEDULE_ONCE(unittest.TestCase):
    """Verify clock.schedule_once fires each callback exactly once, with a
    measured delta close to the requested delay."""
    # Fire counters; class-level defaults, shadowed per-instance by clear().
    callback_1_count = 0
    callback_2_count = 0
    callback_3_count = 0
    def callback_1(self, dt):
        # dt is the elapsed time since scheduling; expect ~0.1s (+/-10ms).
        self.assertTrue(abs(dt - 0.1) < 0.01)
        self.callback_1_count += 1
    def callback_2(self, dt):
        # Expect ~0.35s (+/-10ms).
        self.assertTrue(abs(dt - 0.35) < 0.01)
        self.callback_2_count += 1
    def callback_3(self, dt):
        # Expect ~0.07s (+/-10ms).
        self.assertTrue(abs(dt - 0.07) < 0.01)
        self.callback_3_count += 1
    def clear(self):
        # Reset the counters as instance attributes before each run.
        self.callback_1_count = 0
        self.callback_2_count = 0
        self.callback_3_count = 0
    def test_schedule_once(self):
        self.clear()
        # Fresh default clock so state from other tests cannot leak in.
        clock.set_default(clock.Clock())
        clock.schedule_once(self.callback_1, 0.1)
        clock.schedule_once(self.callback_2, 0.35)
        clock.schedule_once(self.callback_3, 0.07)
        t = 0
        # Drive the clock for ~1 wall-clock second so all three delays pass.
        while t < 1:
            t += clock.tick()
        # Each one-shot callback must have fired exactly once.
        self.assertTrue(self.callback_1_count == 1)
        self.assertTrue(self.callback_2_count == 1)
        self.assertTrue(self.callback_3_count == 1)
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
EmuxEvans/Wox | PythonHome/Lib/site-packages/pip/vcs/mercurial.py | 392 | 5820 | import os
import tempfile
import re
import sys
from pip.util import call_subprocess
from pip.util import display_path, rmtree
from pip.log import logger
from pip.vcs import vcs, VersionControl
from pip.download import path_to_url
from pip.backwardcompat import ConfigParser
class Mercurial(VersionControl):
    """pip VCS backend for Mercurial (``hg``) checkouts and editables."""
    name = 'hg'
    dirname = '.hg'
    repo_name = 'clone'
    schemes = ('hg', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http')
    bundle_file = 'hg-clone.txt'
    guide = ('# This was a Mercurial repo; to make it a repo again run:\n'
            'hg init\nhg pull %(url)s\nhg update -r %(rev)s\n')
    def parse_vcs_bundle_file(self, content):
        """Recover (url, rev) from the `guide` text stored in a pip bundle.

        Returns (None, None) when either value cannot be found."""
        url = rev = None
        for line in content.splitlines():
            if not line.strip() or line.strip().startswith('#'):
                continue
            url_match = re.search(r'hg\s*pull\s*(.*)\s*', line)
            if url_match:
                url = url_match.group(1).strip()
            rev_match = re.search(r'^hg\s*update\s*-r\s*(.*)\s*', line)
            if rev_match:
                rev = rev_match.group(1).strip()
            if url and rev:
                return url, rev
        return None, None
    def export(self, location):
        """Export the Hg repository at the url to the destination location"""
        temp_dir = tempfile.mkdtemp('-export', 'pip-')
        self.unpack(temp_dir)
        try:
            call_subprocess(
                [self.cmd, 'archive', location],
                filter_stdout=self._filter, show_stdout=False, cwd=temp_dir)
        finally:
            rmtree(temp_dir)
    def switch(self, dest, url, rev_options):
        """Repoint an existing checkout's default remote at `url`, then
        update the working copy to `rev_options`."""
        repo_config = os.path.join(dest, self.dirname, 'hgrc')
        config = ConfigParser.SafeConfigParser()
        try:
            config.read(repo_config)
            config.set('paths', 'default', url)
            config_file = open(repo_config, 'w')
            config.write(config_file)
            config_file.close()
        except (OSError, ConfigParser.NoSectionError):
            # sys.exc_info() instead of `except ... as e` keeps this line
            # parseable by both Python 2 and early Python 3.
            e = sys.exc_info()[1]
            logger.warn(
                'Could not switch Mercurial repository to %s: %s'
                % (url, e))
        else:
            # Only update if rewriting hgrc succeeded.
            call_subprocess([self.cmd, 'update', '-q'] + rev_options, cwd=dest)
    def update(self, dest, rev_options):
        """Pull from the default remote and update to `rev_options`."""
        call_subprocess([self.cmd, 'pull', '-q'], cwd=dest)
        call_subprocess(
            [self.cmd, 'update', '-q'] + rev_options, cwd=dest)
    def obtain(self, dest):
        """Clone the requirement's repository into `dest` (after the usual
        destination-conflict checks) and update to the requested revision."""
        url, rev = self.get_url_rev()
        if rev:
            rev_options = [rev]
            rev_display = ' (to revision %s)' % rev
        else:
            rev_options = []
            rev_display = ''
        if self.check_destination(dest, url, rev_options, rev_display):
            logger.notify('Cloning hg %s%s to %s'
                          % (url, rev_display, display_path(dest)))
            call_subprocess([self.cmd, 'clone', '--noupdate', '-q', url, dest])
            call_subprocess([self.cmd, 'update', '-q'] + rev_options, cwd=dest)
    def get_url(self, location):
        """Return the checkout's default remote URL; local filesystem paths
        are converted to file: URLs."""
        url = call_subprocess(
            [self.cmd, 'showconfig', 'paths.default'],
            show_stdout=False, cwd=location).strip()
        if self._is_local_repository(url):
            url = path_to_url(url)
        return url.strip()
    def get_tag_revs(self, location):
        """Map local revision number -> tag name, excluding 'tip'."""
        tags = call_subprocess(
            [self.cmd, 'tags'], show_stdout=False, cwd=location)
        tag_revs = []
        for line in tags.splitlines():
            # `hg tags` lines look like "<name>   <rev>:<hash>".
            tags_match = re.search(r'([\w\d\.-]+)\s*([\d]+):.*$', line)
            if tags_match:
                tag = tags_match.group(1)
                rev = tags_match.group(2)
                if "tip" != tag:
                    tag_revs.append((rev.strip(), tag.strip()))
        return dict(tag_revs)
    def get_branch_revs(self, location):
        """Map local revision number -> branch name, excluding 'default'."""
        branches = call_subprocess(
            [self.cmd, 'branches'], show_stdout=False, cwd=location)
        branch_revs = []
        for line in branches.splitlines():
            branches_match = re.search(r'([\w\d\.-]+)\s*([\d]+):.*$', line)
            if branches_match:
                branch = branches_match.group(1)
                rev = branches_match.group(2)
                if "default" != branch:
                    branch_revs.append((rev.strip(), branch.strip()))
        return dict(branch_revs)
    def get_revision(self, location):
        """Return the working copy parent's local revision number."""
        current_revision = call_subprocess(
            [self.cmd, 'parents', '--template={rev}'],
            show_stdout=False, cwd=location).strip()
        return current_revision
    def get_revision_hash(self, location):
        """Return the working copy parent's full changeset hash."""
        current_rev_hash = call_subprocess(
            [self.cmd, 'parents', '--template={node}'],
            show_stdout=False, cwd=location).strip()
        return current_rev_hash
    def get_src_requirement(self, dist, location, find_tags):
        """Build an editable requirement string 'hg+URL@hash#egg=name',
        naming the egg after a tag or branch when one matches."""
        repo = self.get_url(location)
        if not repo.lower().startswith('hg:'):
            repo = 'hg+' + repo
        egg_project_name = dist.egg_name().split('-', 1)[0]
        if not repo:
            return None
        current_rev = self.get_revision(location)
        current_rev_hash = self.get_revision_hash(location)
        tag_revs = self.get_tag_revs(location)
        branch_revs = self.get_branch_revs(location)
        if current_rev in tag_revs:
            # It's a tag
            full_egg_name = '%s-%s' % (egg_project_name, tag_revs[current_rev])
        elif current_rev in branch_revs:
            # It's the tip of a branch
            full_egg_name = '%s-%s' % (egg_project_name, branch_revs[current_rev])
        else:
            full_egg_name = '%s-dev' % egg_project_name
        return '%s@%s#egg=%s' % (repo, current_rev_hash, full_egg_name)
vcs.register(Mercurial)
| mit |
tanglei528/glance | glance/openstack/common/processutils.py | 2 | 10827 | # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import errno
import logging as stdlib_logging
import multiprocessing
import os
import random
import shlex
import signal
from eventlet.green import subprocess
from eventlet import greenthread
import six
from glance.openstack.common.gettextutils import _
from glance.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class InvalidArgumentError(Exception):
    """Raised when a caller passes an argument that is not supported
    (e.g. env/process_input over SSH)."""
    def __init__(self, message=None):
        super(InvalidArgumentError, self).__init__(message)
class UnknownArgumentError(Exception):
    """Raised when execute() receives keyword arguments it does not know."""
    def __init__(self, message=None):
        super(UnknownArgumentError, self).__init__(message)
class ProcessExecutionError(Exception):
    """Raised when a spawned command exits with an unexpected status.

    Keeps the command, exit code and captured stdout/stderr as attributes
    so callers can inspect or log the failure.
    """
    def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
                 description=None):
        self.exit_code = exit_code
        self.stderr = stderr
        self.stdout = stdout
        self.cmd = cmd
        self.description = description
        # Defaults below are substituted into the message only; the
        # attributes above intentionally keep the original None values.
        if description is None:
            description = _("Unexpected error while running command.")
        if exit_code is None:
            exit_code = '-'
        message = _('%(description)s\n'
                    'Command: %(cmd)s\n'
                    'Exit code: %(exit_code)s\n'
                    'Stdout: %(stdout)r\n'
                    'Stderr: %(stderr)r') % {'description': description,
                                             'cmd': cmd,
                                             'exit_code': exit_code,
                                             'stdout': stdout,
                                             'stderr': stderr}
        super(ProcessExecutionError, self).__init__(message)
class NoRootWrapSpecified(Exception):
    """Raised when run_as_root=True is requested without a root_helper."""
    def __init__(self, message=None):
        super(NoRootWrapSpecified, self).__init__(message)
def _subprocess_setup():
    """preexec_fn for Popen: restore default SIGPIPE handling in the child."""
    # Python installs a SIGPIPE handler by default. This is usually not what
    # non-Python subprocesses expect.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def execute(*cmd, **kwargs):
    """Helper method to shell out and execute a command through subprocess.
    Allows optional retry.
    :param cmd:             Passed to subprocess.Popen.
    :type cmd:              string
    :param process_input:   Send to opened process.
    :type process_input:    string
    :param env_variables:   Environment variables and their values that
                            will be set for the process.
    :type env_variables:    dict
    :param check_exit_code: Single bool, int, or list of allowed exit
                            codes.  Defaults to [0].  Raise
                            :class:`ProcessExecutionError` unless
                            program exits with one of these code.
    :type check_exit_code:  boolean, int, or [int]
    :param delay_on_retry:  True | False. Defaults to True. If set to True,
                            wait a short amount of time before retrying.
    :type delay_on_retry:   boolean
    :param attempts:        How many times to retry cmd.
    :type attempts:         int
    :param run_as_root:     True | False. Defaults to False. If set to True,
                            the command is prefixed by the command specified
                            in the root_helper kwarg.
    :type run_as_root:      boolean
    :param root_helper:     command to prefix to commands called with
                            run_as_root=True
    :type root_helper:      string
    :param shell:           whether or not there should be a shell used to
                            execute this command. Defaults to false.
    :type shell:            boolean
    :param loglevel:        log level for execute commands.
    :type loglevel:         int.  (Should be stdlib_logging.DEBUG or
                            stdlib_logging.INFO)
    :returns:               (stdout, stderr) from process execution
    :raises:                :class:`UnknownArgumentError` on
                            receiving unknown arguments
    :raises:                :class:`ProcessExecutionError`
    """
    process_input = kwargs.pop('process_input', None)
    env_variables = kwargs.pop('env_variables', None)
    check_exit_code = kwargs.pop('check_exit_code', [0])
    ignore_exit_code = False
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    run_as_root = kwargs.pop('run_as_root', False)
    root_helper = kwargs.pop('root_helper', '')
    shell = kwargs.pop('shell', False)
    loglevel = kwargs.pop('loglevel', stdlib_logging.DEBUG)
    # Normalize check_exit_code to a list of acceptable codes; a bool means
    # "check against [0]" (True) or "don't check at all" (False).
    if isinstance(check_exit_code, bool):
        ignore_exit_code = not check_exit_code
        check_exit_code = [0]
    elif isinstance(check_exit_code, int):
        check_exit_code = [check_exit_code]
    if kwargs:
        raise UnknownArgumentError(_('Got unknown keyword args '
                                     'to utils.execute: %r') % kwargs)
    if run_as_root and hasattr(os, 'geteuid') and os.geteuid() != 0:
        if not root_helper:
            raise NoRootWrapSpecified(
                message=_('Command requested root, but did not '
                          'specify a root helper.'))
        cmd = shlex.split(root_helper) + list(cmd)
    # NOTE: use a list comprehension, not map(str, cmd).  On Python 3
    # map() returns a one-shot iterator that would be exhausted by the
    # logging call / first attempt, silently breaking retries.  On
    # Python 2 this is behaviorally identical.
    cmd = [str(c) for c in cmd]
    while attempts > 0:
        attempts -= 1
        try:
            LOG.log(loglevel, 'Running cmd (subprocess): %s',
                    ' '.join(logging.mask_password(cmd)))
            _PIPE = subprocess.PIPE  # pylint: disable=E1101
            if os.name == 'nt':
                # Windows: no fork, so no preexec_fn; close_fds breaks
                # stdin/stdout/stderr redirection there.
                preexec_fn = None
                close_fds = False
            else:
                preexec_fn = _subprocess_setup
                close_fds = True
            obj = subprocess.Popen(cmd,
                                   stdin=_PIPE,
                                   stdout=_PIPE,
                                   stderr=_PIPE,
                                   close_fds=close_fds,
                                   preexec_fn=preexec_fn,
                                   shell=shell,
                                   env=env_variables)
            result = None
            for _i in six.moves.range(20):
                # NOTE(russellb) 20 is an arbitrary number of retries to
                # prevent any chance of looping forever here.
                try:
                    if process_input is not None:
                        result = obj.communicate(process_input)
                    else:
                        result = obj.communicate()
                except OSError as e:
                    if e.errno in (errno.EAGAIN, errno.EINTR):
                        continue
                    raise
                break
            obj.stdin.close()  # pylint: disable=E1101
            _returncode = obj.returncode  # pylint: disable=E1101
            LOG.log(loglevel, 'Result was %s' % _returncode)
            if not ignore_exit_code and _returncode not in check_exit_code:
                (stdout, stderr) = result
                raise ProcessExecutionError(exit_code=_returncode,
                                            stdout=stdout,
                                            stderr=stderr,
                                            cmd=' '.join(cmd))
            return result
        except ProcessExecutionError:
            if not attempts:
                raise
            else:
                LOG.log(loglevel, '%r failed. Retrying.', cmd)
                if delay_on_retry:
                    greenthread.sleep(random.randint(20, 200) / 100.0)
        finally:
            # NOTE(termie): this appears to be necessary to let the subprocess
            #               call clean something up in between calls, without
            #               it two execute calls in a row hangs the second one
            greenthread.sleep(0)
def trycmd(*args, **kwargs):
    """A wrapper around execute() to more easily handle warnings and errors.
    Returns an (out, err) tuple of strings containing the output of
    the command's stdout and stderr.  If 'err' is not empty then the
    command can be considered to have failed.
    :discard_warnings   True | False. Defaults to False. If set to True,
                        then for succeeding commands, stderr is cleared
    """
    ignore_warnings = kwargs.pop('discard_warnings', False)
    succeeded = True
    try:
        out, err = execute(*args, **kwargs)
    except ProcessExecutionError as exc:
        succeeded = False
        out = ''
        err = six.text_type(exc)
    if succeeded and ignore_warnings and err:
        # Command succeeded but wrote warnings to stderr: drop them.
        err = ''
    return out, err
def ssh_execute(ssh, cmd, process_input=None,
                addl_env=None, check_exit_code=True):
    """Run `cmd` over an already-connected paramiko-style SSH client.

    Returns (stdout, stderr) as read from the remote command.  Raises
    ProcessExecutionError when check_exit_code is true and the remote
    command exits non-zero.  addl_env and process_input are rejected
    because this transport does not support them.
    """
    LOG.debug('Running cmd (SSH): %s', cmd)
    if addl_env:
        raise InvalidArgumentError(_('Environment not supported over SSH'))
    if process_input:
        # This is (probably) fixable if we need it...
        raise InvalidArgumentError(_('process_input not supported over SSH'))
    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
    channel = stdout_stream.channel
    # NOTE(justinsb): This seems suspicious...
    # ...other SSH clients have buffering issues with this approach
    stdout = stdout_stream.read()
    stderr = stderr_stream.read()
    stdin_stream.close()
    exit_status = channel.recv_exit_status()
    # exit_status == -1 if no exit code was returned
    if exit_status != -1:
        LOG.debug('Result was %s' % exit_status)
        if check_exit_code and exit_status != 0:
            raise ProcessExecutionError(exit_code=exit_status,
                                        stdout=stdout,
                                        stderr=stderr,
                                        cmd=cmd)
    return (stdout, stderr)
def get_worker_count():
    """Utility to get the default worker count.
    @return: The number of CPUs if that can be determined, else a default
    worker count of 1 is returned.
    """
    # cpu_count() may raise NotImplementedError on exotic platforms;
    # fall back to a single worker in that case.
    try:
        workers = multiprocessing.cpu_count()
    except NotImplementedError:
        workers = 1
    return workers
| apache-2.0 |
slightlymadphoenix/activityPointsApp | activitypoints/lib/python3.5/site-packages/django/contrib/contenttypes/migrations/0002_remove_content_type_name.py | 582 | 1168 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def add_legacy_name(apps, schema_editor):
    """Repopulate ContentType.name from each model's object name.

    Used as the *reverse* step of removing the legacy ``name`` column:
    falls back to the raw model string when the model class is gone.
    """
    ContentType = apps.get_model('contenttypes', 'ContentType')
    for content_type in ContentType.objects.all():
        try:
            model = apps.get_model(content_type.app_label, content_type.model)
            content_type.name = model._meta.object_name
        except LookupError:
            # Model no longer exists; keep the stored model identifier.
            content_type.name = content_type.model
        content_type.save()
class Migration(migrations.Migration):
    """Drop the legacy ContentType.name column.

    Forwards: nothing to compute, the column is simply made nullable and
    removed.  Backwards: ``add_legacy_name`` repopulates the column so the
    previous schema is restored with data.
    """
    dependencies = [
        ('contenttypes', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='contenttype',
            options={'verbose_name': 'content type', 'verbose_name_plural': 'content types'},
        ),
        # Make the column nullable first so the reverse RemoveField can
        # re-add it without needing a default.
        migrations.AlterField(
            model_name='contenttype',
            name='name',
            field=models.CharField(max_length=100, null=True),
        ),
        # Forward: no-op.  Backward: restore the data into the column.
        migrations.RunPython(
            migrations.RunPython.noop,
            add_legacy_name,
            hints={'model_name': 'contenttype'},
        ),
        migrations.RemoveField(
            model_name='contenttype',
            name='name',
        ),
    ]
| mit |
khkaminska/scikit-learn | examples/svm/plot_oneclass.py | 249 | 2302 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
# 500x500 grid over [-5, 5]^2 used to evaluate the decision function.
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data: two Gaussian blobs centered at (2, 2) and (-2, -2).
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations (drawn from the same blobs)
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations (uniform over the plane)
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
# predict() returns +1 for inliers and -1 for outliers, so errors are
# -1 predictions on regular data and +1 predictions on the outliers.
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Novelty Detection")
# Blue shading for the outlier region, red contour for the frontier,
# orange fill for the learned inlier region.
plt.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=plt.cm.Blues_r)
a = plt.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
plt.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = plt.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = plt.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = plt.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a.collections[0], b1, b2, c],
           ["learned frontier", "training observations",
            "new regular observations", "new abnormal observations"],
           loc="upper left",
           prop=matplotlib.font_manager.FontProperties(size=11))
plt.xlabel(
    "error train: %d/200 ; errors novel regular: %d/40 ; "
    "errors novel abnormal: %d/40"
    % (n_error_train, n_error_test, n_error_outliers))
plt.show()
| bsd-3-clause |
hkhamm/django_rest_tutorial_2 | env/lib/python2.7/site-packages/django/core/cache/backends/base.py | 112 | 7995 | "Base Cache class."
from __future__ import unicode_literals
import warnings
from django.core.exceptions import ImproperlyConfigured, DjangoRuntimeWarning
from django.utils.module_loading import import_by_path
class InvalidCacheBackendError(ImproperlyConfigured):
    """Raised when a cache backend cannot be located or initialized."""
    pass
class CacheKeyWarning(DjangoRuntimeWarning):
    """Warning issued for cache keys that would break under memcached."""
    pass
# Sentinel object: lets backends distinguish "no `timeout` argument was
# passed" (use the default) from an explicit timeout=None (cache forever).
DEFAULT_TIMEOUT = object()
# Memcached does not accept keys longer than this.
MEMCACHE_MAX_KEY_LENGTH = 250
def default_key_func(key, key_prefix, version):
    """Default cache-key builder.

    Produces "<key_prefix>:<version>:<key>".  Projects may supply an
    alternate builder via the KEY_FUNCTION setting for custom key-making
    behavior.
    """
    return '{0}:{1}:{2}'.format(key_prefix, version, key)
def get_key_func(key_func):
    """Resolve which key function the backend should use.

    `key_func` may be a callable (used as-is), a dotted import path
    (imported), or None (falls back to ``default_key_func``).
    """
    if key_func is None:
        return default_key_func
    if callable(key_func):
        return key_func
    return import_by_path(key_func)
class BaseCache(object):
    """Abstract base class for Django cache backends.

    Subclasses must implement add/get/set/delete/clear; the remaining
    operations (get_many, incr, versioning, ...) have naive default
    implementations built on top of those primitives.
    """
    def __init__(self, params):
        # Each numeric setting falls back to a hard-coded default when the
        # configured value is missing or not coercible to int.
        timeout = params.get('timeout', params.get('TIMEOUT', 300))
        try:
            timeout = int(timeout)
        except (ValueError, TypeError):
            timeout = 300
        self.default_timeout = timeout
        options = params.get('OPTIONS', {})
        max_entries = params.get('max_entries', options.get('MAX_ENTRIES', 300))
        try:
            self._max_entries = int(max_entries)
        except (ValueError, TypeError):
            self._max_entries = 300
        cull_frequency = params.get('cull_frequency', options.get('CULL_FREQUENCY', 3))
        try:
            self._cull_frequency = int(cull_frequency)
        except (ValueError, TypeError):
            self._cull_frequency = 3
        self.key_prefix = params.get('KEY_PREFIX', '')
        self.version = params.get('VERSION', 1)
        self.key_func = get_key_func(params.get('KEY_FUNCTION', None))
    def make_key(self, key, version=None):
        """Constructs the key used by all other methods. By default it
        uses the key_func to generate a key (which, by default,
        prepends the `key_prefix' and 'version'). An different key
        function can be provided at the time of cache construction;
        alternatively, you can subclass the cache backend to provide
        custom key making behavior.
        """
        if version is None:
            version = self.version
        new_key = self.key_func(key, self.key_prefix, version)
        return new_key
    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        """
        Set a value in the cache if the key does not already exist. If
        timeout is given, that timeout will be used for the key; otherwise
        the default cache timeout will be used.
        Returns True if the value was stored, False otherwise.
        """
        raise NotImplementedError
    def get(self, key, default=None, version=None):
        """
        Fetch a given key from the cache. If the key does not exist, return
        default, which itself defaults to None.
        """
        raise NotImplementedError
    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        """
        Set a value in the cache. If timeout is given, that timeout will be
        used for the key; otherwise the default cache timeout will be used.
        """
        raise NotImplementedError
    def delete(self, key, version=None):
        """
        Delete a key from the cache, failing silently.
        """
        raise NotImplementedError
    def get_many(self, keys, version=None):
        """
        Fetch a bunch of keys from the cache. For certain backends (memcached,
        pgsql) this can be *much* faster when fetching multiple values.
        Returns a dict mapping each key in keys to its value. If the given
        key is missing, it will be missing from the response dict.
        """
        d = {}
        for k in keys:
            val = self.get(k, version=version)
            if val is not None:
                d[k] = val
        return d
    def has_key(self, key, version=None):
        """
        Returns True if the key is in the cache and has not expired.
        """
        return self.get(key, version=version) is not None
    def incr(self, key, delta=1, version=None):
        """
        Add delta to value in the cache. If the key does not exist, raise a
        ValueError exception.
        """
        # NOTE: get-then-set is not atomic; backends with native atomic
        # increment override this.
        value = self.get(key, version=version)
        if value is None:
            raise ValueError("Key '%s' not found" % key)
        new_value = value + delta
        self.set(key, new_value, version=version)
        return new_value
    def decr(self, key, delta=1, version=None):
        """
        Subtract delta from value in the cache. If the key does not exist, raise
        a ValueError exception.
        """
        return self.incr(key, -delta, version=version)
    def __contains__(self, key):
        """
        Returns True if the key is in the cache and has not expired.
        """
        # This is a separate method, rather than just a copy of has_key(),
        # so that it always has the same functionality as has_key(), even
        # if a subclass overrides it.
        return self.has_key(key)
    def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
        """
        Set a bunch of values in the cache at once from a dict of key/value
        pairs. For certain backends (memcached), this is much more efficient
        than calling set() multiple times.
        If timeout is given, that timeout will be used for the key; otherwise
        the default cache timeout will be used.
        """
        for key, value in data.items():
            self.set(key, value, timeout=timeout, version=version)
    def delete_many(self, keys, version=None):
        """
        Set a bunch of values in the cache at once. For certain backends
        (memcached), this is much more efficient than calling delete() multiple
        times.
        """
        for key in keys:
            self.delete(key, version=version)
    def clear(self):
        """Remove *all* values from the cache at once."""
        raise NotImplementedError
    def validate_key(self, key):
        """
        Warn about keys that would not be portable to the memcached
        backend. This encourages (but does not force) writing backend-portable
        cache code.
        """
        if len(key) > MEMCACHE_MAX_KEY_LENGTH:
            warnings.warn('Cache key will cause errors if used with memcached: '
                    '%s (longer than %s)' % (key, MEMCACHE_MAX_KEY_LENGTH),
                    CacheKeyWarning)
        for char in key:
            # ord < 33 covers control characters and spaces; 127 is DEL --
            # all rejected by the memcached text protocol.
            if ord(char) < 33 or ord(char) == 127:
                warnings.warn('Cache key contains characters that will cause '
                        'errors if used with memcached: %r' % key,
                        CacheKeyWarning)
    def incr_version(self, key, delta=1, version=None):
        """Adds delta to the cache version for the supplied key. Returns the
        new version.
        """
        # Implemented as copy-to-new-version then delete-old-version.
        if version is None:
            version = self.version
        value = self.get(key, version=version)
        if value is None:
            raise ValueError("Key '%s' not found" % key)
        self.set(key, value, version=version+delta)
        self.delete(key, version=version)
        return version+delta
    def decr_version(self, key, delta=1, version=None):
        """Substracts delta from the cache version for the supplied key. Returns
        the new version.
        """
        return self.incr_version(key, -delta, version)
    def close(self, **kwargs):
        """Close the cache connection"""
        pass
| mit |
jsteemann/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/distutils/mwerkscompiler.py | 54 | 10339 | """distutils.mwerkscompiler
Contains MWerksCompiler, an implementation of the abstract CCompiler class
for MetroWerks CodeWarrior on the Macintosh. Needs work to support CW on
Windows."""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: mwerkscompiler.py 55881 2007-06-11 05:28:45Z neal.norwitz $"
import sys, os, string
from types import *
from distutils.errors import \
DistutilsExecError, DistutilsPlatformError, \
CompileError, LibError, LinkError
from distutils.ccompiler import \
CCompiler, gen_preprocess_options, gen_lib_options
import distutils.util
import distutils.dir_util
from distutils import log
class MWerksCompiler (CCompiler) :
"""Concrete class that implements an interface to MetroWerks CodeWarrior,
as defined by the CCompiler abstract class."""
compiler_type = 'mwerks'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
_rc_extensions = ['.r']
_exp_extension = '.exp'
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = (_c_extensions + _cpp_extensions +
_rc_extensions)
res_extension = '.rsrc'
obj_extension = '.obj' # Not used, really
static_lib_extension = '.lib'
shared_lib_extension = '.slb'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = ''
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
CCompiler.__init__ (self, verbose, dry_run, force)
def compile (self,
sources,
output_dir=None,
macros=None,
include_dirs=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
depends=None):
(output_dir, macros, include_dirs) = \
self._fix_compile_args (output_dir, macros, include_dirs)
self.__sources = sources
self.__macros = macros
self.__include_dirs = include_dirs
# Don't need extra_preargs and extra_postargs for CW
return []
def link (self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
# First fixup.
(objects, output_dir) = self._fix_object_args (objects, output_dir)
(libraries, library_dirs, runtime_library_dirs) = \
self._fix_lib_args (libraries, library_dirs, runtime_library_dirs)
# First examine a couple of options for things that aren't implemented yet
if not target_desc in (self.SHARED_LIBRARY, self.SHARED_OBJECT):
raise DistutilsPlatformError, 'Can only make SHARED_LIBRARY or SHARED_OBJECT targets on the Mac'
if runtime_library_dirs:
raise DistutilsPlatformError, 'Runtime library dirs not implemented yet'
if extra_preargs or extra_postargs:
raise DistutilsPlatformError, 'Runtime library dirs not implemented yet'
if len(export_symbols) != 1:
raise DistutilsPlatformError, 'Need exactly one export symbol'
# Next there are various things for which we need absolute pathnames.
# This is because we (usually) create the project in a subdirectory of
# where we are now, and keeping the paths relative is too much work right
# now.
sources = map(self._filename_to_abs, self.__sources)
include_dirs = map(self._filename_to_abs, self.__include_dirs)
if objects:
objects = map(self._filename_to_abs, objects)
else:
objects = []
if build_temp:
build_temp = self._filename_to_abs(build_temp)
else:
build_temp = os.curdir()
if output_dir:
output_filename = os.path.join(output_dir, output_filename)
# The output filename needs special handling: splitting it into dir and
# filename part. Actually I'm not sure this is really needed, but it
# can't hurt.
output_filename = self._filename_to_abs(output_filename)
output_dir, output_filename = os.path.split(output_filename)
# Now we need the short names of a couple of things for putting them
# into the project.
if output_filename[-8:] == '.ppc.slb':
basename = output_filename[:-8]
elif output_filename[-11:] == '.carbon.slb':
basename = output_filename[:-11]
else:
basename = os.path.strip(output_filename)[0]
projectname = basename + '.mcp'
targetname = basename
xmlname = basename + '.xml'
exportname = basename + '.mcp.exp'
prefixname = 'mwerks_%s_config.h'%basename
# Create the directories we need
distutils.dir_util.mkpath(build_temp, dry_run=self.dry_run)
distutils.dir_util.mkpath(output_dir, dry_run=self.dry_run)
# And on to filling in the parameters for the project builder
settings = {}
settings['mac_exportname'] = exportname
settings['mac_outputdir'] = output_dir
settings['mac_dllname'] = output_filename
settings['mac_targetname'] = targetname
settings['sysprefix'] = sys.prefix
settings['mac_sysprefixtype'] = 'Absolute'
sourcefilenames = []
sourcefiledirs = []
for filename in sources + objects:
dirname, filename = os.path.split(filename)
sourcefilenames.append(filename)
if not dirname in sourcefiledirs:
sourcefiledirs.append(dirname)
settings['sources'] = sourcefilenames
settings['libraries'] = libraries
settings['extrasearchdirs'] = sourcefiledirs + include_dirs + library_dirs
if self.dry_run:
print 'CALLING LINKER IN', os.getcwd()
for key, value in settings.items():
print '%20.20s %s'%(key, value)
return
# Build the export file
exportfilename = os.path.join(build_temp, exportname)
log.debug("\tCreate export file %s", exportfilename)
fp = open(exportfilename, 'w')
fp.write('%s\n'%export_symbols[0])
fp.close()
# Generate the prefix file, if needed, and put it in the settings
if self.__macros:
prefixfilename = os.path.join(os.getcwd(), os.path.join(build_temp, prefixname))
fp = open(prefixfilename, 'w')
fp.write('#include "mwerks_shcarbon_config.h"\n')
for name, value in self.__macros:
if value is None:
fp.write('#define %s\n'%name)
else:
fp.write('#define %s %s\n'%(name, value))
fp.close()
settings['prefixname'] = prefixname
# Build the XML file. We need the full pathname (only lateron, really)
# because we pass this pathname to CodeWarrior in an AppleEvent, and CW
# doesn't have a clue about our working directory.
xmlfilename = os.path.join(os.getcwd(), os.path.join(build_temp, xmlname))
log.debug("\tCreate XML file %s", xmlfilename)
import mkcwproject
xmlbuilder = mkcwproject.cwxmlgen.ProjectBuilder(settings)
xmlbuilder.generate()
xmldata = settings['tmp_projectxmldata']
fp = open(xmlfilename, 'w')
fp.write(xmldata)
fp.close()
# Generate the project. Again a full pathname.
projectfilename = os.path.join(os.getcwd(), os.path.join(build_temp, projectname))
log.debug('\tCreate project file %s', projectfilename)
mkcwproject.makeproject(xmlfilename, projectfilename)
# And build it
log.debug('\tBuild project')
mkcwproject.buildproject(projectfilename)
def _filename_to_abs(self, filename):
# Some filenames seem to be unix-like. Convert to Mac names.
## if '/' in filename and ':' in filename:
## raise DistutilsPlatformError, 'Filename may be Unix or Mac style: %s'%filename
## if '/' in filename:
## filename = macurl2path(filename)
filename = distutils.util.convert_path(filename)
if not os.path.isabs(filename):
curdir = os.getcwd()
filename = os.path.join(curdir, filename)
# Finally remove .. components
components = string.split(filename, ':')
for i in range(1, len(components)):
if components[i] == '..':
components[i] = ''
return string.join(components, ':')
def library_dir_option(self, dir):
    """Return the compiler option that adds *dir* to the library search
    path.  No flag is produced for this compiler.
    """
    # XXXX Not correct...
    return None
def runtime_library_dir_option(self, dir):
    """Return the option that adds *dir* to the runtime library search
    path.  Nothing is needed for MWerks on the Mac, so this always
    yields None.
    """
    return None
def library_option(self, lib):
    """Return the option that links *lib* into the shared library or
    executable being built.  Not applicable for this compiler, hence
    None.
    """
    return None
def find_library_file(self, dirs, lib, debug=0):
    """Search the specified list of directories for a static or shared
    library file 'lib' and return the full path to that file.  If
    'debug' true, look for a debugging version (if that makes sense on
    the current platform).  Return None if 'lib' wasn't found in any of
    the specified directories.
    """
    # The docstring (and the CCompiler base-class contract) promise None
    # for "not found"; the previous code returned 0, which is falsy but
    # breaks callers that test "result is None".  No search is
    # implemented for this compiler, so nothing is ever found.
    return None
| apache-2.0 |
codester2/devide.johannes | extra/soappy-cvp/SOAPpy/Config.py | 12 | 7423 | """
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: Config.py,v 1.9 2004/01/31 04:20:05 warnes Exp $'
from version import __version__
import copy, socket
from types import *
from NS import NS
################################################################################
# Configuration class
################################################################################
class SOAPConfig:
    """Runtime configuration for SOAPpy clients and servers.

    An instance is essentially a bag of attributes; __setattr__ keeps
    the namespace-related settings mutually consistent and protects the
    capability flags listed in __readonly (which are probed once at
    construction time and written directly into __dict__).
    """
    # These flags record detected capabilities; user code must not set them.
    __readonly = ('SSLserver', 'SSLclient', 'GSIserver', 'GSIclient')

    def __init__(self, config = None, **kw):
        """Build a configuration.

        If *config* (another SOAPConfig) is given, copy its public
        attributes (names not starting with '_'); otherwise install the
        defaults below.  Keyword arguments are then applied on top via
        setattr, so they go through __setattr__'s consistency logic.
        """
        d = self.__dict__

        if config:
            if not isinstance(config, SOAPConfig):
                raise AttributeError, \
                    "initializer must be SOAPConfig instance"

            s = config.__dict__

            # Copy only public attributes from the template instance.
            for k, v in s.items():
                if k[0] != '_':
                    d[k] = v
        else:
            # Setting debug also sets returnFaultInfo,
            # dumpHeadersIn, dumpHeadersOut, dumpSOAPIn, and dumpSOAPOut
            self.debug = 0
            self.dumpFaultInfo = 1
            # Setting namespaceStyle sets typesNamespace, typesNamespaceURI,
            # schemaNamespace, and schemaNamespaceURI
            self.namespaceStyle = '1999'
            self.strictNamespaces = 0
            self.typed = 1
            self.buildWithNamespacePrefix = 1
            self.returnAllAttrs = 0

            # Strict checking of range for floats and doubles
            self.strict_range = 0

            # Default encoding for dictionary keys
            self.dict_encoding = 'ascii'

            # New argument name handling mechanism. See
            # README.MethodParameterNaming for details
            self.specialArgs = 1

            # If unwrap_results=1 and there is only element in the struct,
            # SOAPProxy will assume that this element is the result
            # and return it rather than the struct containing it.
            # Otherwise SOAPproxy will return the struct with all the
            # elements as attributes.
            self.unwrap_results = 1

            # Automatically convert SOAP complex types, and
            # (recursively) public contents into the corresponding
            # python types. (Private subobjects have names that start
            # with '_'.)
            #
            # Conversions:
            # - faultType --> raise python exception
            # - arrayType --> array
            # - compoundType --> dictionary
            #
            self.simplify_objects = 0

            # Per-class authorization method. If this is set, before
            # calling a any class method, the specified authorization
            # method will be called. If it returns 1, the method call
            # will proceed, otherwise the call will throw with an
            # authorization error.
            self.authMethod = None

            # Capability probes below write straight into __dict__ so the
            # __readonly guard in __setattr__ is bypassed exactly once.

            # Globus Support if pyGlobus.io available
            try:
                from pyGlobus import io;
                d['GSIserver'] = 1
                d['GSIclient'] = 1
            except:
                d['GSIserver'] = 0
                d['GSIclient'] = 0

            # Server SSL support if M2Crypto.SSL available
            try:
                from M2Crypto import SSL
                d['SSLserver'] = 1
            except:
                d['SSLserver'] = 0

            # Client SSL support if socket.ssl available
            try:
                from socket import ssl
                d['SSLclient'] = 1
            except:
                d['SSLclient'] = 0

        # Apply keyword overrides last, through __setattr__, so linked
        # settings (debug, namespaceStyle, ...) stay consistent.
        for k, v in kw.items():
            if k[0] != '_':
                setattr(self, k, v)

    def __setattr__(self, name, value):
        """Set *name*, enforcing read-only flags and keeping the linked
        namespace/debug settings consistent with each other.
        """
        if name in self.__readonly:
            raise AttributeError, "readonly configuration setting"

        d = self.__dict__

        if name in ('typesNamespace', 'typesNamespaceURI',
            'schemaNamespace', 'schemaNamespaceURI'):

            # Each namespace setting is stored as a (prefix, URI) pair;
            # work out which half the caller is addressing.
            if name[-3:] == 'URI':
                base, uri = name[:-3], 1
            else:
                base, uri = name, 0

            if type(value) == StringType:
                # A bare string may be either the prefix or the URI;
                # resolve it through the NS maps in both directions.
                if NS.NSMAP.has_key(value):
                    n = (value, NS.NSMAP[value])
                elif NS.NSMAP_R.has_key(value):
                    n = (NS.NSMAP_R[value], value)
                else:
                    raise AttributeError, "unknown namespace"
            elif type(value) in (ListType, TupleType):
                # A pair is accepted in either order, depending on which
                # attribute name was used.
                if uri:
                    n = (value[1], value[0])
                else:
                    n = (value[0], value[1])
            else:
                raise AttributeError, "unknown namespace type"

            d[base], d[base + 'URI'] = n

            # Recompute namespaceStyle; '' means the combination does not
            # correspond to a known style.
            try:
                d['namespaceStyle'] = \
                    NS.STMAP_R[(d['typesNamespace'], d['schemaNamespace'])]
            except:
                d['namespaceStyle'] = ''

        elif name == 'namespaceStyle':
            value = str(value)

            if not NS.STMAP.has_key(value):
                raise AttributeError, "unknown namespace style"

            # A style implies both the types and schema namespaces.
            d[name] = value
            n = d['typesNamespace'] = NS.STMAP[value][0]
            d['typesNamespaceURI'] = NS.NSMAP[n]
            n = d['schemaNamespace'] = NS.STMAP[value][1]
            d['schemaNamespaceURI'] = NS.NSMAP[n]

        elif name == 'debug':
            # Debug fans out to all the dump/fault-reporting switches.
            d[name] = \
                d['returnFaultInfo'] = \
                d['dumpHeadersIn'] = \
                d['dumpHeadersOut'] = \
                d['dumpSOAPIn'] = \
                d['dumpSOAPOut'] = value

        else:
            d[name] = value
Config = SOAPConfig()
| bsd-3-clause |
phihag/youtube-dl | youtube_dl/extractor/nova.py | 43 | 7050 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
clean_html,
unified_strdate,
)
class NovaIE(InfoExtractor):
    IE_DESC = 'TN.cz, Prásk.tv, Nova.cz, Novaplus.cz, FANDA.tv, Krásná.cz and Doma.cz'
    # Captures the nova.cz sub-site (used for site-specific handling below)
    # and the last path component as the display id.
    _VALID_URL = r'https?://(?:[^.]+\.)?(?P<site>tv(?:noviny)?|tn|novaplus|vymena|fanda|krasna|doma|prask)\.nova\.cz/(?:[^/]+/)+(?P<id>[^/]+?)(?:\.html|/|$)'
    _TESTS = [{
        'url': 'http://tvnoviny.nova.cz/clanek/novinky/co-na-sebe-sportaci-praskli-vime-jestli-pujde-hrdlicka-na-materskou.html?utm_source=tvnoviny&utm_medium=cpfooter&utm_campaign=novaplus',
        'info_dict': {
            'id': '1608920',
            'display_id': 'co-na-sebe-sportaci-praskli-vime-jestli-pujde-hrdlicka-na-materskou',
            'ext': 'flv',
            'title': 'Duel: Michal Hrdlička a Petr Suchoň',
            'description': 'md5:d0cc509858eee1b1374111c588c6f5d5',
            'thumbnail': r're:^https?://.*\.(?:jpg)',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }, {
        'url': 'http://tn.nova.cz/clanek/tajemstvi-ukryte-v-podzemi-specialni-nemocnice-v-prazske-krci.html#player_13260',
        'md5': '1dd7b9d5ea27bc361f110cd855a19bd3',
        'info_dict': {
            'id': '1757139',
            'display_id': 'tajemstvi-ukryte-v-podzemi-specialni-nemocnice-v-prazske-krci',
            'ext': 'mp4',
            'title': 'Podzemní nemocnice v pražské Krči',
            'description': 'md5:f0a42dd239c26f61c28f19e62d20ef53',
            'thumbnail': r're:^https?://.*\.(?:jpg)',
        }
    }, {
        'url': 'http://novaplus.nova.cz/porad/policie-modrava/video/5591-policie-modrava-15-dil-blondynka-na-hrbitove',
        'info_dict': {
            'id': '1756825',
            'display_id': '5591-policie-modrava-15-dil-blondynka-na-hrbitove',
            'ext': 'flv',
            'title': 'Policie Modrava - 15. díl - Blondýnka na hřbitově',
            'description': 'md5:dc24e50be5908df83348e50d1431295e',  # Make sure this description is clean of html tags
            'thumbnail': r're:^https?://.*\.(?:jpg)',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }, {
        'url': 'http://novaplus.nova.cz/porad/televizni-noviny/video/5585-televizni-noviny-30-5-2015/',
        'info_dict': {
            'id': '1756858',
            'ext': 'flv',
            'title': 'Televizní noviny - 30. 5. 2015',
            'thumbnail': r're:^https?://.*\.(?:jpg)',
            'upload_date': '20150530',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }, {
        'url': 'http://fanda.nova.cz/clanek/fun-and-games/krvavy-epos-zaklinac-3-divoky-hon-vychazi-vyhrajte-ho-pro-sebe.html',
        'info_dict': {
            'id': '1753621',
            'ext': 'mp4',
            'title': 'Zaklínač 3: Divoký hon',
            'description': 're:.*Pokud se stejně jako my nemůžete.*',
            'thumbnail': r're:https?://.*\.jpg(\?.*)?',
            'upload_date': '20150521',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }, {
        'url': 'http://sport.tn.nova.cz/clanek/sport/hokej/nhl/zivot-jde-dal-hodnotil-po-vyrazeni-z-playoff-jiri-sekac.html',
        'only_matching': True,
    }, {
        'url': 'http://fanda.nova.cz/clanek/fun-and-games/krvavy-epos-zaklinac-3-divoky-hon-vychazi-vyhrajte-ho-pro-sebe.html',
        'only_matching': True,
    }, {
        'url': 'http://doma.nova.cz/clanek/zdravi/prijdte-se-zapsat-do-registru-kostni-drene-jiz-ve-stredu-3-cervna.html',
        'only_matching': True,
    }, {
        'url': 'http://prask.nova.cz/clanek/novinky/co-si-na-sobe-nase-hvezdy-nechaly-pojistit.html',
        'only_matching': True,
    }, {
        'url': 'http://tv.nova.cz/clanek/novinky/zivot-je-zivot-bondovsky-trailer.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        display_id = mobj.group('id')
        site = mobj.group('site')

        webpage = self._download_webpage(url, display_id)

        # The numeric video id appears in several different markup forms
        # across the nova.cz sites; try each pattern in turn.
        video_id = self._search_regex(
            [r"(?:media|video_id)\s*:\s*'(\d+)'",
             r'media=(\d+)',
             r'id="article_video_(\d+)"',
             r'id="player_(\d+)"'],
            webpage, 'video id')

        config_url = self._search_regex(
            r'src="(http://tn\.nova\.cz/bin/player/videojs/config\.php\?[^"]+)"',
            webpage, 'config url', default=None)

        if not config_url:
            # The player config URL is not embedded in the page; build it
            # from a per-site numeric id instead (scraped from the page if
            # present, otherwise taken from this hard-coded mapping).
            DEFAULT_SITE_ID = '23000'
            SITES = {
                'tvnoviny': DEFAULT_SITE_ID,
                'novaplus': DEFAULT_SITE_ID,
                'vymena': DEFAULT_SITE_ID,
                'krasna': DEFAULT_SITE_ID,
                'fanda': '30',
                'tn': '30',
                'doma': '30',
            }

            site_id = self._search_regex(
                r'site=(\d+)', webpage, 'site id', default=None) or SITES.get(site, DEFAULT_SITE_ID)

            config_url = ('http://tn.nova.cz/bin/player/videojs/config.php?site=%s&media=%s&jsVar=vjsconfig'
                          % (site_id, video_id))

        # The config endpoint returns a JavaScript variable assignment;
        # transform_source slices out just the JSON object literal.
        config = self._download_json(
            config_url, display_id,
            'Downloading config JSON',
            transform_source=lambda s: s[s.index('{'):s.rindex('}') + 1])

        mediafile = config['mediafile']
        video_url = mediafile['src']

        # rtmp(e) URLs are split into server/app/playpath pieces for the
        # rtmp downloader; anything else is passed through as a plain URL.
        m = re.search(r'^(?P<url>rtmpe?://[^/]+/(?P<app>[^/]+?))/&*(?P<playpath>.+)$', video_url)
        if m:
            formats = [{
                'url': m.group('url'),
                'app': m.group('app'),
                'play_path': m.group('playpath'),
                'player_path': 'http://tvnoviny.nova.cz/static/shared/app/videojs/video-js.swf',
                'ext': 'flv',
            }]
        else:
            formats = [{
                'url': video_url,
            }]
        self._sort_formats(formats)

        title = mediafile.get('meta', {}).get('title') or self._og_search_title(webpage)
        description = clean_html(self._og_search_description(webpage, default=None))
        thumbnail = config.get('poster')

        # The upload date lives in different places per site: novaplus
        # encodes it in the URL slug, fanda prints it in the page markup.
        if site == 'novaplus':
            upload_date = unified_strdate(self._search_regex(
                r'(\d{1,2}-\d{1,2}-\d{4})$', display_id, 'upload date', default=None))
        elif site == 'fanda':
            upload_date = unified_strdate(self._search_regex(
                r'<span class="date_time">(\d{1,2}\.\d{1,2}\.\d{4})', webpage, 'upload date', default=None))
        else:
            upload_date = None

        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'upload_date': upload_date,
            'thumbnail': thumbnail,
            'formats': formats,
        }
| unlicense |
dpmatthews/rose | metomi/rose/apps/fcm_make.py | 4 | 10750 | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright (C) 2012-2019 British Crown (Met Office) & Contributors.
#
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
# ----------------------------------------------------------------------------
"""Builtin application: run "fcm make"."""
import os
from pipes import quote
import shlex
import sys
from tempfile import mkdtemp
from metomi.rose.env import (
env_export, env_var_process, UnboundEnvironmentVariableError)
from metomi.rose.app_run import BuiltinApp, ConfigValueError
from metomi.rose.fs_util import FileSystemEvent
from metomi.rose.popen import RosePopenError
# Indices into an (original, continuation) pair of task names: see
# ORIG_CONT_MAP and the orig-cont-map setting in FCMMakeApp below.
ORIG = 0
CONT = 1
class FCMMakeApp(BuiltinApp):
    """Run "fcm make"."""

    # Pattern for the optional per-make configuration file name.
    CFG_FILE_NAME = "fcm-make%(name)s.cfg"
    # Default number of parallel build jobs (passed to "fcm make -j").
    OPT_JOBS = "4"
    SCHEME = "fcm_make"
    # Default (original-task-name, continuation-task-name) fragment pair,
    # i.e. ("fcm_make", "fcm_make2").
    ORIG_CONT_MAP = (SCHEME, SCHEME + "2")

    def get_app_key(self, name):
        """Return the fcm_make* application key if name is fcm_make2*."""
        return name.replace(self.ORIG_CONT_MAP[1], self.ORIG_CONT_MAP[0])

    def run(self, app_runner, conf_tree, opts, args, uuid, work_files):
        """Run "fcm make".

        This application will only work under "rose task-run".

        Dispatches to _run_cont when the current task name contains the
        continuation fragment, otherwise to _run_orig.
        """
        # Determine if this is an original task or a continuation task
        orig_cont_map = _conf_value(
            conf_tree, ["orig-cont-map"], ":".join(self.ORIG_CONT_MAP)
        ).split(":", 1)
        task = app_runner.suite_engine_proc.get_task_props()
        if orig_cont_map[CONT] in task.task_name:
            return self._run_cont(
                app_runner, conf_tree, opts, args, uuid, task, orig_cont_map)
        else:
            return self._run_orig(
                app_runner, conf_tree, opts, args, uuid, task, orig_cont_map)

    def _get_fcm_make_cmd(self, conf_tree, opts, args, dest, make_name):
        """Return a list containing the "fcm make" command to invoke."""
        cmd = ["fcm", "make"]
        if make_name is None:
            make_name = ""
        cfg_file_name = self.CFG_FILE_NAME % {"name": make_name}
        # Use an explicit config file only when it exists locally and a
        # destination other than the current directory is in use.
        if os.access(cfg_file_name, os.F_OK | os.R_OK) and dest:
            cmd += ["-f", os.path.abspath(cfg_file_name)]
        if dest:
            cmd += ["-C", dest]
        if make_name:
            # "-n NAME" option requires fcm-2015.05+
            cmd += ["-n", make_name]
        if opts.new_mode:
            cmd.append("-N")
        cmd += ["-j", _conf_value(
            conf_tree, ["opt.jobs"],
            os.getenv("ROSE_TASK_N_JOBS", self.OPT_JOBS))]
        cmd_args = _conf_value(
            conf_tree, ["args"], os.getenv("ROSE_TASK_OPTIONS"))
        if cmd_args:
            cmd += shlex.split(cmd_args)
        if args:
            cmd += args
        return cmd

    def _invoke_fcm_make(self, app_runner, conf_tree, opts, args, uuid, task,
                         dests, fast_root, make_name):
        """Wrap "fcm make" call, may use fast_root working directory.

        dests[0] is the local destination (may be remote-prefixed entries
        after it).  When fast_root is set, the build runs in a temporary
        directory under it, rsync'ed from/to dests[0] around the build.
        """
        if opts.new_mode:
            # Remove items in destinations in new mode
            # Ensure that it is not the current working directory, which should
            # already be cleaned.
            # A sentinel file named after the task uuid marks the current
            # working directory; any destination containing it is skipped.
            open(uuid, "w").close()
            try:
                for dest in dests:
                    if dest and ":" in dest:
                        # Remove a remote destination
                        auth, name = dest.split(":", 1)
                        cmd = app_runner.popen.get_cmd(
                            "ssh", auth, (
                                "! test -e %(name)s/%(uuid)s && " +
                                "(ls -d %(name)s || true) && rm -fr %(name)s"
                            ) % {"name": quote(name), "uuid": uuid})
                        out = app_runner.popen.run_ok(*cmd)[0]
                        # The remote "ls -d" echoes the name back only if it
                        # existed, so report a DELETE event in that case.
                        for line in out.splitlines():
                            if line == name:
                                app_runner.handle_event(FileSystemEvent(
                                    FileSystemEvent.DELETE, dest))
                    elif dest and not os.path.exists(os.path.join(dest, uuid)):
                        # Remove a local destination
                        app_runner.fs_util.delete(dest)
            finally:
                os.unlink(uuid)
        # "rsync" existing dest to fast working directory, if relevant
        # Only work with fcm-2015.05+
        dest = dests[0]
        if fast_root:
            # N.B. Name in "little endian", like cycle task ID
            prefix = ".".join([
                task.task_name, task.task_cycle_time, task.suite_name])
            dest = mkdtemp(prefix=prefix, dir=fast_root)
            # N.B. Don't use app_runner.popen.get_cmd("rsync") as we are using
            # "rsync" for a local copy.
            rsync_prefixes = ["rsync", "-a"]
            if not dests[0]:
                dests[0] = "."
            if os.path.isdir(dests[0]):
                cmd = rsync_prefixes + [dests[0] + os.sep, dest + os.sep]
                try:
                    app_runner.popen.run_simple(*cmd)
                except RosePopenError:
                    # Don't leave the temporary build directory behind.
                    app_runner.fs_util.delete(dest)
                    raise
        # Launch "fcm make"
        cmd = self._get_fcm_make_cmd(conf_tree, opts, args, dest, make_name)
        try:
            app_runner.popen(*cmd, stdout=sys.stdout, stderr=sys.stderr)
        finally:
            # "rsync" fast working directory to dests[0], if relevant
            # (preserving dests[0]'s original permission bits).
            if dest != dests[0] and os.path.isdir(dest):
                app_runner.fs_util.makedirs(dests[0])
                stat = os.stat(dests[0])
                cmd = rsync_prefixes + [dest + os.sep, dests[0] + os.sep]
                app_runner.popen.run_simple(*cmd)
                os.chmod(dests[0], stat.st_mode)
                app_runner.fs_util.delete(dest)

    def _run_orig(self, app_runner, conf_tree, opts, args, uuid, task,
                  orig_cont_map):
        """Run "fcm make" in original location."""
        # Determine the destination
        dest_orig_str = _conf_value(conf_tree, ["dest-orig"])
        if (dest_orig_str is None and
                _conf_value(conf_tree, ["use-pwd"]) not in ["True", "true"]):
            dest_orig_str = os.path.join("share", task.task_name)
        dest_orig = dest_orig_str
        if dest_orig is not None and not os.path.isabs(dest_orig):
            dest_orig = os.path.join(task.suite_dir, dest_orig)
        dests = [dest_orig]

        # Determine if mirror is necessary or not
        # Determine the name of the continuation task
        task_name_cont = task.task_name.replace(
            orig_cont_map[ORIG], orig_cont_map[CONT])
        auth = app_runner.suite_engine_proc.get_task_auth(
            task.suite_name, task_name_cont)
        if auth is not None:
            # A continuation task exists on a remote host: work out the
            # mirror destination ("auth:path") relative to the suite dir.
            dest_cont = _conf_value(conf_tree, ["dest-cont"])
            if dest_cont is None:
                if dest_orig_str is not None:
                    dest_cont = dest_orig_str
                elif dest_orig:
                    dest_cont = os.path.join("share", task.task_name)
                else:
                    dest_cont = os.path.join(
                        "work", task.task_cycle_time, task_name_cont)
            if not os.path.isabs(dest_cont):
                dest_cont = os.path.join(task.suite_dir_rel, dest_cont)
            dests.append(auth + ":" + dest_cont)

            # Environment variables for backward compat. "fcm make"
            # supports arguments as extra configurations since version
            # 2014-03.
            for name in ["ROSE_TASK_MIRROR_TARGET", "MIRROR_TARGET"]:
                env_export(name, dests[CONT], app_runner.event_handler)

            # "mirror" for backward compat. Use can specify a null string as
            # value to switch off the mirror target configuration.
            mirror_step = _conf_value(conf_tree, ["mirror-step"], "mirror")
            if mirror_step:
                args.append("%s.target=%s" % (mirror_step, dests[CONT]))
                # "mirror.prop{config-file.name}" requires fcm-2015.05+
                make_name_cont = _conf_value(
                    conf_tree, ["make-name-cont"],
                    orig_cont_map[CONT].replace(orig_cont_map[ORIG], ""))
                if make_name_cont:
                    args.append("%s.prop{config-file.name}=%s" % (
                        mirror_step, make_name_cont))

        # Launch "fcm make"
        self._invoke_fcm_make(
            app_runner, conf_tree, opts, args, uuid, task, dests,
            _conf_value(conf_tree, ["fast-dest-root-orig"]),
            _conf_value(conf_tree, ["make-name-orig"]))

    def _run_cont(self, app_runner, conf_tree, opts, args, uuid, task,
                  orig_cont_map):
        """Continue "fcm make" in mirror location."""
        # Determine the destination: explicit dest-cont, else dest-orig,
        # else (unless use-pwd) the share directory of the original task.
        dest_cont = _conf_value(conf_tree, ["dest-cont"])
        if dest_cont is None:
            dest_cont = _conf_value(conf_tree, ["dest-orig"])
        if (dest_cont is None and
                _conf_value(conf_tree, ["use-pwd"]) not in ["True", "true"]):
            task_name_orig = task.task_name.replace(
                orig_cont_map[CONT], orig_cont_map[ORIG])
            dest_cont = os.path.join("share", task_name_orig)
        if dest_cont and not os.path.isabs(dest_cont):
            dest_cont = os.path.join(task.suite_dir, dest_cont)
        # Launch "fcm make"
        self._invoke_fcm_make(
            app_runner, conf_tree, opts, args, uuid, task, [dest_cont],
            _conf_value(conf_tree, ["fast-dest-root-cont"]),
            _conf_value(conf_tree, ["make-name-cont"],
                        orig_cont_map[CONT].replace(orig_cont_map[ORIG], "")))
def _conf_value(conf_tree, keys, default=None):
"""Return conf setting value, with env var processed."""
value = conf_tree.node.get_value(keys, default)
if value is None:
return
try:
return env_var_process(value)
except UnboundEnvironmentVariableError as exc:
raise ConfigValueError(keys, value, exc)
| gpl-3.0 |
pku9104038/edx-platform | i18n/tests/test_config.py | 2 | 1163 | import os
from unittest import TestCase
from i18n.config import Configuration, LOCALE_DIR, CONFIGURATION
class TestConfiguration(TestCase):
    """Tests functionality of i18n/config.py"""

    def test_config(self):
        """Loading the real config.yaml yields 'en' as the source locale."""
        path = os.path.normpath(os.path.join(LOCALE_DIR, 'config.yaml'))
        self.assertEqual(Configuration(path).source_locale, 'en')

    def test_no_config(self):
        """Pointing Configuration at a missing file raises an error."""
        path = os.path.normpath(os.path.join(LOCALE_DIR, 'no_such_file'))
        with self.assertRaises(Exception):
            Configuration(path)

    def test_valid_configuration(self):
        """
        Make sure we have a valid configuration file,
        and that it contains an 'en' locale.
        Also check values of dummy_locale and source_locale.
        """
        self.assertIsNotNone(CONFIGURATION)
        known_locales = CONFIGURATION.locales
        self.assertIsNotNone(known_locales)
        self.assertIsInstance(known_locales, list)
        self.assertIn('en', known_locales)
        self.assertEqual('eo', CONFIGURATION.dummy_locale)
        self.assertEqual('en', CONFIGURATION.source_locale)
| agpl-3.0 |
davidzchen/tensorflow | tensorflow/python/keras/optimizer_v2/ftrl_test.py | 5 | 21013 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Ftrl operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras.optimizer_v2 import ftrl
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adagrad
from tensorflow.python.training import gradient_descent
class FtrlOptimizerTest(test.TestCase):
def doTestFtrlwithoutRegularization(self, use_resource=False):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [dtypes.float32]:
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
if use_resource:
var0 = variables.Variable([0.0, 0.0], dtype=dtype)
var1 = variables.Variable([0.0, 0.0], dtype=dtype)
else:
var0 = variables.Variable([0.0, 0.0], dtype=dtype)
var1 = variables.Variable([0.0, 0.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.Ftrl(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllClose([0.0, 0.0], v0_val)
self.assertAllClose([0.0, 0.0], v1_val)
# Run 3 steps FTRL
for _ in range(3):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-2.60260963, -4.29698515]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.28432083, -0.56694895]), v1_val)
def testFtrlWithoutRegularization(self):
self.doTestFtrlwithoutRegularization(use_resource=False)
def testResourceFtrlWithoutRegularization(self):
self.doTestFtrlwithoutRegularization(use_resource=True)
def testFtrlwithoutRegularization2(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [dtypes.half, dtypes.float32]:
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.Ftrl(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run 3 steps FTRL
for _ in range(3):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-2.55607247, -3.98729396]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.28232238, -0.56096673]), v1_val)
def testMinimizeSparseResourceVariable(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
var0 = variables.Variable([[1.0, 2.0]], dtype=dtype)
x = constant_op.constant([[4.0], [5.0]], dtype=dtype)
def loss():
pred = math_ops.matmul(embedding_ops.embedding_lookup([var0], [0]), x) # pylint: disable=cell-var-from-loop
return pred * pred
sgd_op = ftrl.Ftrl(1.0).minimize(loss, var_list=[var0])
self.evaluate(variables.global_variables_initializer())
# Fetch params to validate initial values
self.assertAllCloseAccordingToType([[1.0, 2.0]], self.evaluate(var0))
# Run 1 step of sgd
sgd_op.run()
# Validate updated params
self.assertAllCloseAccordingToType([[0, 1]],
self.evaluate(var0),
atol=0.01)
def testFtrlWithL1(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [dtypes.half, dtypes.float32]:
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.Ftrl(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=0.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run 10 steps FTRL
for _ in range(10):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-7.66718769, -10.91273689]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.93460727, -1.86147261]), v1_val)
def testFtrlWithBeta(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [dtypes.half, dtypes.float32]:
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.Ftrl(3.0, initial_accumulator_value=0.1, beta=0.1)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run 10 steps FTRL
for _ in range(10):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-6.096838, -9.162214]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.717741, -1.425132]), v1_val)
def testFtrlWithL2_Beta(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [dtypes.half, dtypes.float32]:
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.Ftrl(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.1,
beta=0.1)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run 10 steps FTRL
for _ in range(10):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-2.735487, -4.704625]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.294335, -0.586556]), v1_val)
def testFtrlWithL1_L2(self):
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [dtypes.half, dtypes.float32]:
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.Ftrl(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run 10 steps FTRL
for _ in range(10):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-0.24059935, -0.46829352]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.02406147, -0.04830509]), v1_val)
def testFtrlWithL1_L2_L2Shrinkage(self):
"""Test the new FTRL op with support for l2 shrinkage.
The addition of this parameter which places a constant pressure on weights
towards the origin causes the gradient descent trajectory to differ. The
weights will tend to have smaller magnitudes with this parameter set.
"""
# TODO(tanzheny, omalleyt): Fix test in eager mode.
for dtype in [dtypes.half, dtypes.float32]:
with ops.Graph().as_default(), self.cached_session(use_gpu=True):
var0 = variables.Variable([1.0, 2.0], dtype=dtype)
var1 = variables.Variable([4.0, 3.0], dtype=dtype)
grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)
opt = ftrl.Ftrl(
3.0,
initial_accumulator_value=0.1,
l1_regularization_strength=0.001,
l2_regularization_strength=2.0,
l2_shrinkage_regularization_strength=0.1)
update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
self.evaluate(variables.global_variables_initializer())
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
# Run 10 steps FTRL
for _ in range(10):
update.run()
v0_val, v1_val = self.evaluate([var0, var1])
self.assertAllCloseAccordingToType(
np.array([-0.22578995, -0.44345796]), v0_val)
self.assertAllCloseAccordingToType(
np.array([-0.14378493, -0.13229476]), v1_val)
def testFtrlWithL1_L2_L2ShrinkageSparse(self):
  """Tests the new FTRL op with support for l2 shrinkage on sparse grads."""
  # TODO(tanzheny, omalleyt): Fix test in eager mode.
  for dtype in [dtypes.half, dtypes.float32]:
    with ops.Graph().as_default(), self.cached_session(use_gpu=True):
      var0 = variables.Variable([[1.0], [2.0]], dtype=dtype)
      var1 = variables.Variable([[4.0], [3.0]], dtype=dtype)
      # Each IndexedSlices touches a single row (index 0 of var0, index 1 of
      # var1), so the untouched row of each variable should keep its
      # initial value after training.
      grads0 = ops.IndexedSlices(
          constant_op.constant([0.1], shape=[1, 1], dtype=dtype),
          constant_op.constant([0]), constant_op.constant([2, 1]))
      grads1 = ops.IndexedSlices(
          constant_op.constant([0.02], shape=[1, 1], dtype=dtype),
          constant_op.constant([1]), constant_op.constant([2, 1]))

      opt = ftrl.Ftrl(
          3.0,  # learning rate
          initial_accumulator_value=0.1,
          l1_regularization_strength=0.001,
          l2_regularization_strength=2.0,
          l2_shrinkage_regularization_strength=0.1)
      update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
      self.evaluate(variables.global_variables_initializer())

      v0_val, v1_val = self.evaluate([var0, var1])
      self.assertAllCloseAccordingToType([[1.0], [2.0]], v0_val)
      self.assertAllCloseAccordingToType([[4.0], [3.0]], v1_val)

      # Run 10 steps FTRL
      for _ in range(10):
        update.run()

      # Only the updated rows change; the second row of var0 and first row of
      # var1 are expected to remain at their initial values.
      v0_val, v1_val = self.evaluate([var0, var1])
      self.assertAllCloseAccordingToType([[-0.22578995], [2.]], v0_val)
      self.assertAllCloseAccordingToType([[4.], [-0.13229476]], v1_val)
def testFtrlWithL2ShrinkageDoesNotChangeLrSchedule(self):
  """Verifies that l2 shrinkage in FTRL does not change lr schedule."""
  # TODO(tanzheny, omalleyt): Fix test in eager mode.
  for dtype in [dtypes.half, dtypes.float32]:
    with ops.Graph().as_default(), self.cached_session(use_gpu=True) as sess:
      # Identical starting values and gradients; the only difference between
      # the two optimizers below is the l2 shrinkage term on opt0.
      var0 = variables.Variable([1.0, 2.0], dtype=dtype)
      var1 = variables.Variable([1.0, 2.0], dtype=dtype)
      grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
      grads1 = constant_op.constant([0.1, 0.2], dtype=dtype)

      opt0 = ftrl.Ftrl(
          3.0,
          initial_accumulator_value=0.1,
          l1_regularization_strength=0.001,
          l2_regularization_strength=2.0,
          l2_shrinkage_regularization_strength=0.1)
      opt1 = ftrl.Ftrl(
          3.0,
          initial_accumulator_value=0.1,
          l1_regularization_strength=0.001,
          l2_regularization_strength=2.0)
      update0 = opt0.apply_gradients([(grads0, var0)])
      update1 = opt1.apply_gradients([(grads1, var1)])
      self.evaluate(variables.global_variables_initializer())

      v0_val, v1_val = self.evaluate([var0, var1])
      self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
      self.assertAllCloseAccordingToType([1.0, 2.0], v1_val)

      # Run 10 steps FTRL
      for _ in range(10):
        update0.run()
        update1.run()

      v0_val, v1_val = self.evaluate([var0, var1])
      # var0 is experiencing L2 shrinkage so it should be smaller than var1
      # in magnitude.
      self.assertTrue((v0_val**2 < v1_val**2).all())
      accum0 = sess.run(opt0.get_slot(var0, "accumulator"))
      accum1 = sess.run(opt1.get_slot(var1, "accumulator"))
      # L2 shrinkage should not change how we update grad accumulator.
      self.assertAllCloseAccordingToType(accum0, accum1)
def applyOptimizer(self, opt, dtype, steps=5, is_sparse=False):
  """Runs `opt` on a fixed, zero-initialized two-variable problem.

  Args:
    opt: an instantiated optimizer to exercise.
    dtype: dtype for the variables and gradients.
    steps: number of update steps to run.
    is_sparse: if True, feed the gradients as IndexedSlices touching one
      row per variable instead of dense tensors.

  Returns:
    A `(v0_val, v1_val)` tuple with the final variable values, so callers
    can compare two optimizers run on the same problem.
  """
  if is_sparse:
    var0 = variables.Variable([[0.0], [0.0]], dtype=dtype)
    var1 = variables.Variable([[0.0], [0.0]], dtype=dtype)
    grads0 = ops.IndexedSlices(
        constant_op.constant([0.1], shape=[1, 1], dtype=dtype),
        constant_op.constant([0]), constant_op.constant([2, 1]))
    grads1 = ops.IndexedSlices(
        constant_op.constant([0.02], shape=[1, 1], dtype=dtype),
        constant_op.constant([1]), constant_op.constant([2, 1]))
  else:
    var0 = variables.Variable([0.0, 0.0], dtype=dtype)
    var1 = variables.Variable([0.0, 0.0], dtype=dtype)
    grads0 = constant_op.constant([0.1, 0.2], dtype=dtype)
    grads1 = constant_op.constant([0.01, 0.02], dtype=dtype)

  update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
  self.evaluate(variables.global_variables_initializer())

  # Sanity-check the zero initialization before training.
  v0_val, v1_val = self.evaluate([var0, var1])
  if is_sparse:
    self.assertAllCloseAccordingToType([[0.0], [0.0]], v0_val)
    self.assertAllCloseAccordingToType([[0.0], [0.0]], v1_val)
  else:
    self.assertAllCloseAccordingToType([0.0, 0.0], v0_val)
    self.assertAllCloseAccordingToType([0.0, 0.0], v1_val)

  # Run Ftrl for a few steps
  for _ in range(steps):
    update.run()

  v0_val, v1_val = self.evaluate([var0, var1])
  return v0_val, v1_val
# When variables are initialized to zero, FTRL-Proximal has two properties:
# 1. Without L1 & L2 regularization but with a fixed learning rate,
#    FTRL-Proximal is identical to GradientDescent.
# 2. Without L1 & L2 regularization but with an adaptive learning rate,
#    FTRL-Proximal is identical to Adagrad.
# So, based on these two properties, we test whether our implementation of
# FTRL-Proximal performs the same updates as Adagrad or GradientDescent.
def testEquivAdagradwithoutRegularization(self):
  """FTRL with adaptive learning rate and no regularization matches Adagrad."""
  # TODO(tanzheny, omalleyt): Fix test in eager mode.
  for dtype in [dtypes.half, dtypes.float32]:
    with ops.Graph().as_default(), self.cached_session(use_gpu=True):
      val0, val1 = self.applyOptimizer(
          ftrl.Ftrl(
              3.0,
              # Adagrad learning rate
              learning_rate_power=-0.5,
              initial_accumulator_value=0.1,
              l1_regularization_strength=0.0,
              l2_regularization_strength=0.0),
          dtype)

    # Run the reference optimizer in a fresh graph so the two runs are
    # fully independent.
    with ops.Graph().as_default(), self.cached_session(use_gpu=True):
      val2, val3 = self.applyOptimizer(
          adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1), dtype)

    self.assertAllCloseAccordingToType(val0, val2)
    self.assertAllCloseAccordingToType(val1, val3)
def testEquivSparseAdagradwithoutRegularization(self):
  """Same as the dense Adagrad-equivalence test, but with sparse gradients."""
  # TODO(tanzheny, omalleyt): Fix test in eager mode.
  for dtype in [dtypes.half, dtypes.float32]:
    with ops.Graph().as_default(), self.cached_session():
      val0, val1 = self.applyOptimizer(
          ftrl.Ftrl(
              3.0,
              # Adagrad learning rate
              learning_rate_power=-0.5,
              initial_accumulator_value=0.1,
              l1_regularization_strength=0.0,
              l2_regularization_strength=0.0),
          dtype,
          is_sparse=True)

    # Reference run in an independent graph.
    with ops.Graph().as_default(), self.cached_session():
      val2, val3 = self.applyOptimizer(
          adagrad.AdagradOptimizer(3.0, initial_accumulator_value=0.1),
          dtype,
          is_sparse=True)

    self.assertAllCloseAccordingToType(val0, val2)
    self.assertAllCloseAccordingToType(val1, val3)
def testEquivSparseGradientDescentwithoutRegularization(self):
  """FTRL with a fixed learning rate matches GradientDescent (sparse grads)."""
  # TODO(tanzheny, omalleyt): Fix test in eager mode.
  for dtype in [dtypes.half, dtypes.float32]:
    with ops.Graph().as_default(), self.cached_session(use_gpu=True):
      val0, val1 = self.applyOptimizer(
          ftrl.Ftrl(
              3.0,
              # Fixed learning rate
              learning_rate_power=-0.0,
              initial_accumulator_value=0.1,
              l1_regularization_strength=0.0,
              l2_regularization_strength=0.0),
          dtype,
          is_sparse=True)

    # Reference run in an independent graph.
    with ops.Graph().as_default(), self.cached_session(use_gpu=True):
      val2, val3 = self.applyOptimizer(
          gradient_descent.GradientDescentOptimizer(3.0),
          dtype,
          is_sparse=True)

    self.assertAllCloseAccordingToType(val0, val2)
    self.assertAllCloseAccordingToType(val1, val3)
def testEquivGradientDescentwithoutRegularization(self):
  """FTRL with a fixed learning rate matches GradientDescent (dense grads)."""
  # TODO(tanzheny, omalleyt): Fix test in eager mode.
  for dtype in [dtypes.half, dtypes.float32]:
    with ops.Graph().as_default(), self.cached_session(use_gpu=True):
      val0, val1 = self.applyOptimizer(
          ftrl.Ftrl(
              3.0,
              # Fixed learning rate
              learning_rate_power=-0.0,
              initial_accumulator_value=0.1,
              l1_regularization_strength=0.0,
              l2_regularization_strength=0.0),
          dtype)

    # Reference run in an independent graph.
    with ops.Graph().as_default(), self.cached_session(use_gpu=True):
      val2, val3 = self.applyOptimizer(
          gradient_descent.GradientDescentOptimizer(3.0), dtype)

    self.assertAllCloseAccordingToType(val0, val2)
    self.assertAllCloseAccordingToType(val1, val3)
# Entry point: run every test case in this file under the TF test harness.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
jphnoel/udata | udata/harvest/tests/test_ods_harvester.py | 1 | 5618 | from __future__ import unicode_literals
import logging
from datetime import datetime
from os.path import join, dirname
import httpretty
from udata.models import Dataset, License
from udata.tests import TestCase, DBTestMixin
from udata.core.organization.factories import OrganizationFactory
from .factories import HarvestSourceFactory
from .. import actions
from ..backends.ods import OdsHarvester
log = logging.getLogger(__name__)
# Base URL of the OpenDataSoft portal that httpretty will impersonate.
ODS_URL = 'http://etalab-sandbox.opendatasoft.com'

# Canned search-API payload, loaded once at import time and served by the
# mocked endpoint registered in the test below.
json_filename = join(dirname(__file__), 'search-ods.json')
with open(json_filename) as f:
    ODS_RESPONSE = f.read()
class OdsHarvesterTest(DBTestMixin, TestCase):
    """Integration-style tests for the OpenDataSoft ('ods') harvest backend."""

    def setUp(self):
        # Create fake licenses
        # The backend maps remote license ids through OdsHarvester.LICENSES,
        # so each mapped License document must exist before a harvest runs.
        for license_id in OdsHarvester.LICENSES.values():
            License.objects.create(id=license_id, title=license_id)

    @httpretty.activate
    def test_simple(self):
        """Run a full harvest against a mocked ODS search API."""
        org = OrganizationFactory()
        source = HarvestSourceFactory(backend='ods',
                                      url=ODS_URL,
                                      organization=org)

        api_url = ''.join((ODS_URL, '/api/datasets/1.0/search/'))
        httpretty.register_uri(httpretty.GET, api_url,
                               body=ODS_RESPONSE,
                               content_type='application/json')

        actions.run(source.slug)

        # The backend pages through results starting at 0, 50 rows at a time.
        self.assertEqual(
            httpretty.last_request().querystring,
            {'start': ['0'], 'rows': ['50']}
        )

        source.reload()

        job = source.get_last_job()
        # The canned response contains 3 items but only 2 datasets result
        # (the third, test-c, is excluded -- asserted at the bottom).
        self.assertEqual(len(job.items), 3)
        self.assertEqual(job.status, 'done')

        datasets = {d.extras["harvest:remote_id"]: d for d in Dataset.objects}
        self.assertEqual(len(datasets), 2)

        self.assertIn("test-a", datasets)
        d = datasets["test-a"]
        self.assertEqual(d.title, "test-a")
        self.assertEqual(d.description, "test-a-description")
        # Tags are expected sorted and slugified.
        self.assertEqual(d.tags, ['culture',
                                  'environment',
                                  'heritage',
                                  'keyword1',
                                  'keyword2'])
        self.assertEqual(d.extras["ods:references"], "http://example.com")
        self.assertTrue(d.extras["ods:has_records"])
        self.assertEqual(d.extras["harvest:remote_id"], "test-a")
        self.assertEqual(d.extras["harvest:domain"],
                         "etalab-sandbox.opendatasoft.com")
        self.assertEqual(d.extras["ods:url"],
                         ("http://etalab-sandbox.opendatasoft.com"
                          "/explore/dataset/test-a/"))
        self.assertEqual(d.license.id, "fr-lo")

        # One resource per export format exposed by the portal.
        self.assertEqual(len(d.resources), 2)
        resource = d.resources[0]
        self.assertEqual(resource.title, 'Export au format CSV')
        self.assertIsNotNone(resource.description)
        self.assertEqual(resource.format, 'csv')
        self.assertEqual(resource.mime, 'text/csv')
        self.assertIsInstance(resource.modified, datetime)
        self.assertEqual(resource.url,
                         ("http://etalab-sandbox.opendatasoft.com/"
                          "explore/dataset/test-a/download"
                          "?format=csv&timezone=Europe/Berlin"
                          "&use_labels_for_header=true"))
        resource = d.resources[1]
        self.assertEqual(resource.title, 'Export au format JSON')
        self.assertIsNotNone(resource.description)
        self.assertEqual(resource.format, 'json')
        self.assertEqual(resource.mime, 'application/json')
        self.assertIsInstance(resource.modified, datetime)
        self.assertEqual(resource.url,
                         ("http://etalab-sandbox.opendatasoft.com/"
                          "explore/dataset/test-a/download"
                          "?format=json&timezone=Europe/Berlin"
                          "&use_labels_for_header=true"))

        # test-b has geo feature
        self.assertIn("test-b", datasets)
        test_b = datasets["test-b"]
        self.assertEqual(test_b.tags, ['buildings',
                                       'equipment',
                                       'housing',
                                       'keyword1',
                                       'spatial-planning',
                                       'town-planning'])
        # Geo datasets additionally expose GeoJSON and Shapefile exports.
        self.assertEqual(len(test_b.resources), 4)
        resource = test_b.resources[2]
        self.assertEqual(resource.title, 'Export au format GeoJSON')
        self.assertIsNotNone(resource.description)
        self.assertEqual(resource.format, 'json')
        self.assertEqual(resource.mime, 'application/vnd.geo+json')
        self.assertEqual(resource.url,
                         ("http://etalab-sandbox.opendatasoft.com/"
                          "explore/dataset/test-b/download"
                          "?format=geojson&timezone=Europe/Berlin"
                          "&use_labels_for_header=true"))
        resource = test_b.resources[3]
        self.assertEqual(resource.title, 'Export au format Shapefile')
        self.assertIsNotNone(resource.description)
        self.assertEqual(resource.format, 'shp')
        self.assertIsNone(resource.mime)
        self.assertEqual(resource.url,
                         ("http://etalab-sandbox.opendatasoft.com/"
                          "explore/dataset/test-b/download"
                          "?format=shp&timezone=Europe/Berlin"
                          "&use_labels_for_header=true"))

        # test-c has no data
        self.assertNotIn('test-c', datasets)
| agpl-3.0 |
deepsrijit1105/edx-platform | common/test/acceptance/pages/lms/staff_view.py | 4 | 3697 | """
Staff view of courseware
"""
from bok_choy.page_object import PageObject
from common.test.acceptance.pages.lms.courseware import CoursewarePage
class StaffPage(CoursewarePage):
    """
    View of courseware pages while logged in as course staff
    """

    url = None
    # CSS anchors for the staff preview menu and its view-mode <option>s.
    PREVIEW_MENU_CSS = '.preview-menu'
    VIEW_MODE_OPTIONS_CSS = '.preview-menu .action-preview-select option'

    def is_browser_on_page(self):
        # We are "on the page" only if the courseware loaded AND the
        # staff-only preview menu is present.
        if not super(StaffPage, self).is_browser_on_page():
            return False
        return self.q(css=self.PREVIEW_MENU_CSS).present

    @property
    def staff_view_mode(self):
        """
        Return the currently chosen view mode, e.g. "Staff", "Student" or a content group.
        """
        return self.q(css=self.VIEW_MODE_OPTIONS_CSS).filter(lambda el: el.is_selected()).first.text[0]

    def set_staff_view_mode(self, view_mode):
        """
        Set the current view mode, e.g. "Staff", "Student" or a content group.
        """
        self.q(css=self.VIEW_MODE_OPTIONS_CSS).filter(lambda el: el.text.strip() == view_mode).first.click()
        self.wait_for_ajax()

    def set_staff_view_mode_specific_student(self, username_or_email):
        """
        Set the current preview mode to "Specific Student" with the given username or email
        """
        required_mode = "Specific student"
        if self.staff_view_mode != required_mode:
            self.q(css=self.VIEW_MODE_OPTIONS_CSS).filter(lambda el: el.text == required_mode).first.click()
        # Use a script here because .clear() + .send_keys() triggers unwanted behavior if a username is already set
        self.browser.execute_script(
            '$(".action-preview-username").val("{}").blur().change();'.format(username_or_email)
        )
        self.wait_for_ajax()

    def open_staff_debug_info(self):
        """
        Open the staff debug window
        Return the page object for it.
        """
        self.q(css='a.instructor-info-action').first.click()
        staff_debug_page = StaffDebugPage(self.browser)
        staff_debug_page.wait_for_page()
        return staff_debug_page

    def answer_problem(self):
        """
        Answers the problem to give state that we can clean
        """
        self.q(css='input.check').first.click()
        self.wait_for_ajax()

    def load_problem_via_ajax(self):
        """
        Load problem via ajax by clicking next.
        """
        self.q(css="li.next").click()
        self.wait_for_ajax()
class StaffDebugPage(PageObject):
    """
    Staff Debug modal
    """

    url = None

    def is_browser_on_page(self):
        return self.q(css='section.staff-modal').present

    def reset_attempts(self, user=None):
        """
        This clicks on the reset attempts link with an optionally
        specified user.
        """
        if user:
            self.q(css='input[id^=sd_fu_]').first.fill(user)
        self.q(css='.staff-modal .staff-debug-reset').click()

    def delete_state(self, user=None):
        """
        Deletes a student's state for the problem, with an optionally
        specified user.
        """
        if user:
            # Use `.first` like the sibling methods so only the username
            # input for this block is filled (previously this filled every
            # matching element on the page).
            self.q(css='input[id^=sd_fu_]').first.fill(user)
        self.q(css='.staff-modal .staff-debug-sdelete').click()

    def rescore(self, user=None):
        """
        This clicks on the rescore link with an optionally
        specified user.
        """
        if user:
            self.q(css='input[id^=sd_fu_]').first.fill(user)
        self.q(css='.staff-modal .staff-debug-rescore').click()

    @property
    def idash_msg(self):
        """
        Returns the value of #idash_msg
        """
        self.wait_for_ajax()
        return self.q(css='#idash_msg').text
| agpl-3.0 |
ThiefMaster/sqlalchemy | examples/materialized_paths/materialized_paths.py | 29 | 3989 | """Illustrates the "materialized paths" pattern.
Materialized paths is a way to represent a tree structure in SQL with fast
descendant and ancestor queries at the expense of moving nodes (which require
O(n) UPDATEs in the worst case, where n is the number of nodes in the tree). It
is a good balance in terms of performance and simplicity between the nested
sets model and the adjacency list model.
It works by storing all nodes in a table with a path column, containing a
string of delimited IDs. Think file system paths:
1
1.2
1.3
1.3.4
1.3.5
1.3.6
1.7
1.7.8
1.7.9
1.7.9.10
1.7.11
Descendant queries are simple left-anchored LIKE queries, and ancestors are
already stored in the path itself. Updates require going through all
descendants and changing the prefix.
"""
from sqlalchemy import Column, Integer, String, func, select, create_engine
from sqlalchemy.orm import remote, foreign, relationship, Session
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.sql.expression import cast
from sqlalchemy.dialects.postgresql import ARRAY
Base = declarative_base()
class Node(Base):
    __tablename__ = "node"

    # Ids are assigned by the application (see the demo below), not the DB.
    id = Column(Integer, primary_key=True, autoincrement=False)
    # Materialized path, e.g. "1.7.9.10"; the full ancestor chain is encoded
    # in the string itself.  Indexed so the LIKE-based descendant query is fast.
    path = Column(String(500), nullable=False, index=True)

    # To find the descendants of this node, we look for nodes whose path
    # starts with this node's path.
    descendants = relationship(
        "Node", viewonly=True, order_by=path,
        primaryjoin=remote(foreign(path)).like(path.concat(".%")))

    # Finding the ancestors is a little bit trickier. We need to create a fake
    # secondary table since this behaves like a many-to-many join.
    # The regexp strips the trailing ".<id>" component; the remaining dotted
    # prefix is split and unnested into one row per ancestor id
    # (string_to_array/unnest are PostgreSQL-specific).
    secondary = select([
        id.label("id"),
        func.unnest(cast(func.string_to_array(
            func.regexp_replace(path, r"\.?\d+$", ""), "."),
            ARRAY(Integer))).label("ancestor_id")
    ]).alias()
    ancestors = relationship("Node", viewonly=True, secondary=secondary,
                             primaryjoin=id == secondary.c.id,
                             secondaryjoin=secondary.c.ancestor_id == id,
                             order_by=path)

    @property
    def depth(self):
        # Root is depth 0; depth equals the number of "." separators.
        return len(self.path.split(".")) - 1

    def __repr__(self):
        return "Node(id={})".format(self.id)

    def __str__(self):
        # Render this node plus all descendants as an indented tree.
        root_depth = self.depth
        s = [str(self.id)]
        s.extend(((n.depth - root_depth) * " " + str(n.id))
                 for n in self.descendants)
        return "\n".join(s)

    def move_to(self, new_parent):
        # Re-prefix this node's path and every descendant's path.
        # O(n) in the size of the moved subtree (see module docstring).
        new_path = new_parent.path + "." + str(self.id)
        for n in self.descendants:
            n.path = new_path + n.path[len(self.path):]
        self.path = new_path
if __name__ == "__main__":
    engine = create_engine("postgresql://scott:tiger@localhost/test", echo=True)
    Base.metadata.create_all(engine)

    session = Session(engine)

    print("-" * 80)
    print("create a tree")
    # Paths are assigned by hand here; a real application would derive them
    # from the parent at insert time.
    session.add_all([
        Node(id=1, path="1"),
        Node(id=2, path="1.2"),
        Node(id=3, path="1.3"),
        Node(id=4, path="1.3.4"),
        Node(id=5, path="1.3.5"),
        Node(id=6, path="1.3.6"),
        Node(id=7, path="1.7"),
        Node(id=8, path="1.7.8"),
        Node(id=9, path="1.7.9"),
        Node(id=10, path="1.7.9.10"),
        Node(id=11, path="1.7.11"),
    ])
    session.flush()
    print(str(session.query(Node).get(1)))

    print("-" * 80)
    print("move 7 under 3")
    session.query(Node).get(7).move_to(session.query(Node).get(3))
    session.flush()
    print(str(session.query(Node).get(1)))

    print("-" * 80)
    print("move 3 under 2")
    session.query(Node).get(3).move_to(session.query(Node).get(2))
    session.flush()
    print(str(session.query(Node).get(1)))

    print("-" * 80)
    print("find the ancestors of 10")
    print([n.id for n in session.query(Node).get(10).ancestors])

    # Clean up the demo schema.
    session.close()
    Base.metadata.drop_all(engine)
Elettronik/SickRage | lib/sqlalchemy/types.py | 78 | 1640 | # types.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Compatiblity namespace for sqlalchemy.sql.types.
"""
__all__ = ['TypeEngine', 'TypeDecorator', 'UserDefinedType',
'INT', 'CHAR', 'VARCHAR', 'NCHAR', 'NVARCHAR', 'TEXT', 'Text',
'FLOAT', 'NUMERIC', 'REAL', 'DECIMAL', 'TIMESTAMP', 'DATETIME',
'CLOB', 'BLOB', 'BINARY', 'VARBINARY', 'BOOLEAN', 'BIGINT',
'SMALLINT', 'INTEGER', 'DATE', 'TIME', 'String', 'Integer',
'SmallInteger', 'BigInteger', 'Numeric', 'Float', 'DateTime',
'Date', 'Time', 'LargeBinary', 'Binary', 'Boolean', 'Unicode',
'Concatenable', 'UnicodeText', 'PickleType', 'Interval', 'Enum']
from .sql.type_api import (
adapt_type,
TypeEngine,
TypeDecorator,
Variant,
to_instance,
UserDefinedType
)
from .sql.sqltypes import (
BIGINT,
BINARY,
BLOB,
BOOLEAN,
BigInteger,
Binary,
_Binary,
Boolean,
CHAR,
CLOB,
Concatenable,
DATE,
DATETIME,
DECIMAL,
Date,
DateTime,
Enum,
FLOAT,
Float,
INT,
INTEGER,
Integer,
Interval,
LargeBinary,
NCHAR,
NVARCHAR,
NullType,
NULLTYPE,
NUMERIC,
Numeric,
PickleType,
REAL,
SchemaType,
SMALLINT,
SmallInteger,
String,
STRINGTYPE,
TEXT,
TIME,
TIMESTAMP,
Text,
Time,
Unicode,
UnicodeText,
VARBINARY,
VARCHAR,
_type_map
)
| gpl-3.0 |
ramitsurana/boto | tests/db/test_lists.py | 136 | 3474 | # Copyright (c) 2010 Chris Moyer http://coredumped.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.sdb.db.property import ListProperty
from boto.sdb.db.model import Model
import time
class SimpleListModel(Model):
    """Test the List Property"""
    # Two parallel list-typed properties exercised by the tests below.
    nums = ListProperty(int)
    strs = ListProperty(str)
class TestLists(object):
    """Test the List property"""

    def setup_class(cls):
        """Setup this class"""
        cls.objs = []

    def teardown_class(cls):
        """Remove our objects"""
        for o in cls.objs:
            try:
                o.delete()
            except:
                # Best-effort cleanup: ignore objects that were already
                # deleted or never fully saved.
                pass

    def test_list_order(self):
        """Testing the order of lists"""
        t = SimpleListModel()
        t.nums = [5, 4, 1, 3, 2]
        t.strs = ["B", "C", "A", "D", "Foo"]
        t.put()
        self.objs.append(t)
        # SimpleDB is eventually consistent; give the write time to land.
        time.sleep(3)
        t = SimpleListModel.get_by_id(t.id)
        assert(t.nums == [5, 4, 1, 3, 2])
        assert(t.strs == ["B", "C", "A", "D", "Foo"])

    def test_old_compat(self):
        """Testing to make sure the old method of encoding lists will still return results"""
        t = SimpleListModel()
        t.put()
        self.objs.append(t)
        time.sleep(3)
        # Write raw (old-style encoded) values directly to the backing item.
        item = t._get_raw_item()
        item['strs'] = ["A", "B", "C"]
        item.save()
        time.sleep(3)
        t = SimpleListModel.get_by_id(t.id)
        # Old encoding does not preserve order, so compare as sorted lists.
        i1 = sorted(item['strs'])
        i2 = t.strs
        i2.sort()
        assert(i1 == i2)

    def test_query_equals(self):
        """We noticed a slight problem with querying, since the query uses the same encoder,
        it was asserting that the value was at the same position in the list, not just "in" the list"""
        t = SimpleListModel()
        t.strs = ["Bizzle", "Bar"]
        t.put()
        self.objs.append(t)
        time.sleep(3)
        assert(SimpleListModel.find(strs="Bizzle").count() == 1)
        assert(SimpleListModel.find(strs="Bar").count() == 1)
        assert(SimpleListModel.find(strs=["Bar", "Bizzle"]).count() == 1)

    def test_query_not_equals(self):
        """Test a not equal filter"""
        t = SimpleListModel()
        t.strs = ["Fizzle"]
        t.put()
        self.objs.append(t)
        time.sleep(3)
        # NOTE: Python 2 print statements -- this module predates py3 support.
        print SimpleListModel.all().filter("strs !=", "Fizzle").get_query()
        for tt in SimpleListModel.all().filter("strs !=", "Fizzle"):
            print tt.strs
            assert("Fizzle" not in tt.strs)
| mit |
puttarajubr/commcare-hq | corehq/apps/indicators/admin/forms.py | 4 | 11687 | from django import forms
from django.conf import settings
from django.core.exceptions import ValidationError
from django.core.validators import MinLengthValidator
from django.forms import MultipleChoiceField
from django.forms.util import ErrorList
from corehq.apps.crud.models import BaseAdminCRUDForm
from corehq.apps.indicators.models import FormDataAliasIndicatorDefinition, FormLabelIndicatorDefinition, CaseDataInFormIndicatorDefinition, FormDataInCaseIndicatorDefinition, CouchIndicatorDef, CountUniqueCouchIndicatorDef, MedianCouchIndicatorDef, CombinedCouchViewIndicatorDefinition, SumLastEmittedCouchIndicatorDef, DynamicIndicatorDefinition, NoGroupCouchIndicatorDefBase
from corehq.apps.indicators.utils import get_namespaces, get_namespace_name, get_indicator_domains
from corehq.apps.users.models import Permissions
from dimagi.utils.decorators.memoized import memoized
class BaseIndicatorDefinitionForm(BaseAdminCRUDForm):
    """Base admin CRUD form for indicator definitions scoped to a domain."""
    slug = forms.SlugField(label="Slug")
    namespace = forms.CharField(label="Namespace", widget=forms.Select(choices=[]))

    def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
                 initial=None, error_class=ErrorList, label_suffix=':',
                 empty_permitted=False, doc_id=None, domain=None):
        super(BaseIndicatorDefinitionForm, self).__init__(data, files, auto_id, prefix, initial, error_class,
                                                          label_suffix, empty_permitted, doc_id)
        self.domain = domain
        # Namespace choices depend on the domain, so the widget is populated
        # at instantiation time rather than at class-definition time.
        self.fields['namespace'].widget = forms.Select(choices=get_namespaces(self.domain, as_choices=True))

    @property
    @memoized
    def crud_manager(self):
        # The parent's crud_manager needs to know our domain; memoized so
        # the lookup and assignment happen only once per form instance.
        crud_manager = super(BaseIndicatorDefinitionForm, self).crud_manager
        crud_manager.domain = self.domain
        return crud_manager
class FormLabelIndicatorDefinitionForm(BaseIndicatorDefinitionForm):
    """Admin CRUD form for form-label indicator definitions."""

    doc_class = FormLabelIndicatorDefinition

    xmlns = forms.CharField(label="XMLNS")

    def clean_xmlns(self):
        """Strip surrounding whitespace from the submitted XMLNS."""
        cleaned = self.cleaned_data
        if 'xmlns' not in cleaned:
            return None
        return cleaned['xmlns'].strip()
class FormDataAliasIndicatorDefinitionForm(FormLabelIndicatorDefinitionForm):
    """Form for indicators aliasing a single question in a form."""
    question_id = forms.CharField(label="Question ID")

    doc_class = FormDataAliasIndicatorDefinition

    def clean_question_id(self):
        # Strip stray whitespace from copy/pasted question ids.
        if 'question_id' in self.cleaned_data:
            return self.cleaned_data['question_id'].strip()
class CaseDataInFormIndicatorDefinitionForm(FormLabelIndicatorDefinitionForm):
    """Form for indicators pulling a case property into form data."""
    case_property = forms.CharField(label="Case Property")

    doc_class = CaseDataInFormIndicatorDefinition
class BaseCaseIndicatorDefinitionForm(BaseIndicatorDefinitionForm):
    """Base form for case-based indicator definitions."""
    case_type = forms.CharField(label="Case Type")

    def clean_case_type(self):
        # Strip stray whitespace from the submitted case type.
        if 'case_type' in self.cleaned_data:
            return self.cleaned_data['case_type'].strip()
class FormDataInCaseForm(BaseCaseIndicatorDefinitionForm):
    """Form for indicators pulling a related form's question into case data."""
    xmlns = forms.CharField(label="XMLNS of Related Form")
    question_id = forms.CharField(label="Question ID of Related Form")

    doc_class = FormDataInCaseIndicatorDefinition

    def clean_xmlns(self):
        if 'xmlns' in self.cleaned_data:
            return self.cleaned_data['xmlns'].strip()

    def clean_question_id(self):
        if 'question_id' in self.cleaned_data:
            return self.cleaned_data['question_id'].strip()
class BaseDynamicIndicatorForm(BaseIndicatorDefinitionForm):
    """Base form for dynamic indicators, which carry display metadata."""
    title = forms.CharField(label="Title", help_text="This should be the shortened version of the description.")
    description = forms.CharField(label="Description", help_text="The description of what this indicator means.")
class CouchIndicatorForm(BaseDynamicIndicatorForm):
    """Form for indicators backed by a single couch view."""
    couch_view = forms.CharField(label="Couch View")
    indicator_key = forms.CharField(label="Indicator Key", required=False)

    #todo provide reasonable labels for these
    startdate_shift = forms.IntegerField(label="Start Date Shift", required=False)
    enddate_shift = forms.IntegerField(label="End Date Shift", required=False)
    fixed_datespan_days = forms.IntegerField(label="Fix Datespan by Days", required=False)
    fixed_datespan_months = forms.IntegerField(label="Fix Datespan by Months", required=False)
    change_doc_type = forms.BooleanField(label="Change Indicator Type?", required=False, initial=False)
    doc_type_choices = forms.CharField(label="Choose Indicator Type", required=False, widget=forms.Select(choices=[]))

    doc_class = CouchIndicatorDef

    def __init__(self, *args, **kwargs):
        super(CouchIndicatorForm, self).__init__(*args, **kwargs)
        if self.existing_object:
            self.fields['doc_type_choices'].widget.choices = [(d.__name__, d.get_nice_name())
                                                              for d in self.available_doc_types]
        else:
            # A brand-new indicator cannot switch types yet, so hide the
            # type-changing controls entirely.
            del self.fields['change_doc_type']
            del self.fields['doc_type_choices']

    @property
    def available_doc_types(self):
        """Indicator classes this one can be converted to (all but its own)."""
        subclasses = set([CouchIndicatorDef, CountUniqueCouchIndicatorDef,
                          MedianCouchIndicatorDef, SumLastEmittedCouchIndicatorDef])
        return subclasses.difference([self.doc_class])

    def clean_fixed_datespan_days(self):
        # Normalize to a positive day count; empty/zero submissions clean
        # to None (the implicit return).
        if 'fixed_datespan_days' in self.cleaned_data and self.cleaned_data['fixed_datespan_days']:
            return abs(self.cleaned_data['fixed_datespan_days'])

    def clean_fixed_datespan_months(self):
        # Same normalization as above, for months.
        if 'fixed_datespan_months' in self.cleaned_data and self.cleaned_data['fixed_datespan_months']:
            return abs(self.cleaned_data['fixed_datespan_months'])

    def clean_doc_type_choices(self):
        # NOTE(review): this clean method has a side effect -- when the user
        # opted in via change_doc_type it persists the new doc_type on the
        # existing object immediately, rather than waiting for form save.
        if ('doc_type_choices' in self.cleaned_data
                and 'change_doc_type' in self.cleaned_data
                and self.cleaned_data['change_doc_type']):
            # (Removed an unused `subclass_to_class` dict that was built
            # here but never read.)
            if self.existing_object:
                self.existing_object.doc_type = self.cleaned_data['doc_type_choices']
                self.existing_object.save()
            return self.cleaned_data['doc_type_choices']
class CountUniqueCouchIndicatorForm(CouchIndicatorForm):
    # Same fields as CouchIndicatorForm; only the backing doc type differs.
    doc_class = CountUniqueCouchIndicatorDef
class MedianCouchIndicatorForm(CouchIndicatorForm):
    # Same fields as CouchIndicatorForm; only the backing doc type differs.
    doc_class = MedianCouchIndicatorDef
class SumLastEmittedCouchIndicatorForm(CouchIndicatorForm):
    # Same fields as CouchIndicatorForm; only the backing doc type differs.
    doc_class = SumLastEmittedCouchIndicatorDef
class CombinedIndicatorForm(BaseDynamicIndicatorForm):
    """Form for ratio indicators built from two existing couch indicators."""
    numerator_slug = forms.SlugField(label="Numerator Slug")
    denominator_slug = forms.SlugField(label="Denominator Slug")

    doc_class = CombinedCouchViewIndicatorDefinition

    @property
    def available_slugs(self):
        # The view is presumably keyed by [namespace, domain, slug]; grouping
        # at level 3 yields the distinct slugs for this namespace + domain --
        # TODO confirm against the couch design doc.
        key = [self.cleaned_data['namespace'], self.domain]
        slugs = DynamicIndicatorDefinition.get_db().view("indicators/available_to_combine",
                                                         group=True,
                                                         group_level=3,
                                                         startkey=key,
                                                         endkey=key + [{}]
                                                         ).all()
        return [s['key'][-1] for s in slugs]

    def _check_if_slug_exists(self, slug):
        # Validation helper shared by both slug cleaners below.
        if slug not in self.available_slugs:
            raise ValidationError("An indicator with slug %s does not exist. Please create this indicator first."
                                  % slug)
        return slug

    def clean_numerator_slug(self):
        if 'numerator_slug' in self.cleaned_data:
            return self._check_if_slug_exists(self.cleaned_data['numerator_slug'])

    def clean_denominator_slug(self):
        if 'denominator_slug' in self.cleaned_data:
            return self._check_if_slug_exists(self.cleaned_data['denominator_slug'])
class BulkCopyIndicatorsForm(forms.Form):
    """Copies a selection of indicator definitions to another project space."""
    destination_domain = forms.CharField(label="Destination Project Space")
    indicator_ids = MultipleChoiceField(
        label="Indicator(s)",
        validators=[MinLengthValidator(1)])

    def __init__(self, domain=None, couch_user=None, indicator_class=None, *args, **kwargs):
        super(BulkCopyIndicatorsForm, self).__init__(*args, **kwargs)
        self.domain = domain
        self.couch_user = couch_user
        self.indicator_class = indicator_class

        # Choices depend on the user and source domain, so they are bound
        # per-instance rather than at class-definition time.
        self.fields['destination_domain'].widget = forms.Select(choices=[(d, d) for d in self.available_domains])
        self.fields['indicator_ids'].choices = self.available_indicators

    @property
    @memoized
    def available_domains(self):
        # Indicator-enabled domains the user may edit, minus the source domain.
        if not self.couch_user:
            return []
        indicator_domains = set(get_indicator_domains())
        indicator_domains = indicator_domains.difference([self.domain])
        return [d for d in indicator_domains if self.couch_user.has_permission(d, Permissions.edit_data)]

    @property
    @memoized
    def available_indicators(self):
        # (id, label) choice pairs for every indicator of this class across
        # all of the source domain's namespaces.
        indicators = []
        for namespace in get_namespaces(self.domain):
            indicators.extend(self.indicator_class.get_all_of_type(namespace, self.domain))
        return [(i._id, "%s | v. %d | n: %s" % (i.slug, i.version if i.version else 0,
                                                get_namespace_name(i.domain, i.namespace))) for i in indicators]

    def clean_destination_domain(self):
        if 'destination_domain' in self.cleaned_data:
            destination = self.cleaned_data['destination_domain']
            if not self.couch_user or not self.couch_user.has_permission(destination, Permissions.edit_data):
                raise ValidationError("You do not have permission to copy indicators to this project space.")
            if destination not in self.available_domains:
                raise ValidationError("You submitted an invalid destination project space")
            return destination

    def copy_indicators(self):
        """Perform the copy; returns {'success': [...], 'failure': [...]}."""
        failed = []
        success = []
        destination_domain = self.cleaned_data['destination_domain']
        available_namespaces = get_namespaces(destination_domain)
        indicator_ids = self.cleaned_data['indicator_ids']
        for indicator_id in indicator_ids:
            try:
                indicator = self.indicator_class.get(indicator_id)
                # Properties tied to the source document/domain that must not
                # be carried over to the copy.
                properties_to_exclude = [
                    'last_modified',
                    'base_doc',
                    'namespace',
                    'domain',
                    'class_path',
                    'version'
                ]
                if indicator.namespace not in available_namespaces:
                    failed.append(dict(indicator=indicator.slug,
                                       reason='Indicator namespace not available for destination project.'))
                    continue
                properties = set(indicator.properties().keys())
                copied_properties = properties.difference(properties_to_exclude)
                copied_properties = dict([(p, getattr(indicator, p)) for p in copied_properties])

                copied_indicator = self.indicator_class.increment_or_create_unique(
                    indicator.namespace,
                    destination_domain,
                    **copied_properties
                )
                if copied_indicator:
                    success.append(copied_indicator.slug)
            except Exception as e:
                # One bad indicator should not abort the whole batch.
                failed.append(dict(indicator=indicator_id,
                                   reason='Could not retrieve indicator %s due to error %s:' % (indicator_id, e)))
        return {
            'success': success,
            'failure': failed,
        }
| bsd-3-clause |
philsch/ansible | lib/ansible/modules/system/puppet.py | 36 | 9409 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: puppet
short_description: Runs puppet
description:
- Runs I(puppet) agent or apply in a reliable manner
version_added: "2.0"
options:
timeout:
description:
- How long to wait for I(puppet) to finish.
required: false
default: 30m
puppetmaster:
description:
- The hostname of the puppetmaster to contact.
required: false
default: None
modulepath:
description:
- Path to an alternate location for puppet modules
required: false
default: None
version_added: "2.4"
manifest:
description:
- Path to the manifest file to run puppet apply on.
required: false
default: None
facts:
description:
- A dict of values to pass in as persistent external facter facts
required: false
default: None
facter_basename:
description:
- Basename of the facter output file
required: false
default: ansible
environment:
description:
- Puppet environment to be used.
required: false
default: None
logdest:
description:
- Where the puppet logs should go, if puppet apply is being used
required: false
default: stdout
choices: [ 'stdout', 'syslog' ]
version_added: "2.1"
certname:
description:
- The name to use when handling certificates.
required: false
default: None
version_added: "2.1"
tags:
description:
- A comma-separated list of puppet tags to be used.
required: false
default: None
version_added: "2.1"
execute:
description:
- Execute a specific piece of Puppet code. It has no effect with
a puppetmaster.
required: false
default: None
version_added: "2.1"
requirements: [ puppet ]
author: "Monty Taylor (@emonty)"
'''
EXAMPLES = '''
# Run puppet agent and fail if anything goes wrong
- puppet
# Run puppet and timeout in 5 minutes
- puppet:
timeout: 5m
# Run puppet using a different environment
- puppet:
environment: testing
# Run puppet using a specific certname
- puppet:
certname: agent01.example.com
# Run puppet using a specific piece of Puppet code. Has no effect with a
# puppetmaster.
- puppet:
execute: 'include ::mymodule'
# Run puppet using a specific tags
- puppet:
tags: update,nginx
'''
import os
import pipes
import stat
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
# Let snippet from module_utils/basic.py return a proper error in this case
pass
def _get_facter_dir():
if os.getuid() == 0:
return '/etc/facter/facts.d'
else:
return os.path.expanduser('~/.facter/facts.d')
def _write_structured_data(basedir, basename, data):
if not os.path.exists(basedir):
os.makedirs(basedir)
file_path = os.path.join(basedir, "{0}.json".format(basename))
# This is more complex than you might normally expect because we want to
# open the file with only u+rw set. Also, we use the stat constants
# because ansible still supports python 2.4 and the octal syntax changed
out_file = os.fdopen(
os.open(
file_path, os.O_CREAT | os.O_WRONLY,
stat.S_IRUSR | stat.S_IWUSR), 'w')
out_file.write(json.dumps(data).encode('utf8'))
out_file.close()
def main():
    """Ansible module entry point.

    Builds either a ``puppet agent`` or ``puppet apply`` command line from
    the module parameters, runs it, and translates puppet's detailed exit
    codes into ansible results:

      0   -> success, no changes
      1   -> failure (or agent administratively disabled)
      2   -> success, with changes applied
      124 -> the ``timeout`` wrapper killed the run
      *   -> any other code is reported as a failure
    """
    module = AnsibleModule(
        argument_spec=dict(
            timeout=dict(default="30m"),
            puppetmaster=dict(required=False, default=None),
            modulepath=dict(required=False, default=None),
            manifest=dict(required=False, default=None),
            logdest=dict(
                required=False, default='stdout',
                choices=['stdout', 'syslog']),
            show_diff=dict(
                # internal code to work with --diff, do not use
                default=False, aliases=['show-diff'], type='bool'),
            facts=dict(default=None, type='dict'),
            facter_basename=dict(default='ansible'),
            environment=dict(required=False, default=None),
            certname=dict(required=False, default=None),
            tags=dict(required=False, default=None, type='list'),
            execute=dict(required=False, default=None),
        ),
        supports_check_mode=True,
        mutually_exclusive=[
            # The 3-tuple already forbids every pairing of puppetmaster
            # with manifest or execute, so the previous separate
            # ('puppetmaster', 'manifest') entry was redundant.
            ('puppetmaster', 'manifest', 'execute'),
            ('puppetmaster', 'modulepath'),
        ],
    )
    p = module.params

    global PUPPET_CMD
    PUPPET_CMD = module.get_bin_path("puppet", False, ['/opt/puppetlabs/bin'])

    if not PUPPET_CMD:
        module.fail_json(
            msg="Could not find puppet. Please ensure it is installed.")

    global TIMEOUT_CMD
    TIMEOUT_CMD = module.get_bin_path("timeout", False)

    if p['manifest']:
        if not os.path.exists(p['manifest']):
            module.fail_json(
                msg="Manifest file %(manifest)s not found." % dict(
                    manifest=p['manifest']))

    # Agent mode only: refuse to run if the agent has been administratively
    # disabled (puppet agent --disable) on this host.
    if not p['manifest']:
        rc, stdout, stderr = module.run_command(
            PUPPET_CMD + " config print agent_disabled_lockfile")
        if os.path.exists(stdout.strip()):
            module.fail_json(
                msg="Puppet agent is administratively disabled.",
                disabled=True)
        elif rc != 0:
            module.fail_json(
                msg="Puppet agent state could not be determined.")

    # Persist any external facts before the run so puppet can see them.
    if module.params['facts'] and not module.check_mode:
        _write_structured_data(
            _get_facter_dir(),
            module.params['facter_basename'],
            module.params['facts'])

    # Wrap puppet in coreutils `timeout` when available; -s 9 kills hard.
    if TIMEOUT_CMD:
        base_cmd = "%(timeout_cmd)s -s 9 %(timeout)s %(puppet_cmd)s" % dict(
            timeout_cmd=TIMEOUT_CMD,
            timeout=pipes.quote(p['timeout']),
            puppet_cmd=PUPPET_CMD)
    else:
        base_cmd = PUPPET_CMD

    if not p['manifest']:
        cmd = ("%(base_cmd)s agent --onetime"
               " --ignorecache --no-daemonize --no-usecacheonfailure --no-splay"
               " --detailed-exitcodes --verbose --color 0") % dict(
                   base_cmd=base_cmd,
               )
        if p['puppetmaster']:
            cmd += " --server %s" % pipes.quote(p['puppetmaster'])
        if p['show_diff']:
            cmd += " --show_diff"
        if p['environment']:
            cmd += " --environment '%s'" % p['environment']
        if p['tags']:
            cmd += " --tags '%s'" % ','.join(p['tags'])
        if p['certname']:
            cmd += " --certname='%s'" % p['certname']
        if module.check_mode:
            cmd += " --noop"
        else:
            cmd += " --no-noop"
    else:
        # Build the "puppet apply" command line. Every flag is appended
        # with a single leading space; the old code mixed leading and
        # trailing spaces, which fused flags together whenever certain
        # options were combined (e.g. "--modulepath='x'--environment ..."
        # and "--tags 'a'--noop").
        cmd = "%s apply --detailed-exitcodes" % base_cmd
        if p['logdest'] == 'syslog':
            cmd += " --logdest syslog"
        if p['modulepath']:
            cmd += " --modulepath='%s'" % p['modulepath']
        if p['environment']:
            cmd += " --environment '%s'" % p['environment']
        if p['certname']:
            cmd += " --certname='%s'" % p['certname']
        if p['execute']:
            cmd += " --execute '%s'" % p['execute']
        if p['tags']:
            cmd += " --tags '%s'" % ','.join(p['tags'])
        if module.check_mode:
            cmd += " --noop"
        else:
            cmd += " --no-noop"
        cmd += " " + pipes.quote(p['manifest'])

    rc, stdout, stderr = module.run_command(cmd)

    if rc == 0:
        # success, no changes
        module.exit_json(rc=rc, changed=False, stdout=stdout, stderr=stderr)
    elif rc == 1:
        # rc==1 could be because it's disabled
        # rc==1 could also mean there was a compilation failure
        disabled = "administratively disabled" in stdout
        if disabled:
            msg = "puppet is disabled"
        else:
            msg = "puppet did not run"
        module.exit_json(
            rc=rc, disabled=disabled, msg=msg,
            error=True, stdout=stdout, stderr=stderr)
    elif rc == 2:
        # success with changes
        module.exit_json(rc=0, changed=True, stdout=stdout, stderr=stderr)
    elif rc == 124:
        # the `timeout` wrapper killed the run
        module.exit_json(
            rc=rc, msg="%s timed out" % cmd, stdout=stdout, stderr=stderr)
    else:
        # failure
        module.fail_json(
            rc=rc, msg="%s failed with return code: %d" % (cmd, rc),
            stdout=stdout, stderr=stderr)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
fighterCui/L4ReFiascoOC | l4/pkg/python/contrib/Lib/test/test_textwrap.py | 55 | 23220 | #
# Test suite for the textwrap module.
#
# Original tests written by Greg Ward <gward@python.net>.
# Converted to PyUnit by Peter Hansen <peter@engcorp.com>.
# Currently maintained by Greg Ward.
#
# $Id: test_textwrap.py 67896 2008-12-21 17:01:26Z benjamin.peterson $
#
import unittest
from test import test_support
from textwrap import TextWrapper, wrap, fill, dedent
class BaseTestCase(unittest.TestCase):
    '''Parent class with utility methods for textwrap tests.'''

    def show(self, textin):
        """Format *textin* (a list of wrapped lines, or a single string)
        for inclusion in an assertion-failure message."""
        if isinstance(textin, list):
            result = []
            for i in range(len(textin)):
                result.append(" %d: %r" % (i, textin[i]))
            result = '\n'.join(result)
        else:
            # Strings (and anything else) are shown as a repr. The old
            # code only handled list/basestring explicitly, so any other
            # input type hit an unbound-local NameError on `result`;
            # repr is a safe fallback for all of them.
            result = " %s\n" % repr(textin)
        return result

    def check(self, result, expect):
        """Assert equality with a readable diff of both values."""
        self.assertEquals(result, expect,
            'expected:\n%s\nbut got:\n%s' % (
                self.show(expect), self.show(result)))

    def check_wrap(self, text, width, expect, **kwargs):
        """Wrap *text* at *width* and compare against *expect*."""
        result = wrap(text, width, **kwargs)
        self.check(result, expect)

    def check_split(self, text, expect):
        """Compare the wrapper's low-level _split() output to *expect*.

        Requires the subclass's setUp() to have created self.wrapper.
        """
        result = self.wrapper._split(text)
        self.assertEquals(result, expect,
                          "\nexpected %r\n"
                          "but got  %r" % (expect, result))
class WrapTestCase(BaseTestCase):
def setUp(self):
self.wrapper = TextWrapper(width=45)
def test_simple(self):
# Simple case: just words, spaces, and a bit of punctuation
text = "Hello there, how are you this fine day? I'm glad to hear it!"
self.check_wrap(text, 12,
["Hello there,",
"how are you",
"this fine",
"day? I'm",
"glad to hear",
"it!"])
self.check_wrap(text, 42,
["Hello there, how are you this fine day?",
"I'm glad to hear it!"])
self.check_wrap(text, 80, [text])
def test_whitespace(self):
# Whitespace munging and end-of-sentence detection
text = """\
This is a paragraph that already has
line breaks. But some of its lines are much longer than the others,
so it needs to be wrapped.
Some lines are \ttabbed too.
What a mess!
"""
expect = ["This is a paragraph that already has line",
"breaks. But some of its lines are much",
"longer than the others, so it needs to be",
"wrapped. Some lines are tabbed too. What a",
"mess!"]
wrapper = TextWrapper(45, fix_sentence_endings=True)
result = wrapper.wrap(text)
self.check(result, expect)
result = wrapper.fill(text)
self.check(result, '\n'.join(expect))
def test_fix_sentence_endings(self):
wrapper = TextWrapper(60, fix_sentence_endings=True)
# SF #847346: ensure that fix_sentence_endings=True does the
# right thing even on input short enough that it doesn't need to
# be wrapped.
text = "A short line. Note the single space."
expect = ["A short line. Note the single space."]
self.check(wrapper.wrap(text), expect)
# Test some of the hairy end cases that _fix_sentence_endings()
# is supposed to handle (the easy stuff is tested in
# test_whitespace() above).
text = "Well, Doctor? What do you think?"
expect = ["Well, Doctor? What do you think?"]
self.check(wrapper.wrap(text), expect)
text = "Well, Doctor?\nWhat do you think?"
self.check(wrapper.wrap(text), expect)
text = 'I say, chaps! Anyone for "tennis?"\nHmmph!'
expect = ['I say, chaps! Anyone for "tennis?" Hmmph!']
self.check(wrapper.wrap(text), expect)
wrapper.width = 20
expect = ['I say, chaps!', 'Anyone for "tennis?"', 'Hmmph!']
self.check(wrapper.wrap(text), expect)
text = 'And she said, "Go to hell!"\nCan you believe that?'
expect = ['And she said, "Go to',
'hell!" Can you',
'believe that?']
self.check(wrapper.wrap(text), expect)
wrapper.width = 60
expect = ['And she said, "Go to hell!" Can you believe that?']
self.check(wrapper.wrap(text), expect)
text = 'File stdio.h is nice.'
expect = ['File stdio.h is nice.']
self.check(wrapper.wrap(text), expect)
def test_wrap_short(self):
# Wrapping to make short lines longer
text = "This is a\nshort paragraph."
self.check_wrap(text, 20, ["This is a short",
"paragraph."])
self.check_wrap(text, 40, ["This is a short paragraph."])
def test_wrap_short_1line(self):
# Test endcases
text = "This is a short line."
self.check_wrap(text, 30, ["This is a short line."])
self.check_wrap(text, 30, ["(1) This is a short line."],
initial_indent="(1) ")
def test_hyphenated(self):
# Test breaking hyphenated words
text = ("this-is-a-useful-feature-for-"
"reformatting-posts-from-tim-peters'ly")
self.check_wrap(text, 40,
["this-is-a-useful-feature-for-",
"reformatting-posts-from-tim-peters'ly"])
self.check_wrap(text, 41,
["this-is-a-useful-feature-for-",
"reformatting-posts-from-tim-peters'ly"])
self.check_wrap(text, 42,
["this-is-a-useful-feature-for-reformatting-",
"posts-from-tim-peters'ly"])
def test_hyphenated_numbers(self):
# Test that hyphenated numbers (eg. dates) are not broken like words.
text = ("Python 1.0.0 was released on 1994-01-26. Python 1.0.1 was\n"
"released on 1994-02-15.")
self.check_wrap(text, 35, ['Python 1.0.0 was released on',
'1994-01-26. Python 1.0.1 was',
'released on 1994-02-15.'])
self.check_wrap(text, 40, ['Python 1.0.0 was released on 1994-01-26.',
'Python 1.0.1 was released on 1994-02-15.'])
text = "I do all my shopping at 7-11."
self.check_wrap(text, 25, ["I do all my shopping at",
"7-11."])
self.check_wrap(text, 27, ["I do all my shopping at",
"7-11."])
self.check_wrap(text, 29, ["I do all my shopping at 7-11."])
def test_em_dash(self):
# Test text with em-dashes
text = "Em-dashes should be written -- thus."
self.check_wrap(text, 25,
["Em-dashes should be",
"written -- thus."])
# Probe the boundaries of the properly written em-dash,
# ie. " -- ".
self.check_wrap(text, 29,
["Em-dashes should be written",
"-- thus."])
expect = ["Em-dashes should be written --",
"thus."]
self.check_wrap(text, 30, expect)
self.check_wrap(text, 35, expect)
self.check_wrap(text, 36,
["Em-dashes should be written -- thus."])
# The improperly written em-dash is handled too, because
# it's adjacent to non-whitespace on both sides.
text = "You can also do--this or even---this."
expect = ["You can also do",
"--this or even",
"---this."]
self.check_wrap(text, 15, expect)
self.check_wrap(text, 16, expect)
expect = ["You can also do--",
"this or even---",
"this."]
self.check_wrap(text, 17, expect)
self.check_wrap(text, 19, expect)
expect = ["You can also do--this or even",
"---this."]
self.check_wrap(text, 29, expect)
self.check_wrap(text, 31, expect)
expect = ["You can also do--this or even---",
"this."]
self.check_wrap(text, 32, expect)
self.check_wrap(text, 35, expect)
# All of the above behaviour could be deduced by probing the
# _split() method.
text = "Here's an -- em-dash and--here's another---and another!"
expect = ["Here's", " ", "an", " ", "--", " ", "em-", "dash", " ",
"and", "--", "here's", " ", "another", "---",
"and", " ", "another!"]
self.check_split(text, expect)
text = "and then--bam!--he was gone"
expect = ["and", " ", "then", "--", "bam!", "--",
"he", " ", "was", " ", "gone"]
self.check_split(text, expect)
def test_unix_options (self):
# Test that Unix-style command-line options are wrapped correctly.
# Both Optik (OptionParser) and Docutils rely on this behaviour!
text = "You should use the -n option, or --dry-run in its long form."
self.check_wrap(text, 20,
["You should use the",
"-n option, or --dry-",
"run in its long",
"form."])
self.check_wrap(text, 21,
["You should use the -n",
"option, or --dry-run",
"in its long form."])
expect = ["You should use the -n option, or",
"--dry-run in its long form."]
self.check_wrap(text, 32, expect)
self.check_wrap(text, 34, expect)
self.check_wrap(text, 35, expect)
self.check_wrap(text, 38, expect)
expect = ["You should use the -n option, or --dry-",
"run in its long form."]
self.check_wrap(text, 39, expect)
self.check_wrap(text, 41, expect)
expect = ["You should use the -n option, or --dry-run",
"in its long form."]
self.check_wrap(text, 42, expect)
# Again, all of the above can be deduced from _split().
text = "the -n option, or --dry-run or --dryrun"
expect = ["the", " ", "-n", " ", "option,", " ", "or", " ",
"--dry-", "run", " ", "or", " ", "--dryrun"]
self.check_split(text, expect)
def test_funky_hyphens (self):
# Screwy edge cases cooked up by David Goodger. All reported
# in SF bug #596434.
self.check_split("what the--hey!", ["what", " ", "the", "--", "hey!"])
self.check_split("what the--", ["what", " ", "the--"])
self.check_split("what the--.", ["what", " ", "the--."])
self.check_split("--text--.", ["--text--."])
# When I first read bug #596434, this is what I thought David
# was talking about. I was wrong; these have always worked
# fine. The real problem is tested in test_funky_parens()
# below...
self.check_split("--option", ["--option"])
self.check_split("--option-opt", ["--option-", "opt"])
self.check_split("foo --option-opt bar",
["foo", " ", "--option-", "opt", " ", "bar"])
def test_punct_hyphens(self):
# Oh bother, SF #965425 found another problem with hyphens --
# hyphenated words in single quotes weren't handled correctly.
# In fact, the bug is that *any* punctuation around a hyphenated
# word was handled incorrectly, except for a leading "--", which
# was special-cased for Optik and Docutils. So test a variety
# of styles of punctuation around a hyphenated word.
# (Actually this is based on an Optik bug report, #813077).
self.check_split("the 'wibble-wobble' widget",
['the', ' ', "'wibble-", "wobble'", ' ', 'widget'])
self.check_split('the "wibble-wobble" widget',
['the', ' ', '"wibble-', 'wobble"', ' ', 'widget'])
self.check_split("the (wibble-wobble) widget",
['the', ' ', "(wibble-", "wobble)", ' ', 'widget'])
self.check_split("the ['wibble-wobble'] widget",
['the', ' ', "['wibble-", "wobble']", ' ', 'widget'])
def test_funky_parens (self):
# Second part of SF bug #596434: long option strings inside
# parentheses.
self.check_split("foo (--option) bar",
["foo", " ", "(--option)", " ", "bar"])
# Related stuff -- make sure parens work in simpler contexts.
self.check_split("foo (bar) baz",
["foo", " ", "(bar)", " ", "baz"])
self.check_split("blah (ding dong), wubba",
["blah", " ", "(ding", " ", "dong),",
" ", "wubba"])
def test_initial_whitespace(self):
# SF bug #622849 reported inconsistent handling of leading
# whitespace; let's test that a bit, shall we?
text = " This is a sentence with leading whitespace."
self.check_wrap(text, 50,
[" This is a sentence with leading whitespace."])
self.check_wrap(text, 30,
[" This is a sentence with", "leading whitespace."])
def test_no_drop_whitespace(self):
# SF patch #1581073
text = " This is a sentence with much whitespace."
self.check_wrap(text, 10,
[" This is a", " ", "sentence ",
"with ", "much white", "space."],
drop_whitespace=False)
if test_support.have_unicode:
def test_unicode(self):
# *Very* simple test of wrapping Unicode strings. I'm sure
# there's more to it than this, but let's at least make
# sure textwrap doesn't crash on Unicode input!
text = u"Hello there, how are you today?"
self.check_wrap(text, 50, [u"Hello there, how are you today?"])
self.check_wrap(text, 20, [u"Hello there, how are", "you today?"])
olines = self.wrapper.wrap(text)
assert isinstance(olines, list) and isinstance(olines[0], unicode)
otext = self.wrapper.fill(text)
assert isinstance(otext, unicode)
def test_no_split_at_umlaut(self):
text = u"Die Empf\xe4nger-Auswahl"
self.check_wrap(text, 13, [u"Die", u"Empf\xe4nger-", u"Auswahl"])
def test_umlaut_followed_by_dash(self):
text = u"aa \xe4\xe4-\xe4\xe4"
self.check_wrap(text, 7, [u"aa \xe4\xe4-", u"\xe4\xe4"])
def test_split(self):
# Ensure that the standard _split() method works as advertised
# in the comments
text = "Hello there -- you goof-ball, use the -b option!"
result = self.wrapper._split(text)
self.check(result,
["Hello", " ", "there", " ", "--", " ", "you", " ", "goof-",
"ball,", " ", "use", " ", "the", " ", "-b", " ", "option!"])
def test_break_on_hyphens(self):
# Ensure that the break_on_hyphens attributes work
text = "yaba daba-doo"
self.check_wrap(text, 10, ["yaba daba-", "doo"],
break_on_hyphens=True)
self.check_wrap(text, 10, ["yaba", "daba-doo"],
break_on_hyphens=False)
def test_bad_width(self):
# Ensure that width <= 0 is caught.
text = "Whatever, it doesn't matter."
self.assertRaises(ValueError, wrap, text, 0)
self.assertRaises(ValueError, wrap, text, -1)
class LongWordTestCase (BaseTestCase):
def setUp(self):
self.wrapper = TextWrapper()
self.text = '''\
Did you say "supercalifragilisticexpialidocious?"
How *do* you spell that odd word, anyways?
'''
def test_break_long(self):
# Wrap text with long words and lots of punctuation
self.check_wrap(self.text, 30,
['Did you say "supercalifragilis',
'ticexpialidocious?" How *do*',
'you spell that odd word,',
'anyways?'])
self.check_wrap(self.text, 50,
['Did you say "supercalifragilisticexpialidocious?"',
'How *do* you spell that odd word, anyways?'])
# SF bug 797650. Prevent an infinite loop by making sure that at
# least one character gets split off on every pass.
self.check_wrap('-'*10+'hello', 10,
['----------',
' h',
' e',
' l',
' l',
' o'],
subsequent_indent = ' '*15)
# bug 1146. Prevent a long word to be wrongly wrapped when the
# preceding word is exactly one character shorter than the width
self.check_wrap(self.text, 12,
['Did you say ',
'"supercalifr',
'agilisticexp',
'ialidocious?',
'" How *do*',
'you spell',
'that odd',
'word,',
'anyways?'])
def test_nobreak_long(self):
# Test with break_long_words disabled
self.wrapper.break_long_words = 0
self.wrapper.width = 30
expect = ['Did you say',
'"supercalifragilisticexpialidocious?"',
'How *do* you spell that odd',
'word, anyways?'
]
result = self.wrapper.wrap(self.text)
self.check(result, expect)
# Same thing with kwargs passed to standalone wrap() function.
result = wrap(self.text, width=30, break_long_words=0)
self.check(result, expect)
class IndentTestCases(BaseTestCase):
# called before each test method
def setUp(self):
self.text = '''\
This paragraph will be filled, first without any indentation,
and then with some (including a hanging indent).'''
def test_fill(self):
# Test the fill() method
expect = '''\
This paragraph will be filled, first
without any indentation, and then with
some (including a hanging indent).'''
result = fill(self.text, 40)
self.check(result, expect)
def test_initial_indent(self):
# Test initial_indent parameter
expect = [" This paragraph will be filled,",
"first without any indentation, and then",
"with some (including a hanging indent)."]
result = wrap(self.text, 40, initial_indent=" ")
self.check(result, expect)
expect = "\n".join(expect)
result = fill(self.text, 40, initial_indent=" ")
self.check(result, expect)
def test_subsequent_indent(self):
# Test subsequent_indent parameter
expect = '''\
* This paragraph will be filled, first
without any indentation, and then
with some (including a hanging
indent).'''
result = fill(self.text, 40,
initial_indent=" * ", subsequent_indent=" ")
self.check(result, expect)
# Despite the similar names, DedentTestCase is *not* the inverse
# of IndentTestCase!
class DedentTestCase(unittest.TestCase):
def assertUnchanged(self, text):
"""assert that dedent() has no effect on 'text'"""
self.assertEquals(text, dedent(text))
def test_dedent_nomargin(self):
# No lines indented.
text = "Hello there.\nHow are you?\nOh good, I'm glad."
self.assertUnchanged(text)
# Similar, with a blank line.
text = "Hello there.\n\nBoo!"
self.assertUnchanged(text)
# Some lines indented, but overall margin is still zero.
text = "Hello there.\n This is indented."
self.assertUnchanged(text)
# Again, add a blank line.
text = "Hello there.\n\n Boo!\n"
self.assertUnchanged(text)
def test_dedent_even(self):
# All lines indented by two spaces.
text = " Hello there.\n How are ya?\n Oh good."
expect = "Hello there.\nHow are ya?\nOh good."
self.assertEquals(expect, dedent(text))
# Same, with blank lines.
text = " Hello there.\n\n How are ya?\n Oh good.\n"
expect = "Hello there.\n\nHow are ya?\nOh good.\n"
self.assertEquals(expect, dedent(text))
# Now indent one of the blank lines.
text = " Hello there.\n \n How are ya?\n Oh good.\n"
expect = "Hello there.\n\nHow are ya?\nOh good.\n"
self.assertEquals(expect, dedent(text))
def test_dedent_uneven(self):
# Lines indented unevenly.
text = '''\
def foo():
while 1:
return foo
'''
expect = '''\
def foo():
while 1:
return foo
'''
self.assertEquals(expect, dedent(text))
# Uneven indentation with a blank line.
text = " Foo\n Bar\n\n Baz\n"
expect = "Foo\n Bar\n\n Baz\n"
self.assertEquals(expect, dedent(text))
# Uneven indentation with a whitespace-only line.
text = " Foo\n Bar\n \n Baz\n"
expect = "Foo\n Bar\n\n Baz\n"
self.assertEquals(expect, dedent(text))
# dedent() should not mangle internal tabs
def test_dedent_preserve_internal_tabs(self):
text = " hello\tthere\n how are\tyou?"
expect = "hello\tthere\nhow are\tyou?"
self.assertEquals(expect, dedent(text))
# make sure that it preserves tabs when it's not making any
# changes at all
self.assertEquals(expect, dedent(expect))
# dedent() should not mangle tabs in the margin (i.e.
# tabs and spaces both count as margin, but are *not*
# considered equivalent)
def test_dedent_preserve_margin_tabs(self):
text = " hello there\n\thow are you?"
self.assertUnchanged(text)
# same effect even if we have 8 spaces
text = " hello there\n\thow are you?"
self.assertUnchanged(text)
# dedent() only removes whitespace that can be uniformly removed!
text = "\thello there\n\thow are you?"
expect = "hello there\nhow are you?"
self.assertEquals(expect, dedent(text))
text = " \thello there\n \thow are you?"
self.assertEquals(expect, dedent(text))
text = " \t hello there\n \t how are you?"
self.assertEquals(expect, dedent(text))
text = " \thello there\n \t how are you?"
expect = "hello there\n how are you?"
self.assertEquals(expect, dedent(text))
def test_main():
    """Run every textwrap test case through test_support's runner."""
    suites = (WrapTestCase,
              LongWordTestCase,
              IndentTestCases,
              DedentTestCase)
    test_support.run_unittest(*suites)
if __name__ == '__main__':
test_main()
| gpl-2.0 |
sss/calibre-at-bzr | src/odf/presentation.py | 96 | 2714 | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from namespaces import PRESENTATIONNS
from element import Element
# ODF 1.0 section 9.6 and 9.7
# Autogenerated
def AnimationGroup(**args):
    """Create a <presentation:animation-group> element."""
    return Element(qname = (PRESENTATIONNS,'animation-group'), **args)
def Animations(**args):
    """Create a <presentation:animations> element."""
    return Element(qname = (PRESENTATIONNS,'animations'), **args)
def DateTime(**args):
    """Create a <presentation:date-time> element."""
    return Element(qname = (PRESENTATIONNS,'date-time'), **args)
def DateTimeDecl(**args):
    """Create a <presentation:date-time-decl> element."""
    return Element(qname = (PRESENTATIONNS,'date-time-decl'), **args)
def Dim(**args):
    """Create a <presentation:dim> element."""
    return Element(qname = (PRESENTATIONNS,'dim'), **args)
def EventListener(**args):
    """Create a <presentation:event-listener> element."""
    return Element(qname = (PRESENTATIONNS,'event-listener'), **args)
def Footer(**args):
    """Create a <presentation:footer> element."""
    return Element(qname = (PRESENTATIONNS,'footer'), **args)
def FooterDecl(**args):
    """Create a <presentation:footer-decl> element."""
    return Element(qname = (PRESENTATIONNS,'footer-decl'), **args)
def Header(**args):
    """Create a <presentation:header> element."""
    return Element(qname = (PRESENTATIONNS,'header'), **args)
def HeaderDecl(**args):
    """Create a <presentation:header-decl> element."""
    return Element(qname = (PRESENTATIONNS,'header-decl'), **args)
def HideShape(**args):
    """Create a <presentation:hide-shape> element."""
    return Element(qname = (PRESENTATIONNS,'hide-shape'), **args)
def HideText(**args):
    """Create a <presentation:hide-text> element."""
    return Element(qname = (PRESENTATIONNS,'hide-text'), **args)
def Notes(**args):
    """Create a <presentation:notes> element."""
    return Element(qname = (PRESENTATIONNS,'notes'), **args)
def Placeholder(**args):
    """Create a <presentation:placeholder> element."""
    return Element(qname = (PRESENTATIONNS,'placeholder'), **args)
def Play(**args):
    """Create a <presentation:play> element."""
    return Element(qname = (PRESENTATIONNS,'play'), **args)
def Settings(**args):
    """Create a <presentation:settings> element."""
    return Element(qname = (PRESENTATIONNS,'settings'), **args)
def Show(**args):
    """Create a <presentation:show> element."""
    return Element(qname = (PRESENTATIONNS,'show'), **args)
def ShowShape(**args):
    """Create a <presentation:show-shape> element."""
    return Element(qname = (PRESENTATIONNS,'show-shape'), **args)
def ShowText(**args):
    """Create a <presentation:show-text> element."""
    return Element(qname = (PRESENTATIONNS,'show-text'), **args)
def Sound(**args):
    """Create a <presentation:sound> element."""
    return Element(qname = (PRESENTATIONNS,'sound'), **args)
| gpl-3.0 |
kawamon/hue | desktop/core/ext-py/sqlparse-0.2.0/tests/test_split.py | 6 | 4024 | # -*- coding: utf-8 -*-
# Tests splitting functions.
import types
import pytest
import sqlparse
from sqlparse.compat import StringIO, text_type
def test_split_semicolon():
    """Statements split on semicolons, but a semicolon inside a string
    literal must not terminate the statement."""
    sql1 = 'select * from foo;'
    sql2 = "select * from foo where bar = 'foo;bar';"
    stmts = sqlparse.parse(''.join([sql1, sql2]))
    assert len(stmts) == 2
    assert str(stmts[0]) == sql1
    assert str(stmts[1]) == sql2
def test_split_backslash():
    """Backslash-escaped quotes inside string literals must not be
    mistaken for the end of the literal when splitting."""
    stmts = sqlparse.parse(r"select '\\'; select '\''; select '\\\'';")
    assert len(stmts) == 3
@pytest.mark.parametrize('fn', ['function.sql',
'function_psql.sql',
'function_psql2.sql',
'function_psql3.sql'])
def test_split_create_function(load_file, fn):
sql = load_file(fn)
stmts = sqlparse.parse(sql)
assert len(stmts) == 1
assert text_type(stmts[0]) == sql
def test_split_dashcomments(load_file):
sql = load_file('dashcomment.sql')
stmts = sqlparse.parse(sql)
assert len(stmts) == 3
assert ''.join(str(q) for q in stmts) == sql
@pytest.mark.parametrize('s', ['select foo; -- comment\n',
'select foo; -- comment\r',
'select foo; -- comment\r\n',
'select foo; -- comment'])
def test_split_dashcomments_eol(s):
stmts = sqlparse.parse(s)
assert len(stmts) == 1
def test_split_begintag(load_file):
sql = load_file('begintag.sql')
stmts = sqlparse.parse(sql)
assert len(stmts) == 3
assert ''.join(str(q) for q in stmts) == sql
def test_split_begintag_2(load_file):
sql = load_file('begintag_2.sql')
stmts = sqlparse.parse(sql)
assert len(stmts) == 1
assert ''.join(str(q) for q in stmts) == sql
def test_split_dropif():
sql = 'DROP TABLE IF EXISTS FOO;\n\nSELECT * FROM BAR;'
stmts = sqlparse.parse(sql)
assert len(stmts) == 2
assert ''.join(str(q) for q in stmts) == sql
def test_split_comment_with_umlaut():
sql = (u'select * from foo;\n'
u'-- Testing an umlaut: ä\n'
u'select * from bar;')
stmts = sqlparse.parse(sql)
assert len(stmts) == 2
assert ''.join(text_type(q) for q in stmts) == sql
def test_split_comment_end_of_line():
    """A trailing line comment stays attached to the statement it
    follows, including its terminating newline."""
    sql = ('select * from foo; -- foo\n'
           'select * from bar;')
    stmts = sqlparse.parse(sql)
    assert len(stmts) == 2
    assert ''.join(str(q) for q in stmts) == sql
    # make sure the comment belongs to first query
    assert str(stmts[0]) == 'select * from foo; -- foo\n'
def test_split_casewhen():
sql = ("SELECT case when val = 1 then 2 else null end as foo;\n"
"comment on table actor is 'The actor table.';")
stmts = sqlparse.split(sql)
assert len(stmts) == 2
def test_split_cursor_declare():
    # DECLARE CURSOR ... AS SELECT spans exactly one statement.
    source = ('DECLARE CURSOR "foo" AS SELECT 1;\n'
              'SELECT 2;')
    assert len(sqlparse.split(source)) == 2
def test_split_if_function():  # see issue 33
    # IF used as a function call must not be mistaken for an IF block
    # by the splitter.
    source = ('CREATE TEMPORARY TABLE tmp '
              'SELECT IF(a=1, a, b) AS o FROM one; '
              'SELECT t FROM two')
    assert len(sqlparse.split(source)) == 2
def test_split_stream():
    # parsestream is lazy: it returns a generator over the statements.
    statements = sqlparse.parsestream(StringIO("SELECT 1; SELECT 2;"))
    assert isinstance(statements, types.GeneratorType)
    assert len(list(statements)) == 2
def test_split_encoding_parsestream():
    # Token values produced from a stream are always text, never bytes.
    statements = list(sqlparse.parsestream(StringIO("SELECT 1; SELECT 2;")))
    assert isinstance(statements[0].tokens[0].value, text_type)
def test_split_unicode_parsestream():
    # Unicode input must round-trip through parsestream unchanged.
    statements = list(sqlparse.parsestream(StringIO(u'SELECT ö')))
    assert str(statements[0]) == 'SELECT ö'
def test_split_simple():
    # The high-level split() helper returns the raw statement strings.
    statements = sqlparse.split('select * from foo; select * from bar;')
    assert statements == ['select * from foo;', 'select * from bar;']
| apache-2.0 |
rd37/horizon | openstack_dashboard/dashboards/project/stacks/tables.py | 3 | 7444 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core import urlresolvers
from django.http import Http404 # noqa
from django.template.defaultfilters import title # noqa
from django.utils.http import urlencode # noqa
from django.utils.translation import ugettext_lazy as _
from horizon import messages
from horizon import tables
from horizon.utils import filters
from heatclient import exc
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.stacks import mappings
class LaunchStack(tables.LinkAction):
    # Table-level action: opens the "select template" workflow in an AJAX
    # modal so the user can create a new Heat stack.
    name = "launch"
    verbose_name = _("Launch Stack")
    url = "horizon:project:stacks:select_template"
    classes = ("btn-create", "ajax-modal")
    policy_rules = (("orchestration", "cloudformation:CreateStack"),)
class ChangeStackTemplate(tables.LinkAction):
    # Row-level action: opens the "change template" form for one stack.
    name = "edit"
    verbose_name = _("Change Stack Template")
    url = "horizon:project:stacks:change_template"
    classes = ("ajax-modal", "btn-edit")
    def get_link_url(self, stack):
        # Per-row URL: reverse the named route with the stack id appended.
        return urlresolvers.reverse(self.url, args=[stack.id])
class DeleteStack(tables.BatchAction):
    # Batch action that schedules deletion of one or more Heat stacks.
    name = "delete"
    action_present = _("Delete")
    action_past = _("Scheduled deletion of %(data_type)s")
    data_type_singular = _("Stack")
    data_type_plural = _("Stacks")
    classes = ('btn-danger', 'btn-terminate')
    policy_rules = (("orchestration", "cloudformation:DeleteStack"),)

    def action(self, request, stack_id):
        # Ask Heat to delete the stack; progress is tracked via row updates.
        api.heat.stack_delete(request, stack_id)

    def allowed(self, request, stack):
        # Table-level invocations pass stack=None and are always allowed;
        # per-row, hide the action once the stack is already deleted.
        return stack is None or stack.stack_status != 'DELETE_COMPLETE'
class StacksUpdateRow(tables.Row):
    # Row that refreshes itself over AJAX while its stack is transitioning.
    ajax = True
    def can_be_selected(self, datum):
        # Fully deleted stacks can no longer be acted upon.
        return datum.stack_status != 'DELETE_COMPLETE'
    def get_data(self, request, stack_id):
        try:
            return api.heat.stack_get(request, stack_id)
        except exc.HTTPNotFound:
            # returning 404 to the ajax call removes the
            # row from the table on the ui
            raise Http404
        except Exception as e:
            # Any other failure is surfaced to the user; the method then
            # implicitly returns None, so the row keeps its previous data.
            messages.error(request, e)
class StacksTable(tables.DataTable):
    # Main table of Heat stacks for the project "Stacks" panel.
    # Status strings ending in "Complete"/"Failed" mark a row as settled,
    # which stops the AJAX polling for that row.
    STATUS_CHOICES = (
        ("Complete", True),
        ("Failed", False),
    )
    name = tables.Column("stack_name",
                         verbose_name=_("Stack Name"),
                         link="horizon:project:stacks:detail",)
    created = tables.Column("creation_time",
                            verbose_name=_("Created"),
                            filters=(filters.parse_isotime,
                                     filters.timesince_or_never))
    updated = tables.Column("updated_time",
                            verbose_name=_("Updated"),
                            filters=(filters.parse_isotime,
                                     filters.timesince_or_never))
    status = tables.Column("status",
                           filters=(title, filters.replace_underscores),
                           verbose_name=_("Status"),
                           status=True,
                           status_choices=STATUS_CHOICES)
    def get_object_display(self, stack):
        # Use the human-readable stack name in confirmation messages.
        return stack.stack_name
    class Meta:
        name = "stacks"
        verbose_name = _("Stacks")
        pagination_param = 'stack_marker'
        status_columns = ["status", ]
        row_class = StacksUpdateRow
        table_actions = (LaunchStack, DeleteStack,)
        row_actions = (DeleteStack,
                       ChangeStackTemplate)
class EventsTable(tables.DataTable):
    # Read-only table listing the events recorded for a single stack.
    logical_resource = tables.Column('resource_name',
                                     verbose_name=_("Stack Resource"),
                                     link=lambda d: d.resource_name,)
    physical_resource = tables.Column('physical_resource_id',
                                      verbose_name=_("Resource"),
                                      link=mappings.resource_to_url)
    timestamp = tables.Column('event_time',
                              verbose_name=_("Time Since Event"),
                              filters=(filters.parse_isotime,
                                       filters.timesince_or_never))
    status = tables.Column("resource_status",
                           filters=(title, filters.replace_underscores),
                           verbose_name=_("Status"),)
    statusreason = tables.Column("resource_status_reason",
                                 verbose_name=_("Status Reason"),)
    class Meta:
        name = "events"
        verbose_name = _("Stack Events")
class ResourcesUpdateRow(tables.Row):
    # Row that refreshes a single stack resource over AJAX.
    ajax = True
    def get_data(self, request, resource_name):
        try:
            # The owning ResourcesTable stores its stack; Heat identifies a
            # resource by "<stack_name>/<stack_id>" plus the resource name.
            stack = self.table.stack
            stack_identifier = '%s/%s' % (stack.stack_name, stack.id)
            return api.heat.resource_get(
                request, stack_identifier, resource_name)
        except exc.HTTPNotFound:
            # returning 404 to the ajax call removes the
            # row from the table on the ui
            raise Http404
        except Exception as e:
            # Any other failure is shown to the user; the method then
            # implicitly returns None and the row keeps its old contents.
            messages.error(request, e)
class ResourcesTable(tables.DataTable):
    # Table of the resources belonging to one stack. Status strings ending
    # in "Complete"/"Failed" stop the AJAX polling for that row.
    STATUS_CHOICES = (
        ("Create Complete", True),
        ("Create Failed", False),
    )
    logical_resource = tables.Column('resource_name',
                                     verbose_name=_("Stack Resource"),
                                     link=lambda d: d.resource_name)
    physical_resource = tables.Column('physical_resource_id',
                                      verbose_name=_("Resource"),
                                      link=mappings.resource_to_url)
    resource_type = tables.Column("resource_type",
                                  verbose_name=_("Stack Resource Type"),)
    updated_time = tables.Column('updated_time',
                                 verbose_name=_("Date Updated"),
                                 filters=(filters.parse_isotime,
                                          filters.timesince_or_never))
    status = tables.Column("resource_status",
                           filters=(title, filters.replace_underscores),
                           verbose_name=_("Status"),
                           status=True,
                           status_choices=STATUS_CHOICES)
    statusreason = tables.Column("resource_status_reason",
                                 verbose_name=_("Status Reason"),)
    def __init__(self, request, data=None,
                 needs_form_wrapper=None, **kwargs):
        super(ResourcesTable, self).__init__(
            request, data, needs_form_wrapper, **kwargs)
        # NOTE(review): callers must supply a `stack` kwarg; it is also
        # forwarded to the base DataTable through **kwargs -- confirm the
        # base class tolerates the extra keyword.
        self.stack = kwargs['stack']
    def get_object_id(self, datum):
        # Heat resources have no global id; the per-stack name is unique.
        return datum.resource_name
    class Meta:
        name = "resources"
        verbose_name = _("Stack Resources")
        status_columns = ["status", ]
        row_class = ResourcesUpdateRow
| apache-2.0 |
IshankGulati/scikit-learn | examples/cluster/plot_segmentation_toy.py | 91 | 3522 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
know as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (ie balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
def _cluster_and_plot(img, n_clusters):
    # Segment the foreground of `img` with spectral clustering and plot the
    # noisy image next to the recovered labels. Factored out because the
    # 4-circle and 2-circle demos below were duplicated line for line.
    #
    # We use a mask that limits to the foreground: the problem that we are
    # interested in here is not separating the objects from the background,
    # but separating them one from the other.
    mask = img.astype(bool)

    img = img.astype(float)
    img += 1 + 0.2 * np.random.randn(*img.shape)

    # Convert the image into a graph with the value of the gradient on the
    # edges.
    graph = image.img_to_graph(img, mask=mask)

    # Take a decreasing function of the gradient: we take it weakly
    # dependent from the gradient the segmentation is close to a voronoi
    graph.data = np.exp(-graph.data / graph.data.std())

    # Force the solver to be arpack, since amg is numerically
    # unstable on this example
    labels = spectral_clustering(graph, n_clusters=n_clusters,
                                 eigen_solver='arpack')
    label_im = -np.ones(mask.shape)
    label_im[mask] = labels

    plt.matshow(img)
    plt.matshow(label_im)


l = 100
x, y = np.indices((l, l))

center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)

radius1, radius2, radius3, radius4 = 16, 14, 15, 14

circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2

# 4 circles: with comparably sized regions the normalized-cut criterion
# recovers each circle as its own segment.
_cluster_and_plot(circle1 + circle2 + circle3 + circle4, n_clusters=4)

# 2 circles
_cluster_and_plot(circle1 + circle2, n_clusters=2)

plt.show()
| bsd-3-clause |
mkaluza/external_chromium_org | chrome/test/functional/webpagereplay.py | 23 | 8788 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Start and stop Web Page Replay.
Of the public module names, the following one is key:
ReplayServer: a class to start/stop Web Page Replay.
"""
import logging
import os
import re
import signal
import subprocess
import sys
import time
import urllib
# Root of the Chromium checkout, four directory levels up from this file.
_CHROME_BASE_DIR = os.path.abspath(os.path.join(
    os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir))
# Location of the Web Page Replay checkout inside third_party.
REPLAY_DIR = os.path.join(
    _CHROME_BASE_DIR, 'src', 'third_party', 'webpagereplay')
# Default destination for WPR's combined stdout/stderr log.
LOG_PATH = os.path.join(
    _CHROME_BASE_DIR, 'src', 'webpagereplay_logs', 'logs.txt')
# Chrome options to make it work with Web Page Replay.
def GetChromeFlags(replay_host, http_port, https_port):
  """Return the Chrome command-line flags that route traffic through WPR."""
  assert replay_host and http_port and https_port, 'All arguments required'
  resolver_flag = '--host-resolver-rules=MAP * %s,EXCLUDE localhost' % (
      replay_host)
  http_flag = '--testing-fixed-http-port=%s' % http_port
  https_flag = '--testing-fixed-https-port=%s' % https_port
  return [resolver_flag, http_flag, https_flag,
          '--ignore-certificate-errors']
# Signal masks on Linux are inherited from parent processes. If anything
# invoking us accidentally masks SIGINT (e.g. by putting a process in the
# background from a shell script), sending a SIGINT to the child will fail
# to terminate it. Running this signal handler before execing should fix that
# problem.
def ResetInterruptHandler():
  """Restore the default SIGINT disposition (used as a Popen preexec_fn)."""
  signal.signal(signal.SIGINT, signal.SIG_DFL)
class ReplayError(Exception):
  """Catch-all exception for the module.

  Base class for all Web Page Replay failures, so callers can catch every
  WPR-related error with a single except clause.
  """
  pass
class ReplayNotFoundError(ReplayError):
  """Raised when a required file or directory does not exist."""

  def __init__(self, label, path):
    # Store both pieces in `args` so the exception reprs like a stdlib one.
    self.args = (label, path)

  def __str__(self):
    return 'Path does not exist for %s: %s' % self.args
class ReplayNotStartedError(ReplayError):
  """Raised when Web Page Replay fails to come up before the timeout."""
  pass
class ReplayServer(object):
  """Start and Stop Web Page Replay.

  Web Page Replay is a proxy that can record and "replay" web pages with
  simulated network characteristics -- without having to edit the pages
  by hand. With WPR, tests can use "real" web content, and catch
  performance issues that may result from introducing network delays and
  bandwidth throttling.

  Example:
     with ReplayServer(archive_path):
         self.NavigateToURL(start_url)
         self.WaitUntil(...)

  Environment Variables (for development):
     WPR_ARCHIVE_PATH: path to alternate archive file (e.g. '/tmp/foo.wpr').
     WPR_RECORD: if set, puts Web Page Replay in record mode instead of replay.
     WPR_REPLAY_DIR: path to alternate Web Page Replay source.
  """

  def __init__(self, archive_path, replay_host, http_port, https_port,
               replay_options=None, replay_dir=None,
               log_path=None):
    """Initialize ReplayServer.

    Args:
      archive_path: a path to a specific WPR archive (required).
      replay_host: the hostname to serve traffic.
      http_port: an integer port on which to serve HTTP traffic. May be zero
          to let the OS choose an available port.
      https_port: an integer port on which to serve HTTPS traffic. May be zero
          to let the OS choose an available port.
      replay_options: an iterable of options strings to forward to replay.py.
      replay_dir: directory that has replay.py and related modules.
      log_path: a path to a log file.

    Raises:
      ReplayNotFoundError: if the archive, archive directory, or replay
          script does not exist.
    """
    self.archive_path = os.environ.get('WPR_ARCHIVE_PATH', archive_path)
    self.replay_options = list(replay_options or ())
    self.replay_dir = os.environ.get('WPR_REPLAY_DIR', replay_dir or REPLAY_DIR)
    self.log_path = log_path or LOG_PATH
    self.http_port = http_port
    self.https_port = https_port
    self._replay_host = replay_host

    if 'WPR_RECORD' in os.environ and '--record' not in self.replay_options:
      self.replay_options.append('--record')
    self.is_record_mode = '--record' in self.replay_options
    self._AddDefaultReplayOptions()

    self.replay_py = os.path.join(self.replay_dir, 'replay.py')

    if self.is_record_mode:
      # Recording creates the archive, so only its directory must exist.
      self._CheckPath('archive directory', os.path.dirname(self.archive_path))
    elif not os.path.exists(self.archive_path):
      self._CheckPath('archive file', self.archive_path)
    self._CheckPath('replay script', self.replay_py)

    self.log_fh = None
    self.replay_process = None

  def _AddDefaultReplayOptions(self):
    """Set WPR command-line options. Can be overridden if needed."""
    self.replay_options = [
        '--host', str(self._replay_host),
        '--port', str(self.http_port),
        '--ssl_port', str(self.https_port),
        '--use_closest_match',
        '--no-dns_forwarding',
        '--log_level', 'warning'
        ] + self.replay_options

  def _CheckPath(self, label, path):
    """Raise ReplayNotFoundError if |path| does not exist."""
    if not os.path.exists(path):
      raise ReplayNotFoundError(label, path)

  def _OpenLogFile(self):
    """Create the log directory if needed and open the log for writing."""
    log_dir = os.path.dirname(self.log_path)
    if not os.path.exists(log_dir):
      os.makedirs(log_dir)
    return open(self.log_path, 'w')

  def WaitForStart(self, timeout):
    """Checks to see if the server is up and running.

    When a port was requested as 0, the actual port is scraped from the WPR
    log. Both the HTTP and HTTPS generate-200 URLs are then probed until
    they answer or |timeout| seconds elapse.

    Args:
      timeout: maximum number of seconds to wait.

    Returns:
      True if both endpoints responded with HTTP 200, False otherwise.
    """
    # Raw string: '\d' is an invalid escape sequence in a plain literal.
    port_re = re.compile(
        r'.*(?P<protocol>HTTPS?) server started on (?P<host>.*):(?P<port>\d+)')

    start_time = time.time()
    elapsed_time = 0
    while elapsed_time < timeout:
      if self.replay_process.poll() is not None:
        break  # The process has exited.

      # Read the ports from the WPR log.
      if not self.http_port or not self.https_port:
        # Use a context manager so the log handle is closed each poll
        # iteration (the old code leaked one file handle per iteration).
        with open(self.log_path) as log_file:
          for line in log_file:
            m = port_re.match(line.strip())
            if m:
              if not self.http_port and m.group('protocol') == 'HTTP':
                self.http_port = int(m.group('port'))
              elif not self.https_port and m.group('protocol') == 'HTTPS':
                self.https_port = int(m.group('port'))

      # Try to connect to the WPR ports.
      if self.http_port and self.https_port:
        try:
          up_url = '%s://%s:%s/web-page-replay-generate-200'
          http_up_url = up_url % ('http', self._replay_host, self.http_port)
          https_up_url = up_url % ('https', self._replay_host, self.https_port)
          if (200 == urllib.urlopen(http_up_url, None, {}).getcode() and
              200 == urllib.urlopen(https_up_url, None, {}).getcode()):
            return True
        except IOError:
          pass

      poll_interval = min(max(elapsed_time / 10., .1), 5)
      time.sleep(poll_interval)
      elapsed_time = time.time() - start_time

    return False

  def StartServer(self):
    """Start Web Page Replay and verify that it started.

    Raises:
      ReplayNotStartedError: if Replay start-up fails.
    """
    cmd_line = [sys.executable, self.replay_py]
    cmd_line.extend(self.replay_options)
    cmd_line.append(self.archive_path)

    self.log_fh = self._OpenLogFile()

    logging.debug('Starting Web-Page-Replay: %s', cmd_line)
    kwargs = {'stdout': self.log_fh, 'stderr': subprocess.STDOUT}
    if sys.platform.startswith('linux') or sys.platform == 'darwin':
      kwargs['preexec_fn'] = ResetInterruptHandler
    self.replay_process = subprocess.Popen(cmd_line, **kwargs)

    if not self.WaitForStart(30):
      # Close the log handle after reading (it was previously leaked).
      with open(self.log_path) as log_file:
        log = log_file.read()
      raise ReplayNotStartedError(
          'Web Page Replay failed to start. Log output:\n%s' % log)

  def StopServer(self):
    """Stop Web Page Replay, preferring a graceful shutdown."""
    if self.replay_process:
      logging.debug('Trying to stop Web-Page-Replay gracefully')
      try:
        url = 'http://localhost:%s/web-page-replay-command-exit'
        urllib.urlopen(url % self.http_port, None, {})
      except IOError:
        # IOError is possible because the server might exit without response.
        pass

      start_time = time.time()
      while time.time() - start_time < 10:  # Timeout after 10 seconds.
        if self.replay_process.poll() is not None:
          break
        time.sleep(1)
      else:
        try:
          # Use a SIGINT so that it can do graceful cleanup.
          self.replay_process.send_signal(signal.SIGINT)
        except:  # pylint: disable=W0702
          # On Windows, we are left with no other option than terminate().
          if 'no-dns_forwarding' not in self.replay_options:
            logging.warning('DNS configuration might not be restored!')
          try:
            self.replay_process.terminate()
          except:  # pylint: disable=W0702
            pass
        self.replay_process.wait()

    if self.log_fh:
      self.log_fh.close()

  def __enter__(self):
    """Add support for with-statement."""
    self.StartServer()
    return self

  def __exit__(self, unused_exc_type, unused_exc_val, unused_exc_tb):
    """Add support for with-statement."""
    self.StopServer()
| bsd-3-clause |
tensorprob/tensorprob | tensorprob/optimizers/scipy_lbfgsb.py | 3 | 1705 |
import numpy as np
import tensorflow as tf
from scipy.optimize import fmin_l_bfgs_b
from ..optimization_result import OptimizationResult
from .base import BaseOptimizer
class ScipyLBFGSBOptimizer(BaseOptimizer):
    """Optimizer backed by SciPy's bounded L-BFGS implementation.

    Parameters
    ----------
    verbose : bool
        If True, print the parameter vector on every iteration (with a
        header row every 50 iterations).
    callback : callable or None
        Optional user callback invoked with the current parameter vector
        after each iteration.
    m : int
        Number of limited-memory correction pairs kept by L-BFGS-B.
    factr : float
        Relative function-value convergence tolerance for ``fmin_l_bfgs_b``.
    pgtol : float
        Projected-gradient convergence tolerance for ``fmin_l_bfgs_b``.
    """

    def __init__(self, verbose=False, callback=None, m=10, factr=1e3, pgtol=1e-3, **kwargs):
        self.verbose = verbose
        self.callback = callback
        self.m = m
        self.factr = factr
        self.pgtol = pgtol
        super(ScipyLBFGSBOptimizer, self).__init__(**kwargs)

    def minimize_impl(self, objective, gradient, inits, bounds):
        """Run L-BFGS-B and wrap scipy's output in an OptimizationResult."""
        # When no analytic gradient is supplied, let scipy approximate it
        # by finite differences.
        approx_grad = gradient is None

        self.niter = 0

        def callback(xs):
            self.niter += 1
            if self.verbose:
                if self.niter % 50 == 0:
                    # BUG FIX: the original referenced an undefined name
                    # `variables` here, raising NameError in verbose mode.
                    # Print positional parameter labels instead.
                    print('iter ', '\t'.join('x%d' % i for i in range(len(xs))))
                print('{: 4d} {}'.format(self.niter, '\t'.join(map(str, xs))))
            if self.callback is not None:
                self.callback(xs)

        results = fmin_l_bfgs_b(
            objective,
            inits,
            m=self.m,
            fprime=gradient,
            factr=self.factr,
            pgtol=self.pgtol,
            callback=callback,
            approx_grad=approx_grad,
            bounds=bounds,
        )

        # fmin_l_bfgs_b returns (x, f, info_dict); unpack into the
        # project's result object.
        ret = OptimizationResult()
        ret.x = results[0]
        ret.func = results[1]
        ret.niter = results[2]['nit']
        ret.calls = results[2]['funcalls']
        ret.message = results[2]['task'].decode().lower()
        ret.success = results[2]['warnflag'] == 0
        return ret
| mit |
glennq/scikit-learn | sklearn/manifold/spectral_embedding_.py | 39 | 20835 | """Spectral Embedding"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Wei LI <kuantkid@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse
from scipy.linalg import eigh
from scipy.sparse.linalg import lobpcg
from ..base import BaseEstimator
from ..externals import six
from ..utils import check_random_state, check_array, check_symmetric
from ..utils.extmath import _deterministic_vector_sign_flip
from ..utils.graph import graph_laplacian
from ..utils.sparsetools import connected_components
from ..utils.arpack import eigsh
from ..metrics.pairwise import rbf_kernel
from ..neighbors import kneighbors_graph
def _graph_connected_component(graph, node_id):
"""Find the largest graph connected components that contains one
given node
Parameters
----------
graph : array-like, shape: (n_samples, n_samples)
adjacency matrix of the graph, non-zero weight means an edge
between the nodes
node_id : int
The index of the query node of the graph
Returns
-------
connected_components_matrix : array-like, shape: (n_samples,)
An array of bool value indicating the indexes of the nodes
belonging to the largest connected components of the given query
node
"""
n_node = graph.shape[0]
if sparse.issparse(graph):
# speed up row-wise access to boolean connection mask
graph = graph.tocsr()
connected_nodes = np.zeros(n_node, dtype=np.bool)
nodes_to_explore = np.zeros(n_node, dtype=np.bool)
nodes_to_explore[node_id] = True
for _ in range(n_node):
last_num_component = connected_nodes.sum()
np.logical_or(connected_nodes, nodes_to_explore, out=connected_nodes)
if last_num_component >= connected_nodes.sum():
break
indices = np.where(nodes_to_explore)[0]
nodes_to_explore.fill(False)
for i in indices:
if sparse.issparse(graph):
neighbors = graph[i].toarray().ravel()
else:
neighbors = graph[i]
np.logical_or(nodes_to_explore, neighbors, out=nodes_to_explore)
return connected_nodes
def _graph_is_connected(graph):
    """ Return whether the graph is connected (True) or Not (False)

    Parameters
    ----------
    graph : array-like or sparse matrix, shape: (n_samples, n_samples)
        adjacency matrix of the graph, non-zero weight means an edge
        between the nodes

    Returns
    -------
    is_connected : bool
        True means the graph is fully connected and False means not
    """
    if sparse.isspmatrix(graph):
        # Sparse input: count the connected components directly.
        n_components, _ = connected_components(graph)
        return n_components == 1
    # Dense input: the graph is connected iff the component containing
    # node 0 spans every node.
    return _graph_connected_component(graph, 0).sum() == graph.shape[0]
def _set_diag(laplacian, value, norm_laplacian):
"""Set the diagonal of the laplacian matrix and convert it to a
sparse format well suited for eigenvalue decomposition
Parameters
----------
laplacian : array or sparse matrix
The graph laplacian
value : float
The value of the diagonal
norm_laplacian : bool
Whether the value of the diagonal should be changed or not
Returns
-------
laplacian : array or sparse matrix
An array of matrix in a form that is well suited to fast
eigenvalue decomposition, depending on the band width of the
matrix.
"""
n_nodes = laplacian.shape[0]
# We need all entries in the diagonal to values
if not sparse.isspmatrix(laplacian):
if norm_laplacian:
laplacian.flat[::n_nodes + 1] = value
else:
laplacian = laplacian.tocoo()
if norm_laplacian:
diag_idx = (laplacian.row == laplacian.col)
laplacian.data[diag_idx] = value
# If the matrix has a small number of diagonals (as in the
# case of structured matrices coming from images), the
# dia format might be best suited for matvec products:
n_diags = np.unique(laplacian.row - laplacian.col).size
if n_diags <= 7:
# 3 or less outer diagonals on each side
laplacian = laplacian.todia()
else:
# csr has the fastest matvec and is thus best suited to
# arpack
laplacian = laplacian.tocsr()
return laplacian
def spectral_embedding(adjacency, n_components=8, eigen_solver=None,
                       random_state=None, eigen_tol=0.0,
                       norm_laplacian=True, drop_first=True):
    """Project the sample on the first eigenvectors of the graph Laplacian.

    The adjacency matrix is used to compute a normalized graph Laplacian
    whose spectrum (especially the eigenvectors associated to the
    smallest eigenvalues) has an interpretation in terms of minimal
    number of cuts necessary to split the graph into comparably sized
    components.

    This embedding can also 'work' even if the ``adjacency`` variable is
    not strictly the adjacency matrix of a graph but more generally
    an affinity or similarity matrix between samples (for instance the
    heat kernel of a euclidean distance matrix or a k-NN matrix).

    However care must taken to always make the affinity matrix symmetric
    so that the eigenvector decomposition works as expected.

    Read more in the :ref:`User Guide <spectral_embedding>`.

    Parameters
    ----------
    adjacency : array-like or sparse matrix, shape: (n_samples, n_samples)
        The adjacency matrix of the graph to embed.

    n_components : integer, optional, default 8
        The dimension of the projection subspace.

    eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}, default None
        The eigenvalue decomposition strategy to use. AMG requires pyamg
        to be installed. It can be faster on very large, sparse problems,
        but may also lead to instabilities.

    random_state : int seed, RandomState instance, or None (default)
        A pseudo random number generator used for the initialization of the
        lobpcg eigenvectors decomposition when eigen_solver == 'amg'.
        By default, arpack is used.

    eigen_tol : float, optional, default=0.0
        Stopping criterion for eigendecomposition of the Laplacian matrix
        when using arpack eigen_solver.

    drop_first : bool, optional, default=True
        Whether to drop the first eigenvector. For spectral embedding, this
        should be True as the first eigenvector should be constant vector for
        connected graph, but for spectral clustering, this should be kept as
        False to retain the first eigenvector.

    norm_laplacian : bool, optional, default=True
        If True, then compute normalized Laplacian.

    Returns
    -------
    embedding : array, shape=(n_samples, n_components)
        The reduced samples.

    Notes
    -----
    Spectral embedding is most useful when the graph has one connected
    component. If there graph has many components, the first few eigenvectors
    will simply uncover the connected components of the graph.

    References
    ----------
    * https://en.wikipedia.org/wiki/LOBPCG

    * Toward the Optimal Preconditioned Eigensolver: Locally Optimal
      Block Preconditioned Conjugate Gradient Method
      Andrew V. Knyazev
      http://dx.doi.org/10.1137%2FS1064827500366124
    """
    # Spectral decomposition requires a symmetric affinity matrix.
    adjacency = check_symmetric(adjacency)

    # pyamg is an optional dependency; only its absence combined with an
    # explicit eigen_solver='amg' request is an error.
    try:
        from pyamg import smoothed_aggregation_solver
    except ImportError:
        if eigen_solver == "amg":
            raise ValueError("The eigen_solver was set to 'amg', but pyamg is "
                             "not available.")

    if eigen_solver is None:
        eigen_solver = 'arpack'
    elif eigen_solver not in ('arpack', 'lobpcg', 'amg'):
        raise ValueError("Unknown value for eigen_solver: '%s'."
                         "Should be 'amg', 'arpack', or 'lobpcg'"
                         % eigen_solver)

    random_state = check_random_state(random_state)

    n_nodes = adjacency.shape[0]
    # Whether to drop the first eigenvector
    if drop_first:
        n_components = n_components + 1

    if not _graph_is_connected(adjacency):
        warnings.warn("Graph is not fully connected, spectral embedding"
                      " may not work as expected.")

    # dd holds the diagonal of the degree matrix, used below to recover the
    # embedding from the normalized Laplacian's eigenvectors.
    laplacian, dd = graph_laplacian(adjacency,
                                    normed=norm_laplacian, return_diag=True)
    if (eigen_solver == 'arpack' or eigen_solver != 'lobpcg' and
       (not sparse.isspmatrix(laplacian) or n_nodes < 5 * n_components)):
        # lobpcg used with eigen_solver='amg' has bugs for low number of nodes
        # for details see the source code in scipy:
        # https://github.com/scipy/scipy/blob/v0.11.0/scipy/sparse/linalg/eigen
        # /lobpcg/lobpcg.py#L237
        # or matlab:
        # http://www.mathworks.com/matlabcentral/fileexchange/48-lobpcg-m
        laplacian = _set_diag(laplacian, 1, norm_laplacian)

        # Here we'll use shift-invert mode for fast eigenvalues
        # (see http://docs.scipy.org/doc/scipy/reference/tutorial/arpack.html
        #  for a short explanation of what this means)
        # Because the normalized Laplacian has eigenvalues between 0 and 2,
        # I - L has eigenvalues between -1 and 1.  ARPACK is most efficient
        # when finding eigenvalues of largest magnitude (keyword which='LM')
        # and when these eigenvalues are very large compared to the rest.
        # For very large, very sparse graphs, I - L can have many, many
        # eigenvalues very near 1.0.  This leads to slow convergence.  So
        # instead, we'll use ARPACK's shift-invert mode, asking for the
        # eigenvalues near 1.0.  This effectively spreads-out the spectrum
        # near 1.0 and leads to much faster convergence: potentially an
        # orders-of-magnitude speedup over simply using keyword which='LA'
        # in standard mode.
        try:
            # We are computing the opposite of the laplacian inplace so as
            # to spare a memory allocation of a possibly very large array
            laplacian *= -1
            v0 = random_state.uniform(-1, 1, laplacian.shape[0])
            lambdas, diffusion_map = eigsh(laplacian, k=n_components,
                                           sigma=1.0, which='LM',
                                           tol=eigen_tol, v0=v0)
            # Multiplying by dd recovers the embedding of the original
            # (unnormalized) problem from the normalized eigenvectors.
            embedding = diffusion_map.T[n_components::-1] * dd
        except RuntimeError:
            # When submatrices are exactly singular, an LU decomposition
            # in arpack fails. We fallback to lobpcg
            eigen_solver = "lobpcg"
            # Revert the laplacian to its opposite to have lobpcg work
            laplacian *= -1

    if eigen_solver == 'amg':
        # Use AMG to get a preconditioner and speed up the eigenvalue
        # problem.
        if not sparse.issparse(laplacian):
            warnings.warn("AMG works better for sparse matrices")
        # lobpcg needs double precision floats
        laplacian = check_array(laplacian, dtype=np.float64,
                                accept_sparse=True)
        laplacian = _set_diag(laplacian, 1, norm_laplacian)
        ml = smoothed_aggregation_solver(check_array(laplacian, 'csr'))
        M = ml.aspreconditioner()
        X = random_state.rand(laplacian.shape[0], n_components + 1)
        # Seed the first column with the known constant eigenvector.
        X[:, 0] = dd.ravel()
        lambdas, diffusion_map = lobpcg(laplacian, X, M=M, tol=1.e-12,
                                        largest=False)
        embedding = diffusion_map.T * dd
        if embedding.shape[0] == 1:
            raise ValueError

    elif eigen_solver == "lobpcg":
        # lobpcg needs double precision floats
        laplacian = check_array(laplacian, dtype=np.float64,
                                accept_sparse=True)
        if n_nodes < 5 * n_components + 1:
            # see note above under arpack why lobpcg has problems with small
            # number of nodes
            # lobpcg will fallback to eigh, so we short circuit it
            if sparse.isspmatrix(laplacian):
                laplacian = laplacian.toarray()
            lambdas, diffusion_map = eigh(laplacian)
            embedding = diffusion_map.T[:n_components] * dd
        else:
            laplacian = _set_diag(laplacian, 1, norm_laplacian)
            # We increase the number of eigenvectors requested, as lobpcg
            # doesn't behave well in low dimension
            X = random_state.rand(laplacian.shape[0], n_components + 1)
            X[:, 0] = dd.ravel()
            lambdas, diffusion_map = lobpcg(laplacian, X, tol=1e-15,
                                            largest=False, maxiter=2000)
            embedding = diffusion_map.T[:n_components] * dd
            if embedding.shape[0] == 1:
                raise ValueError

    # Fix the signs of the eigenvectors to make the output deterministic.
    embedding = _deterministic_vector_sign_flip(embedding)
    if drop_first:
        return embedding[1:n_components].T
    else:
        return embedding[:n_components].T
class SpectralEmbedding(BaseEstimator):
"""Spectral embedding for non-linear dimensionality reduction.
Forms an affinity matrix given by the specified function and
applies spectral decomposition to the corresponding graph laplacian.
The resulting transformation is given by the value of the
eigenvectors for each data point.
Read more in the :ref:`User Guide <spectral_embedding>`.
Parameters
-----------
n_components : integer, default: 2
The dimension of the projected subspace.
eigen_solver : {None, 'arpack', 'lobpcg', or 'amg'}
The eigenvalue decomposition strategy to use. AMG requires pyamg
to be installed. It can be faster on very large, sparse problems,
but may also lead to instabilities.
random_state : int seed, RandomState instance, or None, default : None
A pseudo random number generator used for the initialization of the
lobpcg eigenvectors decomposition when eigen_solver == 'amg'.
affinity : string or callable, default : "nearest_neighbors"
How to construct the affinity matrix.
- 'nearest_neighbors' : construct affinity matrix by knn graph
- 'rbf' : construct affinity matrix by rbf kernel
- 'precomputed' : interpret X as precomputed affinity matrix
- callable : use passed in function as affinity
the function takes in data matrix (n_samples, n_features)
and return affinity matrix (n_samples, n_samples).
gamma : float, optional, default : 1/n_features
Kernel coefficient for rbf kernel.
n_neighbors : int, default : max(n_samples/10 , 1)
Number of nearest neighbors for nearest_neighbors graph building.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
embedding_ : array, shape = (n_samples, n_components)
Spectral embedding of the training matrix.
affinity_matrix_ : array, shape = (n_samples, n_samples)
Affinity_matrix constructed from samples or precomputed.
References
----------
- A Tutorial on Spectral Clustering, 2007
Ulrike von Luxburg
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.165.9323
- On Spectral Clustering: Analysis and an algorithm, 2011
Andrew Y. Ng, Michael I. Jordan, Yair Weiss
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.19.8100
- Normalized cuts and image segmentation, 2000
Jianbo Shi, Jitendra Malik
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.160.2324
"""
def __init__(self, n_components=2, affinity="nearest_neighbors",
gamma=None, random_state=None, eigen_solver=None,
n_neighbors=None, n_jobs=1):
self.n_components = n_components
self.affinity = affinity
self.gamma = gamma
self.random_state = random_state
self.eigen_solver = eigen_solver
self.n_neighbors = n_neighbors
self.n_jobs = n_jobs
@property
def _pairwise(self):
return self.affinity == "precomputed"
def _get_affinity_matrix(self, X, Y=None):
"""Calculate the affinity matrix from data
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
If affinity is "precomputed"
X : array-like, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
Returns
-------
affinity_matrix, shape (n_samples, n_samples)
"""
if self.affinity == 'precomputed':
self.affinity_matrix_ = X
return self.affinity_matrix_
if self.affinity == 'nearest_neighbors':
if sparse.issparse(X):
warnings.warn("Nearest neighbors affinity currently does "
"not support sparse input, falling back to "
"rbf affinity")
self.affinity = "rbf"
else:
self.n_neighbors_ = (self.n_neighbors
if self.n_neighbors is not None
else max(int(X.shape[0] / 10), 1))
self.affinity_matrix_ = kneighbors_graph(X, self.n_neighbors_,
include_self=True,
n_jobs=self.n_jobs)
# currently only symmetric affinity_matrix supported
self.affinity_matrix_ = 0.5 * (self.affinity_matrix_ +
self.affinity_matrix_.T)
return self.affinity_matrix_
if self.affinity == 'rbf':
self.gamma_ = (self.gamma
if self.gamma is not None else 1.0 / X.shape[1])
self.affinity_matrix_ = rbf_kernel(X, gamma=self.gamma_)
return self.affinity_matrix_
self.affinity_matrix_ = self.affinity(X)
return self.affinity_matrix_
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
If affinity is "precomputed"
X : array-like, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
Returns
-------
self : object
Returns the instance itself.
"""
X = check_array(X, ensure_min_samples=2, estimator=self)
random_state = check_random_state(self.random_state)
if isinstance(self.affinity, six.string_types):
if self.affinity not in set(("nearest_neighbors", "rbf",
"precomputed")):
raise ValueError(("%s is not a valid affinity. Expected "
"'precomputed', 'rbf', 'nearest_neighbors' "
"or a callable.") % self.affinity)
elif not callable(self.affinity):
raise ValueError(("'affinity' is expected to be an affinity "
"name or a callable. Got: %s") % self.affinity)
affinity_matrix = self._get_affinity_matrix(X)
self.embedding_ = spectral_embedding(affinity_matrix,
n_components=self.n_components,
eigen_solver=self.eigen_solver,
random_state=random_state)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples
and n_features is the number of features.
If affinity is "precomputed"
X : array-like, shape (n_samples, n_samples),
Interpret X as precomputed adjacency graph computed from
samples.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self.fit(X)
return self.embedding_
| bsd-3-clause |
Antiun/sale-workflow | sale_properties_dynamic_fields/__openerp__.py | 15 | 1411 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014-15 Agile Business Group sagl
# (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP 8.0 addon manifest for "sale_properties_dynamic_fields":
# declares the module metadata, dependencies, data files and tests.
{
    'name': "Sale properties dynamic fields",
    'version': '8.0.1.0.0',
    'category': 'Sales Management',
    'author': "Agile Business Group, Odoo Community Association (OCA)",
    'website': 'http://www.agilebg.com',
    'license': 'AGPL-3',
    # Modules that must be installed before this one.
    "depends": [
        'sale_properties_easy_creation',
    ],
    # XML data/view files loaded on install or update.
    "data": [
        'mrp_property_group_view.xml',
    ],
    # Legacy YAML tests.
    "test": [
        'test/properties.yml',
    ],
    "installable": True
}
| agpl-3.0 |
emembrives/dispotrains | dispotrains.webapp/src/analysis/all_stations.py | 1 | 6277 | #!/bin/env python3
"""
Extracts all metro and RER stations from an OSM dump.
"""
import xml.etree.cElementTree as ET
import argparse
import csv
from math import radians, cos, sin, asin, sqrt
class Station(object):
    """A metro/RER station, possibly represented by several OSM nodes.

    Stations are identified by their name for equality/hashing; the set
    of OSM node ids grows as duplicate nodes are merged in.
    """

    def __init__(self, name, osm_id, lat, lon, accessible=False):
        self._name = name
        # A physical station can map to several OSM nodes; keep all ids.
        self._osm_ids = set([int(osm_id)])
        self._lat = lat
        self._lon = lon
        self._accessible = accessible

    @property
    def name(self):
        """Name of the station (may be None for unnamed OSM nodes)."""
        return self._name

    @property
    def osm_ids(self):
        """Set of OpenStreetMap node ids representing this station."""
        return self._osm_ids

    @property
    def lat(self):
        """Latitude of the station, in decimal degrees."""
        return self._lat

    @property
    def lon(self):
        """Longitude of the station, in decimal degrees."""
        return self._lon

    @property
    def accessible(self):
        """True if the station is known to be accessible."""
        return self._accessible

    def distance(self, other):
        """
        Calculate the great circle distance between two points
        on the earth (specified in decimal degrees), in kilometers.
        """
        # convert decimal degrees to radians
        lon1, lat1, lon2, lat2 = [radians(x) for x in
                                  [self.lon, self.lat, other.lon, other.lat]]
        # haversine formula
        dlon = lon2 - lon1
        dlat = lat2 - lat1
        a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
        c = 2 * asin(sqrt(a))
        r = 6371.0  # Radius of earth in kilometers. Use 3956 for miles
        return c * r

    def merge(self, other):
        """Absorb the OSM ids of *other*, a duplicate of this station."""
        self._osm_ids.update(other.osm_ids)

    @staticmethod
    def from_node(node):
        """Creates a Station from an XML node in OSM format."""
        name_tags = node.findall("./tag[@k='name']")
        # Unnamed nodes yield a None name; they are filtered out later.
        name = name_tags[0].get("v") if name_tags else None
        osm_id = node.get("id")
        lat = float(node.get("lat"))
        lon = float(node.get("lon"))
        return Station(name, osm_id, lat, lon)

    def __repr__(self):
        return "Station(%s)" % (self.name)

    def __eq__(self, other):
        # Return NotImplemented (not False) for foreign types so Python
        # can try the reflected comparison; == / != still evaluate to
        # False / True for non-Station operands.
        if isinstance(other, Station):
            return self.name == other.name
        return NotImplemented

    # __ne__ is intentionally omitted: Python 3 derives it from __eq__,
    # which also handles the NotImplemented case correctly.

    def __hash__(self):
        return hash(self.__repr__())
def extract_stations_from_dump(dump_path):
    """Parse an OSM XML dump and return a de-duplicated station list.

    Nodes sharing the same name are merged into a single |Station|;
    the result is then passed through the manual merge table handled
    by ``merge_osm_stations``.
    """
    root = ET.parse(dump_path).getroot()
    stations_by_name = {}
    for station_node in root.findall('./node'):
        station = Station.from_node(station_node)
        existing = stations_by_name.get(station.name)
        if existing is None:
            stations_by_name[station.name] = station
        else:
            existing.merge(station)
    return merge_osm_stations(stations_by_name.values())
# Manual merge table: maps a "canonical" OSM node id to the ids of other
# nodes that belong to the same physical station but carry a different
# name in the dump (so the name-based merge cannot catch them).
MERGE_STATIONS = {
    26824135: [27371889, 1309031698, 1308998006], # Gare de Lyon
    1731763794: [241928557], # Nation
    3533789791: [3542631493], # Saint Lazare
    243496033: [1731763792], # Etoile
    3574677130: [1785132453], # Pont du Garigliano
    3586000197: [137533248], # La Défense
    269296749: [241926523], # Marne la Vallée Chessy
    225119209: [3530909557, 1882558198], # CDG 2
    3531066587: [1883637808], # La Fraternelle - Rungis
    327613695: [3090733718], # Gare du Nord
    255687197: [2367372622], # Issy Val de Seine
    264778142: [2799009872], # Porte de la Villette
}
def merge_osm_stations(stations):
    """Apply the manual MERGE_STATIONS table to *stations*.

    For each entry, the "receiver" station absorbs the OSM ids of the
    listed duplicate stations, which are removed from the list.  Ids
    that are not present in *stations* are skipped instead of crashing
    with an AttributeError as before.
    """
    stations = list(stations)

    def find_by_osm_id(osm_id):
        # Linear scan is fine: the station list is small.
        for index, station in enumerate(stations):
            if osm_id in station.osm_ids:
                return index, station
        return -1, None

    for receiver_id, duplicate_ids in MERGE_STATIONS.items():
        _, receiver = find_by_osm_id(receiver_id)
        if receiver is None:
            # Receiver absent from this dump; nothing to merge into.
            continue
        for duplicate_id in duplicate_ids:
            index, duplicate = find_by_osm_id(duplicate_id)
            if duplicate is None:
                continue
            receiver.merge(duplicate)
            del stations[index]
    return stations
def extract_accessible_stations(csv_filepath):
    """Read the CSV listing of accessible stations into |Station|s.

    Expected columns: name, ?, latitude, longitude, osm_id; every row
    yields a station flagged accessible=True.
    """
    with open(csv_filepath) as reader:
        rows = csv.reader(reader)
        return [Station(row[0], row[4], float(row[2]), float(row[3]), True)
                for row in rows]
def merge_stations(all_stations, accessible_stations):
    """Merge the OSM station list with the accessibility list.

    A station present in both lists (sharing at least one OSM id) is
    replaced by its accessible counterpart; OSM stations without a
    match are kept only when they have a name.  The number of merged
    stations is printed for diagnostics.
    """
    merged_stations = []
    merged_count = 0
    for station in all_stations:
        found = False
        for accessible in accessible_stations:
            if station.osm_ids & accessible.osm_ids:
                merged_stations.append(accessible)
                found = True
                merged_count += 1
                # bugfix: stop after the first match so a station
                # intersecting several accessible entries is not
                # appended (and counted) more than once.
                break
        if not found and station.name:
            merged_stations.append(station)
    print(merged_count)
    return merged_stations
def print_to_csv(stations):
    """Write the final station list to ``full-list.csv`` in the cwd."""
    header = ["name", "osm_id", "latitude", "longitude", "accessible"]
    with open("full-list.csv", "w") as output:
        csvwriter = csv.writer(output)
        csvwriter.writerow(header)
        for station in stations:
            csvwriter.writerow([station.name, station.osm_ids,
                                station.lat, station.lon,
                                station.accessible])
def _parse_args():
"""Define and parse command-line arguments."""
parser = argparse.ArgumentParser(description='Extract station information.')
parser.add_argument('--osm_dump', type=str,
help='Path of the OSM dump containing train stations')
parser.add_argument('--accessible_csv', type=str,
help='Path to the list of accessible stations (CSV)')
return parser.parse_args()
def _main():
    """Script entry-point: extract both lists, merge them, dump to CSV."""
    args = _parse_args()
    osm_stations = extract_stations_from_dump(args.osm_dump)
    accessible = extract_accessible_stations(args.accessible_csv)
    print_to_csv(merge_stations(osm_stations, accessible))

if __name__ == '__main__':
    _main()
| apache-2.0 |
ketjow4/NOV | Lib/site-packages/numpy/distutils/unixccompiler.py | 75 | 3651 | """
unixccompiler - can handle very long argument lists for ar.
"""
import os
from distutils.errors import DistutilsExecError, CompileError
from distutils.unixccompiler import *
from numpy.distutils.ccompiler import replace_method
from numpy.distutils.compat import get_exception
if sys.version_info[0] < 3:
import log
else:
from numpy.distutils import log
# Note that UnixCCompiler._compile appeared in Python 2.3
# Note that UnixCCompiler._compile appeared in Python 2.3
def UnixCCompiler__compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
    """Compile a single source file with a Unix-style compiler."""
    # Show "<compiler>: <source>" instead of the full command line.
    display = '%s: %s' % (os.path.basename(self.compiler_so[0]), src)
    command = self.compiler_so + cc_args + [src, '-o', obj] + extra_postargs
    try:
        self.spawn(command, display = display)
    except DistutilsExecError:
        # Re-raise as CompileError, in a Python 2/3 compatible way.
        raise CompileError(str(get_exception()))

replace_method(UnixCCompiler, '_compile', UnixCCompiler__compile)
def UnixCCompiler_create_static_lib(self, objects, output_libname,
                                    output_dir=None, debug=0, target_lang=None):
    """
    Build a static library in a separate sub-process.

    Parameters
    ----------
    objects : list or tuple of str
        List of paths to object files used to build the static library.
    output_libname : str
        The library name as an absolute or relative (if `output_dir` is used)
        path.
    output_dir : str, optional
        The path to the output directory. Default is None, in which case
        the ``output_dir`` attribute of the UnixCCompiler instance.
    debug : bool, optional
        This parameter is not used.
    target_lang : str, optional
        This parameter is not used.

    Returns
    -------
    None

    """
    objects, output_dir = self._fix_object_args(objects, output_dir)

    output_filename = \
                    self.library_filename(output_libname, output_dir=output_dir)

    if self._need_link(objects, output_filename):
        try:
            # previous .a may be screwed up; best to remove it first
            # and recreate.
            # Also, ar on OS X doesn't handle updating universal archives
            os.unlink(output_filename)
        except (IOError, OSError):
            pass
        self.mkpath(os.path.dirname(output_filename))
        tmp_objects = objects + self.objects
        # Feed ar at most 50 objects per invocation so very long object
        # lists do not overflow the command-line length limit.
        while tmp_objects:
            objects = tmp_objects[:50]
            tmp_objects = tmp_objects[50:]
            display = '%s: adding %d object files to %s' % (
                           os.path.basename(self.archiver[0]),
                           len(objects), output_filename)
            self.spawn(self.archiver + [output_filename] + objects,
                       display = display)

        # Not many Unices required ranlib anymore -- SunOS 4.x is, I
        # think the only major Unix that does.  Maybe we need some
        # platform intelligence here to skip ranlib if it's not
        # needed -- or maybe Python's configure script took care of
        # it for us, hence the check for leading colon.
        if self.ranlib:
            display = '%s:@ %s' % (os.path.basename(self.ranlib[0]),
                                   output_filename)
            try:
                self.spawn(self.ranlib + [output_filename],
                           display = display)
            except DistutilsExecError:
                msg = str(get_exception())
                raise LibError(msg)
    else:
        log.debug("skipping %s (up-to-date)", output_filename)
    return

replace_method(UnixCCompiler, 'create_static_lib',
               UnixCCompiler_create_static_lib)
| gpl-3.0 |
darenr/art-dataset-nlp-experiments | kadist/xlreader.py | 2 | 1336 | # -*- encoding: utf-8 -*-
import xlrd
import json
import csv
import sys
import codecs
# Require exactly two CLI arguments: the Excel workbook and a sheet name.
# NOTE: this is a Python 2 script (print statement, unicode/xrange below).
if len(sys.argv) != 3:
    print "usage: <XL filename> <sheet name>"
    sys.exit(-1)

wb = xlrd.open_workbook(sys.argv[1])
sh = wb.sheet_by_name(sys.argv[2])

# The first row of the sheet holds the column names.
fieldnames = sh.row_values(0)
data = []
def clean(v):
    # Normalise a raw xlrd cell value: decode Python 2 byte strings to
    # unicode and replace em-dashes / embedded newlines with spaces.
    if type(v) is str:
        return unicode(v, "utf-8").strip().replace(u'—', ' ').replace('\n', ' ')
    else:
        # xlrd returns every number as a float; render whole numbers
        # (e.g. ids) as plain ints.
        if type(v) is float and v == int(v):
            v = int(v)
        return v
tagged = []
descriptions = []

# Turn every data row into a dict keyed by the header row, cleaning each
# cell on the way.
for rownum in xrange(1, sh.nrows):
    d = dict(zip(fieldnames, [clean(x) for x in sh.row_values(rownum)]))
    data.append(d)
    id = d['id']
    # Comma-separated tag cells become lists.
    if 'tags' in d and len(d['tags']):
        d['tags'] = d['tags'].split(',')
    if 'major_tags' in d and len(d['major_tags']):
        d['major_tags'] = d['major_tags'].split(',')
        tagged.append(d)
    if 'description' in d and len(d['description']):
        descriptions.append({'id': id, 'text': d['description']})

# Dump the cleaned rows and the descriptions as UTF-8 JSON.
with codecs.open('kadist.json', 'wb', 'utf-8') as out:
    out.write(json.dumps(data, indent=2, ensure_ascii=False))

#with codecs.open('kadist-tagged.json', 'wb', 'utf-8') as out:
#    out.write(json.dumps(tagged, indent=2, ensure_ascii=False))

with codecs.open('kadist_descriptions.txt', 'wb', 'utf-8') as out:
    out.write(json.dumps(descriptions, indent=2, ensure_ascii=False))
| mit |
jason-weirather/Au-public | iron/utilities/splicemap_wrapper.py | 2 | 3515 | #!/usr/bin/python
import os, subprocess, argparse, sys, re
from random import randint
from shutil import rmtree, copytree
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--bowtie_index',nargs=1,help='path to bowtie index',required=True)
parser.add_argument('--genome',nargs=1,help='path reference genome',required=True)
parser.add_argument('--read_mismatches',nargs=1,help='Number of read mismatches (default 2)')
parser.add_argument('--threads',nargs=1,help='Number of threads (default 1)')
parser.add_argument('--tempdir',nargs=1,help='DIRECTORY location to store temp files')
parser.add_argument('--output',help='DIRECTORYNAME path of directory to save result')
parser.add_argument('--output_all',help='DIRECTORYNAME path of directory to save result')
parser.add_argument('--read_type',default='FASTQ',help='Read type FASTQ or FASTA')
parser.add_argument('reads',nargs='+',help='reads (second file is for a mate')
args = parser.parse_args()
threads = 2
if args.threads:
threads = int(args.threads[0])
read_mismatches = 2
if args.read_mismatches:
read_mismatches = int(args.read_mismatches[0])
if len(args.reads) > 2:
sys.stderr.write("Too many read files. Takes only one fastq file or two (a mate pair)\n")
return
reads1 = args.reads[0]
reads2 = None
if len(args.reads) == 2:
reads2 = args.reads[1]
bdir = args.bowtie_index[0]
#'/Shared/Au/jason/Reference/UCSC/Human/hg19_GRCh37_feb2009/Bowtie_Index/genome'
gdir = './'
wcard = args.genome[0]
m = re.match('^(.*\/)([^\/]+)$',args.genome[0])
if m:
gdir = m.group(1)
wcard = m.group(2)
#'/Shared/Au/jason/Reference/UCSC/Human/hg19_GRCh37_feb2009/Genome/'
tstart = '/tmp'
if args.tempdir:
tstart = args.tempdir[0]
tdir = tstart.rstrip('/')+'/'+'weirathe.'+str(randint(1,100000000))
if not os.path.exists(tdir): os.makedirs(tdir)
# Make a new reads 1 if its gzipped
if re.search('.gz$',reads1):
subprocess.call('zcat '+reads1+' > '+tdir+'/reads1.fq',shell=True)
reads1 = tdir+'/reads1.fq'
if re.search('.gz$',reads2):
subprocess.call('zcat '+reads2+' > '+tdir+'/reads2.fq',shell=True)
reads2 = tdir+'/reads2.fq'
cfg = get_cfg(reads1,reads2,bdir,gdir,wcard,threads,read_mismatches,tdir,args.read_type)
of = open(tdir+'/run.cfg','w')
of.write(cfg)
of.close()
cmd = "runSpliceMap "+tdir+'/run.cfg'
FNULL = open(os.devnull,'w')
stream = subprocess.Popen(cmd.split(),stdout=subprocess.PIPE,stderr=FNULL)
while True:
e = stream.stdout.readline()
if not e: break
if not args.output:
with open(tdir+'/output/good_hits.sam') as inf:
for line in inf:
print line.rstrip()
if args.output_all:
copytree(tdir,args.output)
elif args.output:
copytree(tdir+'/output/',args.output)
rmtree(tdir)
def get_cfg(reads1,reads2,bdir,gdir,wcard,threads,read_mismatches,tdir,read_type):
    """Build the SpliceMap run configuration as a single string.

    reads2 may be None for single-end runs, in which case the
    reads_list2 section is omitted.  The remaining parameters fill in
    the genome directory, bowtie index, temp/output paths and tuning
    options; the intron-size and multi-hit limits are fixed.
    """
    cfg = '''\
genome_dir = '''+gdir+'''
> reads_list1
'''+reads1+'''
<
'''
    if reads2:
        cfg += '''\
> reads_list2
'''+reads2+'''
<
'''
    cfg += '''\
read_format = '''+read_type+'''
mapper = bowtie
temp_path = '''+tdir+'/temp'+'''
out_path = '''+tdir+'/output'+'''
max_intron = 400000
min_intron = 20000
max_multi_hit = 10
seed_mismatch = 1
read_mismatch = '''+str(read_mismatches)+'''
sam_file = cuff
ud_coverage = yes
chromosome_wildcard = '''+wcard+'''
num_chromosome_together = '''+str(threads)+'''
bowtie_base_dir = '''+bdir+'''
num_threads = '''+str(threads)+'''
try_hard = yes'''
    return cfg

main()
| apache-2.0 |
empaket/plugin.video.live.proyectoluzdigital1 | cloudflare.py | 221 | 2812 | import sys,traceback,urllib2,re, urllib,xbmc
def createCookie(url,cj=None,agent='Mozilla/5.0 (Windows NT 6.1; rv:32.0) Gecko/20100101 Firefox/32.0'):
    """Solve Cloudflare's legacy "I'm Under Attack" JavaScript challenge.

    Fetches *url*, replays the obfuscated arithmetic challenge, then
    submits the answer so the clearance cookie ends up in *cj*.
    Returns the body of the first (challenge) page, or '' on failure.
    NOTE(review): Python 2 only (urllib2/cookielib) and tied to the
    pre-2015 challenge page format.
    """
    urlData=''
    try:
        import urlparse,cookielib,urllib2

        class NoRedirection(urllib2.HTTPErrorProcessor):
            # Pass every response through untouched so the 503
            # challenge page is not swallowed by error handling.
            def http_response(self, request, response):
                return response

        def parseJSString(s):
            # Cloudflare encodes integers as JS built from !+[] / !![]
            # / [] tokens; translate those to digits and eval the rest.
            try:
                offset=1 if s[0]=='+' else 0
                val = int(eval(s.replace('!+[]','1').replace('!![]','1').replace('[]','0').replace('(','str(')[offset:]))
                return val
            except:
                pass

        #agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0) Gecko/20100101 Firefox/6.0'
        if cj==None:
            cj = cookielib.CookieJar()
        opener = urllib2.build_opener(NoRedirection, urllib2.HTTPCookieProcessor(cj))
        opener.addheaders = [('User-Agent', agent)]
        response = opener.open(url)
        result=urlData = response.read()
        response.close()
        # Pull the challenge token, the seed expression and the
        # arithmetic program out of the page.
        jschl = re.compile('name="jschl_vc" value="(.+?)"/>').findall(result)[0]
        init = re.compile('setTimeout\(function\(\){\s*.*?.*:(.*?)};').findall(result)[0]
        builder = re.compile(r"challenge-form\'\);\s*(.*)a.v").findall(result)[0]
        decryptVal = parseJSString(init)
        lines = builder.split(';')

        # Replay the challenge arithmetic line by line; the last char of
        # each left-hand side is the operator (+=, -=, *=, ...).
        for line in lines:
            if len(line)>0 and '=' in line:
                sections=line.split('=')
                line_val = parseJSString(sections[1])
                decryptVal = int(eval(str(decryptVal)+sections[0][-1]+str(line_val)))
        # The expected answer is the computed value plus the length of
        # the hostname.
        answer = decryptVal + len(urlparse.urlparse(url).netloc)
        u='/'.join(url.split('/')[:-1])
        query = '%s/cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s' % (u, jschl, answer)
        if 'type="hidden" name="pass"' in result:
            passval=re.compile('name="pass" value="(.*?)"').findall(result)[0]
            query = '%s/cdn-cgi/l/chk_jschl?pass=%s&jschl_vc=%s&jschl_answer=%s' % (u,urllib.quote_plus(passval), jschl, answer)
        xbmc.sleep(4*1000) ##sleep so that the call work
        # Submitting the answer sets the clearance cookie in cj.
        response = opener.open(query)
        #cookie = str(response.headers.get('Set-Cookie'))
        #response = opener.open(url)
        response.close()
        return urlData
    except:
        traceback.print_exc(file=sys.stdout)
        return urlData
| gpl-2.0 |
mlufei/depot_tools | third_party/logilab/common/ureports/docbook_writer.py | 93 | 5706 | # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""HTML formatting drivers for ureports"""
__docformat__ = "restructuredtext en"
from six.moves import range
from logilab.common.ureports import HTMLWriter
class DocbookWriter(HTMLWriter):
    """format layouts as DocBook XML (chapters/sections/tables)"""

    def begin_format(self, layout):
        """begin to format a layout"""
        # Deliberately skip HTMLWriter.begin_format (which would emit an
        # HTML prologue) and call the base writer instead.
        super(HTMLWriter, self).begin_format(layout)
        if self.snippet is None:
            self.writeln('<?xml version="1.0" encoding="ISO-8859-1"?>')
            self.writeln("""
<book xmlns:xi='http://www.w3.org/2001/XInclude'
      lang='fr'>
""")

    def end_format(self, layout):
        """finished to format a layout"""
        if self.snippet is None:
            self.writeln('</book>')

    def visit_section(self, layout):
        """display a section (using <chapter> (level 0) or <section>)"""
        if self.section == 0:
            tag = "chapter"
        else:
            tag = "section"
        # Track nesting depth so _indent and the chapter/section choice
        # stay in sync while formatting children.
        self.section += 1
        self.writeln(self._indent('<%s%s>' % (tag, self.handle_attrs(layout))))
        self.format_children(layout)
        self.writeln(self._indent('</%s>'% tag))
        self.section -= 1

    def visit_title(self, layout):
        """display a title using <title>"""
        self.write(self._indent('  <title%s>' % self.handle_attrs(layout)))
        self.format_children(layout)
        self.writeln('</title>')

    def visit_table(self, layout):
        """display a table as docbook <table>/<tgroup>"""
        self.writeln(self._indent('  <table%s><title>%s</title>' \
            % (self.handle_attrs(layout), layout.title)))
        self.writeln(self._indent('  <tgroup cols="%s">'% layout.cols))
        for i in range(layout.cols):
            self.writeln(self._indent('    <colspec colname="c%s" colwidth="1*"/>' % i))
        table_content = self.get_table_content(layout)
        # write headers
        if layout.cheaders:
            self.writeln(self._indent('  <thead>'))
            self._write_row(table_content[0])
            self.writeln(self._indent('  </thead>'))
            table_content = table_content[1:]
        elif layout.rcheaders:
            self.writeln(self._indent('  <thead>'))
            self._write_row(table_content[-1])
            self.writeln(self._indent('  </thead>'))
            table_content = table_content[:-1]
        # write body
        self.writeln(self._indent('  <tbody>'))
        for i in range(len(table_content)):
            row = table_content[i]
            self.writeln(self._indent('   <row>'))
            for j in range(len(row)):
                cell = row[j] or ' '
                self.writeln(self._indent('   <entry>%s</entry>' % cell))
            self.writeln(self._indent('   </row>'))
        self.writeln(self._indent('  </tbody>'))
        self.writeln(self._indent('  </tgroup>'))
        self.writeln(self._indent('  </table>'))

    def _write_row(self, row):
        """write content of row (using <row> <entry>)"""
        self.writeln('  <row>')
        for j in range(len(row)):
            # Empty cells still need an entry element.
            cell = row[j] or ' '
            self.writeln('  <entry>%s</entry>' % cell)
        self.writeln(self._indent('  </row>'))

    def visit_list(self, layout):
        """display a list (using <itemizedlist>)"""
        self.writeln(self._indent('  <itemizedlist%s>' % self.handle_attrs(layout)))
        for row in list(self.compute_content(layout)):
            self.writeln('    <listitem><para>%s</para></listitem>' % row)
        self.writeln(self._indent('  </itemizedlist>'))

    def visit_paragraph(self, layout):
        """display links (using <para>)"""
        self.write(self._indent('  <para>'))
        self.format_children(layout)
        self.writeln('</para>')

    def visit_span(self, layout):
        """display links (using <p>)"""
        #TODO: translate in docbook
        self.write('<literal %s>' % self.handle_attrs(layout))
        self.format_children(layout)
        self.write('</literal>')

    def visit_link(self, layout):
        """display links (using <ulink>)"""
        self.write('<ulink url="%s"%s>%s</ulink>' % (layout.url,
                                                     self.handle_attrs(layout),
                                                     layout.label))

    def visit_verbatimtext(self, layout):
        """display verbatim text (using <programlisting>)"""
        self.writeln(self._indent('  <programlisting>'))
        # Escape & before < so ampersands are not double-escaped.
        self.write(layout.data.replace('&', '&amp;').replace('<', '&lt;'))
        self.writeln(self._indent('  </programlisting>'))

    def visit_text(self, layout):
        """add some text"""
        self.write(layout.data.replace('&', '&amp;').replace('<', '&lt;'))

    def _indent(self, string):
        """correctly indent string according to section"""
        return ' ' * 2*(self.section) + string
| bsd-3-clause |
emreg00/biana | biana/BianaParser/gooboParser.py | 2 | 11522 | """
BIANA: Biologic Interactions and Network Analysis
Copyright (C) 2009 Javier Garcia-Garcia, Emre Guney, Baldo Oliva
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import re
from bianaParser import *
class GOOBOParser(BianaParser):
    """
    GO OBO 1.2 Parser Class

    Reads a Gene Ontology OBO 1.2 file, inserts one external entity per
    [Term] stanza (id, name, namespace, definition, synonyms, alt ids)
    and finally builds the is_a / part_of ontology hierarchy.
    """

    name = "go_obo"
    description = "This program fills up tables in database biana related with gene ontology"
    external_entity_definition = "A external entity represents an ontology element"
    external_entity_relations = ""

    def __init__(self):
        BianaParser.__init__(self, default_db_description = "GO obo",
                             default_script_name = "gooboParser.py",
                             default_script_description = "This program fills up tables in database biana related to OBO 1.2 formatted Ontologies")
        self.default_eE_attribute = "go"

    def parse_database(self):
        """
        Method that implements the specific operations of go obo parser
        """

        # Add the attributes specific to GO database
        self.biana_access.add_valid_external_entity_attribute_type( name = "GO_type",
                                                                    data_type = "ENUM(\"universal\",\"molecular_function\",\"cellular_component\",\"biological_process\")",
                                                                    category = "eE attribute")

        self.biana_access.add_valid_external_entity_attribute_type( name = "GO_name",
                                                                    data_type = "varchar(370)",
                                                                    category = "eE identifier attribute")

        # IMPORTANT: As we have added new types and attributes that are not in the default BIANA distribution, we must execute the following command:
        self.biana_access.refresh_database_information()

        # Add the possibility to transfer GO id and GO category using GO as a key
        self.biana_access._add_transfer_attribute( externalDatabaseID = self.database.get_id(),
                                                   key_attribute = "GO",
                                                   transfer_attribute="GO_name" )

        ontology = Ontology( source_database = self.database, linkedAttribute="GO", name="GO", descriptionAttribute="GO_name" )

        # Maps GO term id -> (external entity id, is_a ids, part_of ids);
        # used after the parse loop to wire up the hierarchy.
        specific_identifiers_and_parent = {}

        # Start variables for the stanza currently being parsed.
        term_id = None
        term_name = None
        term_def = None

        # GO OBO specific
        term_namespace = None
        #term_synonyms = []
        term_is_a = []
        term_part_of = []
        term_exact_synonyms = []
        term_related_synonyms = []
        term_broad_synonyms = []
        term_narrow_synonyms = []
        term_alt_id = []

        self.initialize_input_file_descriptor()

        # Adding a dummy relation to let the database know that go_name attribute is also a possible relation attribute
        externalEntityRelation = ExternalEntityRelation( source_database = self.database, relation_type = "interaction" )
        externalEntityRelation.add_attribute( ExternalEntityRelationAttribute( attribute_identifier = "go_name", value = None) )

        def create_external_entity_from_go_term(database, term_id, term_name,
                                                term_namespace, term_def, term_exact_synonyms, term_related_synonyms, term_broad_synonyms, term_narrow_synonyms, term_alt_id):
            # Translate one parsed [Term] stanza into an ExternalEntity
            # carrying all its attributes.
            externalEntity = ExternalEntity( source_database = database, type = "GOElement" )

            externalEntity.add_attribute( ExternalEntityAttribute( attribute_identifier = "GO", value = term_id, type="unique") )

            if term_name is not None:
                externalEntity.add_attribute( ExternalEntityAttribute( attribute_identifier = "GO_name", value = term_name, type="unique") )

            externalEntity.add_attribute( ExternalEntityAttribute( attribute_identifier = "GO_type", value = term_namespace, type="unique" ) )

            if term_def is not None:
                externalEntity.add_attribute( ExternalEntityAttribute( attribute_identifier = "description", value = term_def) )

            for current_synonym in term_exact_synonyms:
                if current_synonym is not None:
                    externalEntity.add_attribute( ExternalEntityAttribute( attribute_identifier = "GO_name",
                                                                           value = current_synonym,
                                                                           type = "exact_synonym" ) )

            for current_synonym in term_related_synonyms:
                if current_synonym is not None:
                    externalEntity.add_attribute( ExternalEntityAttribute( attribute_identifier = "GO_name",
                                                                           value = current_synonym,
                                                                           type = "related_synonym" ) )

            for current_synonym in term_broad_synonyms:
                if current_synonym is not None:
                    externalEntity.add_attribute( ExternalEntityAttribute( attribute_identifier = "GO_name",
                                                                           value = current_synonym,
                                                                           type = "broad_synonym" ) )

            for current_synonym in term_narrow_synonyms:
                if current_synonym is not None:
                    externalEntity.add_attribute( ExternalEntityAttribute( attribute_identifier = "GO_name",
                                                                           value = current_synonym,
                                                                           type = "narrow_synonym" ) )

            # Quim Aguirre: Adding the alternative GO ids as "alias" in GO table
            for current_alt_id in term_alt_id:
                if current_alt_id is not None:
                    externalEntity.add_attribute( ExternalEntityAttribute( attribute_identifier = "GO",
                                                                           value = current_alt_id,
                                                                           type = "alias" ) )
            return externalEntity

        for line in self.input_file_fd:

            # Quim Aguirre: I have included to recognise [Typedef], so that the [Term] entries are recorded well when they are finished and there is a [Typedef] afterwards
            if re.search("\[Term\]",line) or re.search("\[Typedef\]",line):
                # New term
                if term_id is not None:
                    # insert previous
                    externalEntity = create_external_entity_from_go_term(self.database, term_id, term_name,
                                                                         term_namespace, term_def, term_exact_synonyms,
                                                                         term_related_synonyms, term_broad_synonyms, term_narrow_synonyms, term_alt_id)
                    self.biana_access.insert_new_external_entity( externalEntity )
                    specific_identifiers_and_parent[term_id] = (externalEntity.get_id(), term_is_a, term_part_of)

                # Restart variables
                term_id = None
                term_name = None
                term_def = None

                # GO OBO specific
                term_namespace = None
                #term_synonyms = []
                term_is_a = []
                term_part_of = []
                term_exact_synonyms = []
                term_related_synonyms = []
                term_broad_synonyms = []
                term_narrow_synonyms = []
                term_alt_id = []

                # While typedef is True, every tag until the next stanza
                # header is ignored.
                if re.search("\[Typedef\]",line):
                    typedef = True
                else:
                    typedef = False

            elif re.search("^id\:",line):
                if typedef == True: # If typedef tag is true, we do not want to record anything
                    continue
                temp = re.search("GO\:(\d+)",line)
                if temp:
                    term_id = temp.group(1)

            elif re.search("^name\:",line):
                if typedef == True:
                    continue
                temp = re.search("name:\s+(.+)",line)
                term_name = temp.group(1)

            elif re.search("^namespace\:",line):
                if typedef == True:
                    continue
                temp = re.search("namespace:\s+(.+)",line)
                term_namespace = temp.group(1)

            elif re.search("^def\:",line):
                if typedef == True:
                    continue
                temp = re.search("\"(.+)\"",line)
                term_def = temp.group(1)

            elif re.search("synonym\:",line):
                if typedef == True:
                    continue
                # The synonym scope (EXACT/RELATED/BROAD/NARROW) follows
                # the quoted synonym text.
                temp = re.search("\"(.+)\"\s+(\w+)",line)
                if temp.group(2) == "EXACT":
                    term_exact_synonyms.append(temp.group(1))
                elif temp.group(2) == "RELATED":
                    term_related_synonyms.append(temp.group(1))
                # Quim Aguirre: I have added the broad and narrow synonyms
                elif temp.group(2) == "BROAD":
                    term_broad_synonyms.append(temp.group(1))
                elif temp.group(2) == "NARROW":
                    term_narrow_synonyms.append(temp.group(1))

            elif re.search("^alt_id\:",line):
                # Quim Aguirre: Recognison of the "alt_id" tags
                # Example --> alt_id: GO:0016425
                if typedef == True:
                    continue
                temp = re.search("GO\:(\d+)",line)
                if temp:
                    term_alt_id.append(temp.group(1))

            elif re.search("is_a\:",line):
                if typedef == True:
                    continue
                temp = re.search("GO\:(\d+)",line)
                if temp is not None:
                    #print "??:", line  # malformation --> is_a: regulates ! regulates
                    term_is_a.append(temp.group(1))

            elif re.search("relationship\:",line):
                if typedef == True:
                    continue
                if( re.search("part_of",line) ):
                    temp = re.search("part_of\s+GO\:(\d+)",line)
                    if temp is not None:
                        term_part_of.append(temp.group(1))

        # Insert last term (the loop only flushes on the NEXT stanza header)
        if term_id is not None:
            externalEntity = create_external_entity_from_go_term(self.database, term_id, term_name,
                                                                 term_namespace, term_def, term_exact_synonyms,
                                                                 term_related_synonyms, term_broad_synonyms, term_narrow_synonyms, term_alt_id)
            self.biana_access.insert_new_external_entity( externalEntity )
            specific_identifiers_and_parent[term_id] = (externalEntity.get_id(), term_is_a, term_part_of)

        # Set the ontology hierarchy and insert elements to ontology,
        # translating GO term ids into external entity ids.
        for current_method_id in specific_identifiers_and_parent:
            is_a_list = [ specific_identifiers_and_parent[x][0] for x in specific_identifiers_and_parent[current_method_id][1] ]
            is_part_of_list = [ specific_identifiers_and_parent[x][0] for x in specific_identifiers_and_parent[current_method_id][2] ]
            ontology.add_element( ontologyElementID = specific_identifiers_and_parent[current_method_id][0],
                                  isA = is_a_list,
                                  isPartOf = is_part_of_list )

        self.biana_access.insert_new_external_entity(ontology)
| gpl-3.0 |
andyliuliming/azure-linux-extensions | DSC/azure/servicemanagement/sqldatabasemanagementservice.py | 46 | 2557 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from azure import (
MANAGEMENT_HOST,
_parse_service_resources_response,
)
from azure.servicemanagement import (
Servers,
Database,
)
from azure.servicemanagement.servicemanagementclient import (
_ServiceManagementClient,
)
class SqlDatabaseManagementService(_ServiceManagementClient):
    ''' Preliminary SQL Database management client.

    Note that this class is an early work on SQL Database management.
    Since it lacks a lot of features, the final version can be slightly
    different from the current one.
    '''

    def __init__(self, subscription_id=None, cert_file=None,
                 host=MANAGEMENT_HOST):
        super(SqlDatabaseManagementService, self).__init__(
            subscription_id, cert_file, host)

    #--Operations for sql servers ----------------------------------------
    def list_servers(self):
        '''
        List the SQL servers defined on the account.
        '''
        return self._perform_get(self._get_list_servers_path(),
                                 Servers)

    #--Operations for sql databases ----------------------------------------
    def list_databases(self, name):
        '''
        List the SQL databases defined on the specified server name
        '''
        raw_response = self._perform_get(
            self._get_list_databases_path(name), None)
        return _parse_service_resources_response(raw_response, Database)

    #--Helper functions --------------------------------------------------
    def _get_list_servers_path(self):
        # Path of the "list servers" management endpoint.
        return self._get_path('services/sqlservers/servers', None)

    def _get_list_databases_path(self, name):
        # *contentview=generic is mandatory*
        base_path = self._get_path('services/sqlservers/servers/', name)
        return base_path + '/databases?contentview=generic'
| apache-2.0 |
vinaymayar/python-game-workshop | lesson7/hangman.py | 1 | 2668 | # This is a hangman game.
# Your game must do the following things.
# Everytime a user guesses a character, it should tell them if their character
# is in the secret word or not.
#
# Also, it should print the guessed character in the following format
# if the secret word is unicorn and the user guessed the letter 'n'
# you program should print _n____n
#
# It should also print a picture of the current state of the hanged man.
#
# If the user guesses a letter he already guessed, give them a warning.
#
# The user can make at most 6 mistakes.
import random # don't worry about these lines.
from hangman_pics import HANGMANPICS
LIST_OF_WORDS = ['hangman', 'chairs', 'backpack', 'bodywash', 'clothing', 'computer', 'python', 'program', 'glasses', 'sweatshirt', 'sweatpants', 'mattress', 'friends', 'clocks', 'biology', 'algebra', 'suitcase', 'knives', 'ninjas', 'shampoo']

# First let's write a function to select a random word from the list of words.
def getSecretWord():
    """Return a randomly chosen secret word from LIST_OF_WORDS."""
    # this line generates a random number use it to index into the list of words
    # and return a secret word.
    rnd = random.randint(0, len(LIST_OF_WORDS) - 1)
    # BUG FIX: the placeholder `return ...` returned the Ellipsis object;
    # use the random index to pick an actual word instead.
    return LIST_OF_WORDS[rnd]
secret_word = getSecretWord() # functions help us organize our code!
mistakes = 0  # number of wrong guesses so far; the game ends at 6
# Now create an empty list to keep track of the letters the user guessed
def string_contains_character(c, word):
    """Return True if character `c` occurs anywhere in `word`.

    BUG FIX: the body was only a comment placeholder, which is a syntax
    error in Python; implemented with the membership operator.
    """
    return c in word
def hide_word(guesses, secret_word):
    """Return `secret_word` with every letter not in `guesses` masked.

    Guessed letters are shown in place; unguessed letters become "_".
    Example: guesses=['n'], secret_word='unicorn' -> '_n____n'.

    BUG FIX: the body was an unfinished skeleton (the loop ended in `...`
    and nothing was returned); completed per the exercise description.
    """
    hidden_word = ""
    for letter in secret_word:
        if letter in guesses:
            hidden_word += letter
        else:
            hidden_word += "_"
    return hidden_word
# This is the
# main game loop: one iteration per guess. Most steps are intentionally
# left as exercises for the student and are described in the comments.
while(True):
    guess = raw_input()  # NOTE: raw_input() is Python 2; use input() on Python 3
    # Check if the guess was a letter that the user already guessed. If so,
    # give them a warning and go back to the beginning of the loop.

    # If this is a new guess, add it to the list of letters the user guessed.
    # Maybe you could use one of the list methods...

    # Check if the new guess is in the secret word, using the function
    # string_contains_character you wrote on lesson6.

    # If the user made a mistake, increase their number of mistakes and let them
    # know!

    # Now, complete the function hide_word, which takes in the guesses made and
    # the secret_word and returns a the word in a hidden format. Remember, if the
    # letter was in the guesses, it should appear in the word, if it's not it
    # should appear as an underscore "_"
    #
    # If your hidden word has no underscores, the user won! Let them know about
    # that and break out of the loop

    print(HANGMANPICS[mistakes]) # this line will print a picture of the hanged man

    # If the user made 6 mistakes, tell them the game is over and break out of the
    # loop.
| mit |
canmogol/LightGap | web/server.py | 1 | 1418 | from flask import Flask, request, send_from_directory, make_response, redirect
app = Flask(__name__, static_url_path='')
@app.before_request
def before_request():
    """Redirect unauthenticated requests for protected paths to /error.

    Runs before every request; clears any stale session cookie on redirect.
    """
    authenticated_methods = ['/list', '/another']
    session_id = request.cookies.get('session_id')
    # BUG FIX: the original `any(request.path in s for s in ...)` was a
    # substring test, so partial paths such as "/li" were also treated as
    # protected. Use exact membership instead.
    if session_id is None and request.path in authenticated_methods:
        response = make_response(redirect('/error'))
        response.set_cookie('session_id', '', expires=0)
        return response
@app.route("/error")
def error():
    # Generic unauthorized-access response, returned as a raw JSON string.
    return '{"error":"unauthorized request"}'
@app.route("/login")
def login():
    # Issue the (hard-coded demo) session cookie, then continue to /login2.
    response = make_response(redirect('/login2'))
    response.set_cookie('session_id', '123123123')
    return response
@app.route("/login2")
def login2():
    # Login confirmation payload; served after /login has set the cookie.
    return '{"isLogged":"true", "message": "Welcome", "user": "John Doe"}'
@app.route("/list")
def list():
    # Protected endpoint (see before_request); returns static demo data.
    # NOTE(review): this view shadows the builtin `list`; harmless here since
    # only Flask's routing holds the reference, but consider renaming.
    return '[{"id":"1","name":"aaa"},{"id":"2","name":"vvv"},{"id":"3","name":"qqq"},{"id":"4","name":"xxx"}]'
@app.route('/js/<path:path>')
def send_js(path):
    # Serve static JavaScript assets from the local ./js directory.
    return send_from_directory('js', path)
@app.route('/css/<path:path>')
def send_css(path):
    # Serve static stylesheets from the local ./css directory.
    return send_from_directory('css', path)
@app.route('/image/<path:path>')
def send_image(path):
    # Serve static images from the local ./image directory.
    return send_from_directory('image', path)
@app.route('/html/<path:path>')
def send_html(path):
    # Serve static HTML pages from the local ./html directory.
    return send_from_directory('html', path)
if __name__ == "__main__":
    # Start the Flask development server with default settings.
    app.run()
| apache-2.0 |
eleonrk/SickRage | sickbeard/providers/immortalseed.py | 2 | 7926 | # coding=utf-8
# Author: Bart Sommer <bart.sommer88@gmail.com>
#
# URL: https://sickchill.github.io
#
# This file is part of SickChill.
#
# SickChill is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickChill is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickChill. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
import re
from requests.compat import urljoin
from requests.utils import dict_from_cookiejar
from sickbeard import logger, tvcache
from sickbeard.bs4_parser import BS4Parser
from sickchill.helper.common import convert_size, try_int
from sickchill.helper.exceptions import AuthException
from sickchill.providers.torrent.TorrentProvider import TorrentProvider
class ImmortalseedProvider(TorrentProvider):  # pylint: disable=too-many-instance-attributes
    """Torrent provider for the private tracker Immortalseed."""

    def __init__(self):

        # Provider Init
        TorrentProvider.__init__(self, "Immortalseed")

        # Credentials (populated from user configuration by the framework)
        self.username = None
        self.password = None
        self.passkey = None

        # Torrent Stats: minimum seeders/leechers filters.
        # NOTE(review): these stay None until configured; under Python 2
        # `int < None` is always False, so unconfigured filters pass every
        # result — confirm this is the intended default behavior.
        self.minseed = None
        self.minleech = None
        self.freeleech = None

        # URLs
        self.url = 'https://immortalseed.me/'
        self.urls = {
            'login': urljoin(self.url, 'takelogin.php'),
            'search': urljoin(self.url, 'browse.php'),
            'rss': urljoin(self.url, 'rss.php'),
        }

        # Proper Strings
        self.proper_strings = ['PROPER', 'REPACK']

        # Cache of recent results, refreshed at most every 20 minutes.
        self.cache = ImmortalseedCache(self, min_time=20)

    def _check_auth(self):
        """Raise AuthException unless both username and password are set."""
        if not self.username or not self.password:
            raise AuthException("Your authentication credentials for " + self.name + " are missing, check your config.")

        return True

    def _check_auth_from_data(self, data):
        """Validate credentials used by the RSS feed.

        NOTE(review): a missing passkey only logs a warning and still
        returns True — confirm whether this should fail instead.
        """
        if not self.passkey:
            logger.log('Invalid passkey. Check your settings', logger.WARNING)

        return True

    def login(self):
        """Log in to the tracker; reuse the session cookie when present.

        :return bool: True when already logged in or login succeeded.
        """
        # An existing non-empty cookie means we are already authenticated.
        if any(dict_from_cookiejar(self.session.cookies).values()):
            return True

        login_params = {
            'username': self.username,
            'password': self.password,
        }

        response = self.get_url(self.urls['login'], post_data=login_params, returns='text')
        if not response:
            logger.log("Unable to connect to provider", logger.WARNING)
            return False

        # The site returns HTTP 200 with this text on bad credentials.
        if re.search('Username or password incorrect!', response):
            logger.log("Invalid username or password. Check your settings", logger.WARNING)
            return False

        return True

    def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
        """Search the tracker and return result dicts sorted by seeders.

        :param search_strings: dict mapping search mode (e.g. 'RSS',
            'Episode') to a list of query strings.
        :return list: dicts with title/link/size/seeders/leechers/hash.
        """
        results = []
        if not self.login():
            return results

        # Search Params
        search_params = {
            'do': 'search',
            'include_dead_torrents': 'no',
            'search_type': 't_name',
            'category': 0,
            'keywords': ''
        }

        # Units used by convert_size() to parse the human-readable sizes.
        units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']

        def process_column_header(td):
            # Column headers may be icon-only cells; prefer the image title.
            td_title = ''
            if td.img:
                td_title = td.img.get('title', td.get_text(strip=True))
            if not td_title:
                td_title = td.get_text(strip=True)
            return td_title

        for mode in search_strings:
            items = []
            logger.log("Search Mode: {0}".format(mode), logger.DEBUG)

            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    logger.log("Search string: {0}".format
                               (search_string.decode("utf-8")), logger.DEBUG)

                search_params['keywords'] = search_string
                data = self.get_url(self.urls['search'], params=search_params, returns='text')
                if not data:
                    logger.log("No data returned from provider", logger.DEBUG)
                    continue

                with BS4Parser(data, 'html5lib') as html:
                    torrent_table = html.find('table', id='sortabletable')
                    torrent_rows = torrent_table('tr') if torrent_table else []

                    # Continue only if at least one Release is found
                    # (the first row is always the header).
                    if len(torrent_rows) < 2:
                        logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
                        continue

                    # Map column names to indices so the layout can vary.
                    labels = [process_column_header(label) for label in torrent_rows[0]('td')]

                    # Skip column headers
                    for result in torrent_rows[1:]:
                        try:
                            title = result.find('div', class_='tooltip-target').get_text(strip=True)
                            # skip if torrent has been nuked due to poor quality
                            if title.startswith('Nuked.'):
                                continue
                            download_url = result.find(
                                'img', title='Click to Download this Torrent in SSL!').parent['href']
                            if not all([title, download_url]):
                                continue

                            cells = result('td')
                            seeders = try_int(cells[labels.index('Seeders')].get_text(strip=True))
                            leechers = try_int(cells[labels.index('Leechers')].get_text(strip=True))

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != 'RSS':
                                    logger.log("Discarding torrent because it doesn't meet the"
                                               " minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                               (title, seeders, leechers), logger.DEBUG)
                                continue

                            torrent_size = cells[labels.index('Size')].get_text(strip=True)
                            size = convert_size(torrent_size, units=units) or -1

                            item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders,
                                    'leechers': leechers, 'hash': ''}
                            if mode != 'RSS':
                                logger.log("Found result: {0} with {1} seeders and {2} leechers".format
                                           (title, seeders, leechers), logger.DEBUG)

                            items.append(item)
                        # NOTE(review): StandardError is Python 2 only; any
                        # malformed row is silently skipped by design.
                        except StandardError:
                            continue

            # For each search mode sort all the items by seeders if available
            items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
            results += items

        return results
class ImmortalseedCache(tvcache.TVCache):
    """RSS-backed result cache for the Immortalseed provider."""

    def _get_rss_data(self):
        # Feed parameters; category IDs select the TV-related sections.
        params = {
            'secret_key': self.provider.passkey,
            'feedtype': 'downloadssl',
            'timezone': '-5',
            'categories': '44,32,7,47,8,48,9',
            'showrows': '50',
        }

        return self.get_rss_feed(self.provider.urls['rss'], params=params)

    def _check_auth(self, data):
        # Delegate feed authentication to the provider (passkey check).
        return self.provider._check_auth_from_data(data)  # pylint: disable=protected-access
# Module-level singleton instance picked up by the provider discovery code.
provider = ImmortalseedProvider()
| gpl-3.0 |
gaolichuang/py-essential | essential/report/models/with_default_views.py | 2 | 2963 | # Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import essential.report.models.base as base_model
import essential.report.views.json.generic as jsonviews
import essential.report.views.text.generic as textviews
import essential.report.views.xml.generic as xmlviews
class ModelWithDefaultViews(base_model.ReportModel):
    """A Model With Default Views of Various Types

    A model with default views has several predefined views,
    each associated with a given type. This is often used for
    when a submodel should have an attached view, but the view
    differs depending on the serialization format

    Parameters are as the superclass, with the exception
    of any parameters ending in '_view': these parameters
    get stored as default views.

    The default 'default views' are

    text
        :class:`essential.report.views.text.generic.KeyValueView`
    xml
        :class:`essential.report.views.xml.generic.KeyValueView`
    json
        :class:`essential.report.views.json.generic.KeyValueView`

    .. function:: to_type()

       ('type' is one of the 'default views' defined for this model)
       Serializes this model using the default view for 'type'

       :rtype: str
       :returns: this model serialized as 'type'
    """

    def __init__(self, *args, **kwargs):
        # One view instance per serialization format; entries may be
        # overridden below by '<type>_view' keyword arguments.
        self.views = {
            'text': textviews.KeyValueView(),
            'json': jsonviews.KeyValueView(),
            'xml': xmlviews.KeyValueView()
        }

        # Strip the '*_view' kwargs before forwarding to the superclass.
        newargs = copy.copy(kwargs)
        for k in kwargs:
            if k.endswith('_view'):
                # 'text_view' -> views['text'], etc.
                self.views[k[:-5]] = kwargs[k]
                del newargs[k]
        super(ModelWithDefaultViews, self).__init__(*args, **newargs)

    def set_current_view_type(self, tp):
        # Select the stored default view for the given type, then let the
        # superclass propagate the choice (e.g. to submodels).
        self.attached_view = self.views[tp]
        super(ModelWithDefaultViews, self).set_current_view_type(tp)

    def __getattr__(self, attrname):
        # Synthesize to_<type>() serializer methods on demand.
        if attrname[:3] == 'to_':
            if self.views[attrname[3:]] is not None:
                return lambda: self.views[attrname[3:]](self)
            else:
                raise NotImplementedError((
                    "Model {cn.__module__}.{cn.__name__} does not have" +
                    " a default view for "
                    "{tp}").format(cn=type(self), tp=attrname[3:]))
        else:
            # NOTE(review): assumes the base ReportModel defines
            # __getattr__; plain object does not — confirm, otherwise this
            # raises AttributeError about the missing super method itself.
            return super(ModelWithDefaultViews, self).__getattr__(attrname)
| apache-2.0 |
vrettasm/VGPA | code/src/gaussian_moments.py | 1 | 6505 | import numpy as np
class GaussianMoments(object):
    """
    This class creates an object that returns the higher order
    (non-central) moments of a Gaussian distribution, up to the
    8-th order, along with their partial derivatives with respect
    to the marginal means and variances.

    https://en.wikipedia.org/wiki/Normal_distribution
    """

    # Restrict instances to these attributes (saves memory, catches typos).
    __slots__ = ("m_arr", "v_arr", "n_size")

    def __init__(self, m_arr, v_arr):
        """
        Constructs an object that holds the marginal means
        and variances at all times (t).

        :param m_arr: marginal means array (N x 1).

        :param v_arr: marginal variances array (N x 1).

        :raises RuntimeError: if the input arrays have different shapes.
        """
        # Make sure the inputs are at least 1-D.
        m_arr, v_arr = np.atleast_1d(m_arr, v_arr)

        # The shapes must match.
        if m_arr.shape != v_arr.shape:
            # BUG FIX: the original message ended in the malformed
            # placeholder "{2)", so str.format() itself raised ValueError
            # before the intended RuntimeError could be constructed.
            raise RuntimeError(f" {self.__class__.__name__}:"
                               f" Input arrays shape mismatch."
                               f" {m_arr.shape} != {v_arr.shape}")
        # _end_if_

        # Store the marginal means and variances.
        self.m_arr = m_arr
        self.v_arr = v_arr

        # Get the size of the arrays.
        self.n_size = m_arr.shape[0]
    # _end_def_

    def __call__(self, order=0):
        """
        Compute the correct non-central moment up to 8-th order.

        :param order: of the un-centered Gaussian moment.

        :return: the un-centered Gaussian moment E[X^order].

        :raises ValueError: if the input order is out of bounds.
        """
        if order == 0:
            x_out = np.ones(self.n_size)
        elif order == 1:
            x_out = self.m_arr
        elif order == 2:
            x_out = self.m_arr ** 2 + self.v_arr
        elif order == 3:
            x_out = self.m_arr ** 3 +\
                    3 * self.m_arr * self.v_arr
        elif order == 4:
            x_out = self.m_arr ** 4 +\
                    6 * (self.m_arr ** 2) * self.v_arr +\
                    3 * (self.v_arr ** 2)
        elif order == 5:
            x_out = self.m_arr ** 5 +\
                    10 * (self.m_arr ** 3) * self.v_arr +\
                    15 * self.m_arr * (self.v_arr ** 2)
        elif order == 6:
            x_out = self.m_arr ** 6 +\
                    15 * (self.m_arr ** 4) * self.v_arr +\
                    45 * (self.m_arr ** 2) * (self.v_arr ** 2) +\
                    15 * (self.v_arr ** 3)
        elif order == 7:
            x_out = self.m_arr ** 7 +\
                    21 * (self.m_arr ** 5) * self.v_arr +\
                    105 * (self.m_arr ** 3) * (self.v_arr ** 2) +\
                    105 * self.m_arr * (self.v_arr ** 3)
        elif order == 8:
            x_out = self.m_arr ** 8 +\
                    28 * (self.m_arr ** 6) * self.v_arr +\
                    210 * (self.m_arr ** 4) * (self.v_arr ** 2) +\
                    420 * (self.m_arr ** 2) * (self.v_arr ** 3) +\
                    105 * (self.v_arr ** 4)
        else:
            # Unified with dM()/dS() which already use f-strings.
            raise ValueError(f" {self.__class__.__name__}:"
                             f" Wrong order value. Use values 0-8.")
        # _end_if_

        return x_out
    # _end_def_

    def dM(self, order=1):
        """
        Compute the derivative with respect to the marginal
        means, of the non-central moment, up to 8-th order.

        :param order: of the un-centered Gaussian moment.

        :return: the derivative of E[X^order] w.r.t. the marginal means.

        :raises ValueError: if the input order is out of bounds.
        """
        if order == 1:
            x_out = np.ones(self.n_size)
        elif order == 2:
            x_out = 2 * self.m_arr
        elif order == 3:
            x_out = 3 * (self.m_arr ** 2 + self.v_arr)
        elif order == 4:
            x_out = 4 * (self.m_arr ** 3 +
                         3 * self.m_arr * self.v_arr)
        elif order == 5:
            x_out = 5 * (self.m_arr ** 4 +
                         6 * (self.m_arr ** 2) * self.v_arr +
                         3 * (self.v_arr ** 2))
        elif order == 6:
            x_out = 6 * (self.m_arr ** 5 +
                         10 * (self.m_arr ** 3) * self.v_arr +
                         15 * self.m_arr * (self.v_arr ** 2))
        elif order == 7:
            x_out = 7 * (self.m_arr ** 6 +
                         15 * (self.m_arr ** 4) * self.v_arr +
                         45 * (self.m_arr ** 2) * (self.v_arr ** 2) +
                         15 * (self.v_arr ** 3))
        elif order == 8:
            x_out = 8 * (self.m_arr ** 7 +
                         21 * (self.m_arr ** 5) * self.v_arr +
                         105 * (self.m_arr ** 3) * (self.v_arr ** 2) +
                         105 * self.m_arr * (self.v_arr ** 3))
        else:
            raise ValueError(f" {self.__class__.__name__}:"
                             f" Wrong order value. Use values 1-8.")
        # _end_if_

        return x_out
    # _end_def_

    def dS(self, order=1):
        """
        Compute the derivative with respect to the marginal
        variances, of the un-centered moment, up to 8-th order.

        :param order: of the un-centered Gaussian moment.

        :return: the derivative of E[X^order] w.r.t. the marginal variances.

        :raises ValueError: if the input order is out of bounds.
        """
        if order == 1:
            x_out = np.zeros(self.n_size)
        elif order == 2:
            x_out = np.ones(self.n_size)
        elif order == 3:
            x_out = 3 * self.m_arr
        elif order == 4:
            x_out = 6 * (self.m_arr ** 2 + self.v_arr)
        elif order == 5:
            x_out = 10 * (self.m_arr ** 3) +\
                    30 * (self.m_arr * self.v_arr)
        elif order == 6:
            x_out = 15 * (self.m_arr ** 4) +\
                    90 * (self.m_arr ** 2) * self.v_arr +\
                    45 * (self.v_arr ** 2)
        elif order == 7:
            x_out = 21 * (self.m_arr ** 5) +\
                    210 * (self.m_arr ** 3) * self.v_arr +\
                    315 * self.m_arr * (self.v_arr ** 2)
        elif order == 8:
            x_out = 28 * (self.m_arr ** 6) +\
                    420 * (self.m_arr ** 4) * self.v_arr +\
                    1260 * (self.m_arr ** 2) * (self.v_arr ** 2) +\
                    420 * (self.v_arr ** 3)
        else:
            raise ValueError(f" {self.__class__.__name__}:"
                             f" Wrong order value. Use values 1-8.")
        # _end_if_

        return x_out
    # _end_def_

# _end_class_
| gpl-3.0 |
duqiao/django | django/contrib/auth/urls.py | 568 | 1036 | # The views used below are normally mapped in django.contrib.admin.urls.py
# This URLs file is used to provide a reliable view deployment for test purposes.
# It is also provided as a convenience to those who want to deploy these URLs
# elsewhere.
from django.conf.urls import url
from django.contrib.auth import views
urlpatterns = [
    # Session management
    url(r'^login/$', views.login, name='login'),
    url(r'^logout/$', views.logout, name='logout'),
    # Password change (for already-authenticated users)
    url(r'^password_change/$', views.password_change, name='password_change'),
    url(r'^password_change/done/$', views.password_change_done, name='password_change_done'),
    # Password reset via e-mail: request -> confirmation link -> done
    url(r'^password_reset/$', views.password_reset, name='password_reset'),
    url(r'^password_reset/done/$', views.password_reset_done, name='password_reset_done'),
    # uidb64 is the base64-encoded user pk; token validates the reset link
    url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
        views.password_reset_confirm, name='password_reset_confirm'),
    url(r'^reset/done/$', views.password_reset_complete, name='password_reset_complete'),
]
| bsd-3-clause |
sander76/home-assistant | homeassistant/components/google_assistant/report_state.py | 5 | 3228 | """Google Report State implementation."""
import logging
from homeassistant.const import MATCH_ALL
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.event import async_call_later
from homeassistant.helpers.significant_change import create_checker
from .const import DOMAIN
from .error import SmartHomeError
from .helpers import AbstractConfig, GoogleEntity, async_get_entities
# Time to wait until the homegraph updates
# https://github.com/actions-on-google/smart-home-nodejs/issues/196#issuecomment-439156639
INITIAL_REPORT_DELAY = 60
_LOGGER = logging.getLogger(__name__)
@callback
def async_enable_report_state(hass: HomeAssistant, google_config: AbstractConfig):
    """Enable state reporting.

    Schedules an initial full report after INITIAL_REPORT_DELAY, then
    subscribes to all state changes and forwards significant ones to the
    Google Home Graph. Returns a callable that cancels the subscription.
    """
    # Created in inital_report(); the listener relies on it existing by the
    # time it runs (listeners are only attached after the initial report).
    checker = None

    async def async_entity_state_listener(changed_entity, old_state, new_state):
        # Report a single entity's new state, applying all exposure and
        # significance filters first.
        if not hass.is_running:
            return

        if not new_state:
            return

        if not google_config.should_expose(new_state):
            return

        entity = GoogleEntity(hass, google_config, new_state)

        if not entity.is_supported():
            return

        try:
            entity_data = entity.query_serialize()
        except SmartHomeError as err:
            _LOGGER.debug("Not reporting state for %s: %s", changed_entity, err.code)
            return

        if not checker.async_is_significant_change(new_state, extra_arg=entity_data):
            return

        _LOGGER.debug("Reporting state for %s: %s", changed_entity, entity_data)

        await google_config.async_report_state_all(
            {"devices": {"states": {changed_entity: entity_data}}}
        )

    @callback
    def extra_significant_check(
        hass: HomeAssistant,
        old_state: str,
        old_attrs: dict,
        old_extra_arg: dict,
        new_state: str,
        new_attrs: dict,
        new_extra_arg: dict,
    ):
        """Check if the serialized data has changed."""
        return old_extra_arg != new_extra_arg

    async def inital_report(_now):
        """Report initially all states."""
        nonlocal unsub, checker
        entities = {}

        checker = await create_checker(hass, DOMAIN, extra_significant_check)

        for entity in async_get_entities(hass, google_config):
            if not entity.should_expose():
                continue

            try:
                entity_data = entity.query_serialize()
            except SmartHomeError:
                continue

            # Tell our significant change checker that we're reporting
            # So it knows with subsequent changes what was already reported.
            if not checker.async_is_significant_change(
                entity.state, extra_arg=entity_data
            ):
                continue

            entities[entity.entity_id] = entity_data

        if not entities:
            return

        await google_config.async_report_state_all({"devices": {"states": entities}})

        # Only start listening for live changes after the initial snapshot.
        unsub = hass.helpers.event.async_track_state_change(
            MATCH_ALL, async_entity_state_listener
        )

    unsub = async_call_later(hass, INITIAL_REPORT_DELAY, inital_report)

    # pylint: disable=unnecessary-lambda
    return lambda: unsub()
| apache-2.0 |
MarkWh1te/xueqiu_predict | python3_env/lib/python3.4/site-packages/wheel/archive.py | 93 | 2247 | """
Archive tools for wheel.
"""
import os
import time
import logging
import os.path
import zipfile
log = logging.getLogger("wheel")
def archive_wheelfile(base_name, base_dir):
    """Create `base_name`.whl containing every file under `base_dir`.

    Temporarily switches the working directory to `base_dir` so the
    archive members get paths relative to it, and always restores the
    original working directory afterwards.
    """
    start_dir = os.path.abspath(os.curdir)
    base_name = os.path.abspath(base_name)
    try:
        os.chdir(base_dir)
        return make_wheelfile_inner(base_name)
    finally:
        os.chdir(start_dir)
def make_wheelfile_inner(base_name, base_dir='.'):
    """Create a whl file from all the files under 'base_dir'.

    Places .dist-info at the end of the archive.

    :param base_name: archive path without the ".whl" extension.
    :param base_dir: directory whose contents are archived.
    :return: the path of the created ".whl" file.
    """
    zip_filename = base_name + ".whl"
    log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)

    # Some applications need reproducible .whl files, but they can't do this
    # without forcing the timestamp of the individual ZipInfo objects. See
    # issue #143.
    timestamp = os.environ.get('SOURCE_DATE_EPOCH')
    if timestamp is None:
        date_time = None
    else:
        date_time = time.gmtime(int(timestamp))[0:6]

    # .dist-info members are deferred and written last, ordered so that
    # WHEEL < METADATA < RECORD (unknown names sort first with score 0).
    score = {'WHEEL': 1, 'METADATA': 2, 'RECORD': 3}
    deferred = []

    # XXX support bz2, xz when available
    # Use a context manager so the archive handle is closed (and the file
    # is valid) even if an error occurs mid-write.
    with zipfile.ZipFile(zip_filename, "w",
                         compression=zipfile.ZIP_DEFLATED) as zf:

        def writefile(path, date_time):
            # Add a single file, preserving its mode bits; fall back to the
            # file's mtime when no SOURCE_DATE_EPOCH override was given.
            st = os.stat(path)
            if date_time is None:
                date_time = time.gmtime(st.st_mtime)[0:6]
            zinfo = zipfile.ZipInfo(path, date_time)
            zinfo.external_attr = st.st_mode << 16
            zinfo.compress_type = zipfile.ZIP_DEFLATED
            with open(path, 'rb') as fp:
                zf.writestr(zinfo, fp.read())
            # Lazy %-style args avoid formatting when INFO is disabled.
            log.info("adding '%s'", path)

        for dirpath, dirnames, filenames in os.walk(base_dir):
            for name in filenames:
                path = os.path.normpath(os.path.join(dirpath, name))
                if os.path.isfile(path):
                    if dirpath.endswith('.dist-info'):
                        deferred.append((score.get(name, 0), path))
                    else:
                        writefile(path, date_time)

        # FIX: the original reused the name `score` as the loop variable,
        # shadowing the score dict; use a throwaway name instead.
        for _, path in sorted(deferred):
            writefile(path, date_time)

    return zip_filename
| mit |
chacoroot/planetary | addons/payment_buckaroo/models/buckaroo.py | 209 | 8422 | # -*- coding: utf-'8' "-*-"
from hashlib import sha1
import logging
import urlparse
from openerp.addons.payment.models.payment_acquirer import ValidationError
from openerp.addons.payment_buckaroo.controllers.main import BuckarooController
from openerp.osv import osv, fields
from openerp.tools.float_utils import float_compare
_logger = logging.getLogger(__name__)
class AcquirerBuckaroo(osv.Model):
    """Buckaroo-specific extension of the generic payment.acquirer model."""
    _inherit = 'payment.acquirer'

    def _get_buckaroo_urls(self, cr, uid, environment, context=None):
        """ Buckaroo URLs

        :param environment: 'prod' selects the live checkout endpoint; any
                            other value selects the test endpoint.
        """
        if environment == 'prod':
            return {
                'buckaroo_form_url': 'https://checkout.buckaroo.nl/html/',
            }
        else:
            return {
                'buckaroo_form_url': 'https://testcheckout.buckaroo.nl/html/',
            }

    def _get_providers(self, cr, uid, context=None):
        # Register Buckaroo in the list of selectable payment providers.
        providers = super(AcquirerBuckaroo, self)._get_providers(cr, uid, context=context)
        providers.append(['buckaroo', 'Buckaroo'])
        return providers

    # Credentials configured per acquirer record.
    _columns = {
        'brq_websitekey': fields.char('WebsiteKey', required_if_provider='buckaroo'),
        'brq_secretkey': fields.char('SecretKey', required_if_provider='buckaroo'),
    }

    def _buckaroo_generate_digital_sign(self, acquirer, inout, values):
        """ Generate the shasign for incoming or outgoing communications.

        :param browse acquirer: the payment.acquirer browse record. It should
                                have a shakey in shaky out
        :param string inout: 'in' (openerp contacting buckaroo) or 'out' (buckaroo
                             contacting openerp).
        :param dict values: transaction values

        :return string: shasign
        """
        assert inout in ('in', 'out')
        assert acquirer.provider == 'buckaroo'

        # Keys included in the outgoing ('in') signature, in this exact order.
        keys = "add_returndata Brq_amount Brq_culture Brq_currency Brq_invoicenumber Brq_return Brq_returncancel Brq_returnerror Brq_returnreject brq_test Brq_websitekey".split()

        def get_value(key):
            # Missing/falsy values are signed as empty strings.
            if values.get(key):
                return values[key]
            return ''

        values = dict(values or {})

        if inout == 'out':
            # Feedback from Buckaroo: sign every field except the signature
            # itself, sorted by upper-cased key.
            if 'BRQ_SIGNATURE' in values:
                del values['BRQ_SIGNATURE']
            items = sorted((k.upper(), v) for k, v in values.items())
            sign = ''.join('%s=%s' % (k, v) for k, v in items)
        else:
            sign = ''.join('%s=%s' % (k, get_value(k)) for k in keys)

        #Add the pre-shared secret key at the end of the signature
        sign = sign + acquirer.brq_secretkey

        # NOTE(review): parse_qsl() returns a list, and sha1() of a list
        # raises TypeError, so this branch looks broken/dead (in Py2 `sign`
        # may be unicode, skipping it) — confirm the intended behavior.
        if isinstance(sign, str):
            sign = urlparse.parse_qsl(sign)

        shasign = sha1(sign).hexdigest()
        return shasign

    def buckaroo_form_generate_values(self, cr, uid, id, partner_values, tx_values, context=None):
        """Build the hidden form values posted to Buckaroo's checkout page."""
        base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
        acquirer = self.browse(cr, uid, id, context=context)
        buckaroo_tx_values = dict(tx_values)
        buckaroo_tx_values.update({
            'Brq_websitekey': acquirer.brq_websitekey,
            'Brq_amount': tx_values['amount'],
            'Brq_currency': tx_values['currency'] and tx_values['currency'].name or '',
            'Brq_invoicenumber': tx_values['reference'],
            'brq_test': False if acquirer.environment == 'prod' else True,
            # Absolute callback URLs handled by BuckarooController.
            'Brq_return': '%s' % urlparse.urljoin(base_url, BuckarooController._return_url),
            'Brq_returncancel': '%s' % urlparse.urljoin(base_url, BuckarooController._cancel_url),
            'Brq_returnerror': '%s' % urlparse.urljoin(base_url, BuckarooController._exception_url),
            'Brq_returnreject': '%s' % urlparse.urljoin(base_url, BuckarooController._reject_url),
            # Buckaroo expects culture codes like 'en-US', not 'en_US'.
            'Brq_culture': (partner_values.get('lang') or 'en_US').replace('_', '-'),
        })
        if buckaroo_tx_values.get('return_url'):
            buckaroo_tx_values['add_returndata'] = {'return_url': '%s' % buckaroo_tx_values.pop('return_url')}
        else:
            buckaroo_tx_values['add_returndata'] = ''
        # Signature must be computed last, over the final set of values.
        buckaroo_tx_values['Brq_signature'] = self._buckaroo_generate_digital_sign(acquirer, 'in', buckaroo_tx_values)
        return partner_values, buckaroo_tx_values

    def buckaroo_get_form_action_url(self, cr, uid, id, context=None):
        # The URL the payment form posts to (prod or test, per acquirer).
        acquirer = self.browse(cr, uid, id, context=context)
        return self._get_buckaroo_urls(cr, uid, acquirer.environment, context=context)['buckaroo_form_url']
class TxBuckaroo(osv.Model):
    """Buckaroo-specific extension of the generic payment.transaction model."""
    _inherit = 'payment.transaction'

    # buckaroo status
    # Status-code groups from Buckaroo's BRQ_STATUSCODE feedback field.
    _buckaroo_valid_tx_status = [190]
    _buckaroo_pending_tx_status = [790, 791, 792, 793]
    _buckaroo_cancel_tx_status = [890, 891]
    _buckaroo_error_tx_status = [490, 491, 492]
    _buckaroo_reject_tx_status = [690]

    _columns = {
        'buckaroo_txnid': fields.char('Transaction ID'),
    }

    # --------------------------------------------------
    # FORM RELATED METHODS
    # --------------------------------------------------

    def _buckaroo_form_get_tx_from_data(self, cr, uid, data, context=None):
        """ Given a data dict coming from buckaroo, verify it and find the related
        transaction record. """
        reference, pay_id, shasign = data.get('BRQ_INVOICENUMBER'), data.get('BRQ_PAYMENT'), data.get('BRQ_SIGNATURE')
        if not reference or not pay_id or not shasign:
            error_msg = 'Buckaroo: received data with missing reference (%s) or pay_id (%s) or shashign (%s)' % (reference, pay_id, shasign)
            _logger.error(error_msg)
            raise ValidationError(error_msg)

        # The invoice number must map to exactly one known transaction.
        tx_ids = self.search(cr, uid, [('reference', '=', reference)], context=context)
        if not tx_ids or len(tx_ids) > 1:
            error_msg = 'Buckaroo: received data for reference %s' % (reference)
            if not tx_ids:
                error_msg += '; no order found'
            else:
                error_msg += '; multiple order found'
            _logger.error(error_msg)
            raise ValidationError(error_msg)
        tx = self.pool['payment.transaction'].browse(cr, uid, tx_ids[0], context=context)

        #verify shasign
        shasign_check = self.pool['payment.acquirer']._buckaroo_generate_digital_sign(tx.acquirer_id, 'out', data)
        if shasign_check.upper() != shasign.upper():
            error_msg = 'Buckaroo: invalid shasign, received %s, computed %s, for data %s' % (shasign, shasign_check, data)
            _logger.error(error_msg)
            raise ValidationError(error_msg)

        return tx

    def _buckaroo_form_get_invalid_parameters(self, cr, uid, tx, data, context=None):
        """Return (name, received, expected) tuples for mismatching fields."""
        invalid_parameters = []

        if tx.acquirer_reference and data.get('BRQ_TRANSACTIONS') != tx.acquirer_reference:
            invalid_parameters.append(('Transaction Id', data.get('BRQ_TRANSACTIONS'), tx.acquirer_reference))
        # check what is buyed
        if float_compare(float(data.get('BRQ_AMOUNT', '0.0')), tx.amount, 2) != 0:
            invalid_parameters.append(('Amount', data.get('BRQ_AMOUNT'), '%.2f' % tx.amount))
        if data.get('BRQ_CURRENCY') != tx.currency_id.name:
            invalid_parameters.append(('Currency', data.get('BRQ_CURRENCY'), tx.currency_id.name))

        return invalid_parameters

    def _buckaroo_form_validate(self, cr, uid, tx, data, context=None):
        """Map Buckaroo's BRQ_STATUSCODE onto the transaction state.

        Returns True for done/pending/cancel outcomes, False on error.
        """
        status_code = int(data.get('BRQ_STATUSCODE', '0'))
        if status_code in self._buckaroo_valid_tx_status:
            tx.write({
                'state': 'done',
                'buckaroo_txnid': data.get('BRQ_TRANSACTIONS'),
            })
            return True
        elif status_code in self._buckaroo_pending_tx_status:
            tx.write({
                'state': 'pending',
                'buckaroo_txnid': data.get('BRQ_TRANSACTIONS'),
            })
            return True
        elif status_code in self._buckaroo_cancel_tx_status:
            tx.write({
                'state': 'cancel',
                'buckaroo_txnid': data.get('BRQ_TRANSACTIONS'),
            })
            return True
        else:
            # Error/reject codes (and anything unknown) end up here.
            error = 'Buckaroo: feedback error'
            _logger.info(error)
            tx.write({
                'state': 'error',
                'state_message': error,
                'buckaroo_txnid': data.get('BRQ_TRANSACTIONS'),
            })
            return False
| agpl-3.0 |
allfs/mariadb | storage/tokudb/scripts/sql.bench.summary.py | 16 | 2076 | #!/usr/bin/env python
# summarize the sql-bench trace file
import sys
import re
import os.path
class testreports:
    """Collects per-test report dicts and prints a one-line summary each.

    A report dict has keys: name, start, stop, result.
    NOTE: this file is Python 2 (print statements).
    """
    def __init__(self):
        self.reports = []
    def append(self, report):
        self.reports.append(report)
    def duration(self, start, stop):
        # Shells out to GNU date(1) to convert "YYYYMMDD HH:MM:SS"
        # timestamps to epoch seconds; returns the difference in seconds.
        t0 = os.popen('date -d"' + start + '" +%s').readline()
        t1 = os.popen('date -d"' + stop + '" +%s').readline()
        return int(t1) - int(t0)
    def printit(self, i):
        # One summary line: RESULT start-time duration-seconds test-name
        report = self.reports[i]
        d = self.duration(report["start"], report["stop"])
        print "%s %s %6u %s" % (report["result"].upper(), report["start"], d, report["name"])
        # print self.reports[i]
    def printall(self):
        for i in range(len(self.reports)):
            self.printit(i)
    def stoptime(self, stoptime):
        # Patch the stop time of the most recently appended report.
        if len(self.reports) > 0:
            lastreport = self.reports[-1]
            lastreport["stop"] = stoptime
def main():
    """Read a sql-bench trace from stdin and print a one-line summary per test.

    Returns 0 on completion (the exit status passed to sys.exit below).
    """
    reports = testreports()
    testreport = {}
    # "Total time" line seen for the current test.  Initialized before the
    # loop: the original referenced it before assignment, so a trace that
    # begins with a bare timestamp line crashed with a NameError.
    totaltime = ""
    while 1:
        b = sys.stdin.readline()
        if b == "": break
        b = b.rstrip('\n')
        # A bare timestamp line terminates the current test report.
        match = re.match(r"^(\d{8} \d{2}:\d{2}:\d{2})$", b)
        if match:
            # Guard against a timestamp with no open report (KeyError before).
            if testreport:
                # A passing test must have reported its total time.
                if totaltime == "" and testreport["result"] == "pass":
                    testreport["result"] = "fail"
                testreport["stop"] = match.group(1)
                reports.append(testreport)
                testreport = {}
            continue
        # A timestamp followed by a test name starts a new report.
        match = re.match(r"^(\d{8} \d{2}:\d{2}:\d{2}) (test-.*)$", b)
        if match:
            testreport["start"] = match.group(1)
            testreport["name"] = match.group(2)
            testreport["result"] = "pass"
            totaltime = ""
            continue
        match = re.match(r".*Got error|.*Died at", b)
        if match: testreport["result"] = "fail"
        match = re.match(r"^Total time|^Estimated total time", b)
        if match: totaltime = b
        match = re.match(r"skip", b)
        if match: testreport["result"] = "skip"
    reports.printall()
    return 0
sys.exit(main())
| gpl-2.0 |
robbiet480/home-assistant | homeassistant/components/mcp23017/switch.py | 8 | 2825 | """Support for switch sensor using I2C MCP23017 chip."""
import logging
from adafruit_mcp230xx.mcp23017 import MCP23017 # pylint: disable=import-error
import board # pylint: disable=import-error
import busio # pylint: disable=import-error
import digitalio # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA
from homeassistant.const import DEVICE_DEFAULT_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import ToggleEntity
_LOGGER = logging.getLogger(__name__)
CONF_INVERT_LOGIC = "invert_logic"
CONF_I2C_ADDRESS = "i2c_address"
CONF_PINS = "pins"
CONF_PULL_MODE = "pull_mode"
DEFAULT_INVERT_LOGIC = False
DEFAULT_I2C_ADDRESS = 0x20
_SWITCHES_SCHEMA = vol.Schema({cv.positive_int: cv.string})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_PINS): _SWITCHES_SCHEMA,
vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,
vol.Optional(CONF_I2C_ADDRESS, default=DEFAULT_I2C_ADDRESS): vol.Coerce(int),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
    """Set up the MCP23017 devices.

    Opens the shared I2C bus, addresses one MCP23017 expander at
    CONF_I2C_ADDRESS, and creates one switch entity per configured pin.
    """
    invert_logic = config.get(CONF_INVERT_LOGIC)
    i2c_address = config.get(CONF_I2C_ADDRESS)
    # Board-default I2C pins; the expander is selected by its bus address.
    i2c = busio.I2C(board.SCL, board.SDA)
    mcp = MCP23017(i2c, address=i2c_address)
    switches = []
    # pins maps pin number -> friendly entity name (validated by _SWITCHES_SCHEMA)
    pins = config.get(CONF_PINS)
    for pin_num, pin_name in pins.items():
        pin = mcp.get_pin(pin_num)
        switches.append(MCP23017Switch(pin_name, pin, invert_logic))
    add_entities(switches)
class MCP23017Switch(ToggleEntity):
    """Representation of a MCP23017 output pin."""
    def __init__(self, name, pin, invert_logic):
        """Initialize the pin.

        invert_logic=True means the pin is driven high for "off" and low
        for "on" (active-low wiring).
        """
        self._name = name or DEVICE_DEFAULT_NAME
        self._pin = pin
        self._invert_logic = invert_logic
        self._state = False
        # Configure as output and start electrically in the "off" state.
        self._pin.direction = digitalio.Direction.OUTPUT
        self._pin.value = self._invert_logic
    @property
    def name(self):
        """Return the name of the switch."""
        return self._name
    @property
    def should_poll(self):
        """No polling needed."""
        return False
    @property
    def is_on(self):
        """Return true if device is on."""
        return self._state
    @property
    def assumed_state(self):
        """Return true if optimistic updates are used."""
        # The pin level is never read back, so state is tracked optimistically.
        return True
    def turn_on(self, **kwargs):
        """Turn the device on."""
        self._pin.value = not self._invert_logic
        self._state = True
        self.schedule_update_ha_state()
    def turn_off(self, **kwargs):
        """Turn the device off."""
        self._pin.value = self._invert_logic
        self._state = False
        self.schedule_update_ha_state()
| apache-2.0 |
40223119/2015w13 | static/Brython3.1.1-20150328-091302/Lib/tokenize.py | 728 | 24424 | """Tokenization help for Python programs.
tokenize(readline) is a generator that breaks a stream of bytes into
Python tokens. It decodes the bytes according to PEP-0263 for
determining source file encoding.
It accepts a readline-like method which is called repeatedly to get the
next line of input (or b"" for EOF). It generates 5-tuples with these
members:
the token type (see token.py)
the token (a string)
the starting (row, column) indices of the token (a 2-tuple of ints)
the ending (row, column) indices of the token (a 2-tuple of ints)
the original line (string)
It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators. Additionally, all token lists start with an ENCODING token
which tells you which encoding was used to decode the bytes stream.
"""
__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
'Michael Foord')
import builtins
import re
import sys
from token import *
from codecs import lookup, BOM_UTF8
import collections
from io import TextIOWrapper
cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
import token
__all__ = token.__all__ + ["COMMENT", "tokenize", "detect_encoding",
"NL", "untokenize", "ENCODING", "TokenInfo"]
del token
COMMENT = N_TOKENS
tok_name[COMMENT] = 'COMMENT'
NL = N_TOKENS + 1
tok_name[NL] = 'NL'
ENCODING = N_TOKENS + 2
tok_name[ENCODING] = 'ENCODING'
N_TOKENS += 3
EXACT_TOKEN_TYPES = {
'(': LPAR,
')': RPAR,
'[': LSQB,
']': RSQB,
':': COLON,
',': COMMA,
';': SEMI,
'+': PLUS,
'-': MINUS,
'*': STAR,
'/': SLASH,
'|': VBAR,
'&': AMPER,
'<': LESS,
'>': GREATER,
'=': EQUAL,
'.': DOT,
'%': PERCENT,
'{': LBRACE,
'}': RBRACE,
'==': EQEQUAL,
'!=': NOTEQUAL,
'<=': LESSEQUAL,
'>=': GREATEREQUAL,
'~': TILDE,
'^': CIRCUMFLEX,
'<<': LEFTSHIFT,
'>>': RIGHTSHIFT,
'**': DOUBLESTAR,
'+=': PLUSEQUAL,
'-=': MINEQUAL,
'*=': STAREQUAL,
'/=': SLASHEQUAL,
'%=': PERCENTEQUAL,
'&=': AMPEREQUAL,
'|=': VBAREQUAL,
'^=': CIRCUMFLEXEQUAL,
'<<=': LEFTSHIFTEQUAL,
'>>=': RIGHTSHIFTEQUAL,
'**=': DOUBLESTAREQUAL,
'//': DOUBLESLASH,
'//=': DOUBLESLASHEQUAL,
'@': AT
}
class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line')):
    """One token: (type, string, start, end, line) with an annotated repr."""

    def __repr__(self):
        # Show the numeric token type together with its symbolic name.
        labelled = '%d (%s)' % (self.type, tok_name[self.type])
        fields = self._replace(type=labelled)
        return 'TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' % fields

    @property
    def exact_type(self):
        """Fine-grained operator type for OP tokens; self.type otherwise."""
        if self.type != OP:
            return self.type
        return EXACT_TOKEN_TYPES.get(self.string, self.type)
def group(*choices):
    """Join regex alternatives into a single '(a|b|...)' group."""
    return '(%s)' % '|'.join(choices)

def any(*choices):
    """Regex matching zero or more of the grouped alternatives."""
    # NOTE: deliberately shadows the builtin any() within this module.
    return group(*choices) + '*'

def maybe(*choices):
    """Regex matching the grouped alternatives at most once."""
    return group(*choices) + '?'
# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'
Hexnumber = r'0[xX][0-9a-fA-F]+'
Binnumber = r'0[bB][01]+'
Octnumber = r'0[oO][0-7]+'
Decnumber = r'(?:0+|[1-9][0-9]*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9]+'
Pointfloat = group(r'[0-9]+\.[0-9]*', r'\.[0-9]+') + maybe(Exponent)
Expfloat = r'[0-9]+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9]+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
StringPrefix = r'(?:[bB][rR]?|[rR][bB]?|[uU])?'
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"!=",
r"//=?", r"->",
r"[+\-*/%&|^=<>]=?",
r"~")
Bracket = '[][(){}]'
Special = group(r'\r?\n', r'\.\.\.', r'[:;.,@]')
Funny = group(Operator, Bracket, Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
def _compile(expr):
return re.compile(expr, re.UNICODE)
endpats = {"'": Single, '"': Double,
"'''": Single3, '"""': Double3,
"r'''": Single3, 'r"""': Double3,
"b'''": Single3, 'b"""': Double3,
"R'''": Single3, 'R"""': Double3,
"B'''": Single3, 'B"""': Double3,
"br'''": Single3, 'br"""': Double3,
"bR'''": Single3, 'bR"""': Double3,
"Br'''": Single3, 'Br"""': Double3,
"BR'''": Single3, 'BR"""': Double3,
"rb'''": Single3, 'rb"""': Double3,
"Rb'''": Single3, 'Rb"""': Double3,
"rB'''": Single3, 'rB"""': Double3,
"RB'''": Single3, 'RB"""': Double3,
"u'''": Single3, 'u"""': Double3,
"R'''": Single3, 'R"""': Double3,
"U'''": Single3, 'U"""': Double3,
'r': None, 'R': None, 'b': None, 'B': None,
'u': None, 'U': None}
triple_quoted = {}
for t in ("'''", '"""',
"r'''", 'r"""', "R'''", 'R"""',
"b'''", 'b"""', "B'''", 'B"""',
"br'''", 'br"""', "Br'''", 'Br"""',
"bR'''", 'bR"""', "BR'''", 'BR"""',
"rb'''", 'rb"""', "rB'''", 'rB"""',
"Rb'''", 'Rb"""', "RB'''", 'RB"""',
"u'''", 'u"""', "U'''", 'U"""',
):
triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
"r'", 'r"', "R'", 'R"',
"b'", 'b"', "B'", 'B"',
"br'", 'br"', "Br'", 'Br"',
"bR'", 'bR"', "BR'", 'BR"' ,
"rb'", 'rb"', "rB'", 'rB"',
"Rb'", 'Rb"', "RB'", 'RB"' ,
"u'", 'u"', "U'", 'U"',
):
single_quoted[t] = t
tabsize = 8
# Raised when tokenization hits EOF inside an unterminated string/statement.
class TokenError(Exception): pass
# Raised by callers of the legacy tokenize API to abort tokenization early.
class StopTokenizing(Exception): pass
class Untokenizer:
    """Reassemble source text from a stream of token tuples.

    Exact-position mode is used while full 5-tuples (with start/end
    positions) are available; compat() takes over permanently as soon as
    a bare 2-tuple is seen.
    """

    def __init__(self):
        self.tokens = []        # output fragments, joined at the end
        self.prev_row = 1       # row/col reached by the last emitted token
        self.prev_col = 0
        self.encoding = None    # set from a leading ENCODING token, if any
    def add_whitespace(self, start):
        # Pad with spaces so the next token lands at its recorded column.
        # NOTE(review): 'row <= self.prev_row' looks inverted (rows should
        # only advance); confirm against upstream CPython tokenize.py.
        row, col = start
        assert row <= self.prev_row
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)
    def untokenize(self, iterable):
        for t in iterable:
            if len(t) == 2:
                # No position info: fall back to the lossy compat mode.
                self.compat(t, iterable)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENCODING:
                self.encoding = token
                continue
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)
    def compat(self, token, iterable):
        # Position-less mode: re-insert minimal spacing and indentation.
        startline = False
        indents = []
        toks_append = self.tokens.append
        toknum, tokval = token
        if toknum in (NAME, NUMBER):
            tokval += ' '
        if toknum in (NEWLINE, NL):
            startline = True
        prevstring = False
        for tok in iterable:
            toknum, tokval = tok[:2]
            if toknum == ENCODING:
                self.encoding = tokval
                continue
            if toknum in (NAME, NUMBER):
                tokval += ' '
            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False
            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                # Re-emit the current indentation at the start of each line.
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)
def untokenize(iterable):
    """Transform tokens back into Python source code.
    It returns a bytes object, encoded using the ENCODING
    token, which is the first token sequence output by tokenize.
    Each element returned by the iterable must be a token sequence
    with at least two elements, a token number and token value. If
    only two tokens are passed, the resulting output is poor.
    Round-trip invariant for full input:
        Untokenized source will match input source exactly
    Round-trip invariant for limited input:
        # Output bytes will tokenize the back to the input
        t1 = [tok[:2] for tok in tokenize(f.readline)]
        newcode = untokenize(t1)
        readline = BytesIO(newcode).readline
        t2 = [tok[:2] for tok in tokenize(readline)]
        assert t1 == t2
    """
    ut = Untokenizer()
    out = ut.untokenize(iterable)
    if ut.encoding is not None:
        # Re-encode with the encoding recorded from the ENCODING token.
        out = out.encode(ut.encoding)
    return out
def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \
enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")):
return "iso-8859-1"
return orig_enc
def detect_encoding(readline):
    """
    The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file. It requires one argument, readline,
    in the same way as the tokenize() generator.
    It will call readline a maximum of twice, and return the encoding used
    (as a string) and a list of any lines (left as bytes) it has read in.
    It detects the encoding from the presence of a utf-8 bom or an encoding
    cookie as specified in pep-0263. If both a bom and a cookie are present,
    but disagree, a SyntaxError will be raised. If the encoding cookie is an
    invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
    'utf-8-sig' is returned.
    If no encoding is specified, then the default of 'utf-8' will be returned.
    """
    # Best effort: the filename is only used to improve error messages.
    try:
        filename = readline.__self__.name
    except AttributeError:
        filename = None
    bom_found = False
    encoding = None
    default = 'utf-8'
    def read_or_stop():
        try:
            return readline()
        except StopIteration:
            return b''
    def find_cookie(line):
        try:
            # Decode as UTF-8. Either the line is an encoding declaration,
            # in which case it should be pure ASCII, or it must be UTF-8
            # per default encoding.
            line_string = line.decode('utf-8')
        except UnicodeDecodeError:
            msg = "invalid or missing encoding declaration"
            if filename is not None:
                msg = '{} for {!r}'.format(msg, filename)
            raise SyntaxError(msg)
        match = cookie_re.match(line_string)
        if not match:
            return None
        encoding = _get_normal_name(match.group(1))
        try:
            codec = lookup(encoding)
        except LookupError:
            # This behaviour mimics the Python interpreter
            if filename is None:
                msg = "unknown encoding: " + encoding
            else:
                msg = "unknown encoding for {!r}: {}".format(filename,
                                                             encoding)
            raise SyntaxError(msg)
        if bom_found:
            if encoding != 'utf-8':
                # This behaviour mimics the Python interpreter
                if filename is None:
                    msg = 'encoding problem: utf-8'
                else:
                    msg = 'encoding problem for {!r}: utf-8'.format(filename)
                raise SyntaxError(msg)
            encoding += '-sig'
        return encoding
    first = read_or_stop()
    if first.startswith(BOM_UTF8):
        # Strip the BOM; the cookie (if any) must then agree with utf-8.
        bom_found = True
        first = first[3:]
        default = 'utf-8-sig'
    if not first:
        return default, []
    encoding = find_cookie(first)
    if encoding:
        return encoding, [first]
    second = read_or_stop()
    if not second:
        return default, [first]
    encoding = find_cookie(second)
    if encoding:
        return encoding, [first, second]
    return default, [first, second]
def open(filename):
    """Open a file in read only mode using the encoding detected by
    detect_encoding().

    NOTE: intentionally shadows builtins.open within this module.
    """
    buffer = builtins.open(filename, 'rb')
    encoding, lines = detect_encoding(buffer.readline)
    # Rewind: detect_encoding consumed up to two lines from the buffer.
    buffer.seek(0)
    text = TextIOWrapper(buffer, encoding, line_buffering=True)
    text.mode = 'r'
    return text
def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects. Each call to the function
    should return one line of input as bytes. Alternately, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile, 'rb').__next__ # Example of alternate readline
    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found. The line passed is the
    logical line; continuation lines are included.
    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
    """
    # This import is here to avoid problems when the itertools module is not
    # built yet and tokenize is imported.
    from itertools import chain, repeat
    encoding, consumed = detect_encoding(readline)
    rl_gen = iter(readline, b"")
    # Pad with empty bytes so _tokenize sees a clean EOF instead of
    # StopIteration leaking out of the chained iterator.
    empty = repeat(b"")
    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding)
def _tokenize(readline, encoding):
    """Core tokenizer state machine; yields TokenInfo 5-tuples.

    *readline* returns one line per call (bytes if *encoding* is given,
    in which case an ENCODING token is emitted first and each line is
    decoded). State spans lines: contstr/contline for unterminated
    strings, parenlev for open brackets, indents for the indent stack.
    """
    lnum = parenlev = continued = 0
    numchars = '0123456789'
    contstr, needcont = '', 0
    contline = None
    indents = [0]
    if encoding is not None:
        if encoding == "utf-8-sig":
            # BOM will already have been stripped.
            encoding = "utf-8"
        yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
    while True:             # loop over lines in stream
        try:
            line = readline()
        except StopIteration:
            line = b''
        if encoding is not None:
            line = line.decode(encoding)
        lnum += 1
        pos, max = 0, len(line)
        if contstr:                            # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield TokenInfo(STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                # Single-quoted string was not continued with a backslash.
                yield TokenInfo(ERRORTOKEN, contstr + line,
                           strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue
        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f':
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break
            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    nl_pos = pos + len(comment_token)
                    yield TokenInfo(COMMENT, comment_token,
                           (lnum, pos), (lnum, pos + len(comment_token)), line)
                    yield TokenInfo(NL, line[nl_pos:],
                           (lnum, nl_pos), (lnum, len(line)), line)
                else:
                    yield TokenInfo((NL, COMMENT)[line[pos] == '#'], line[pos:],
                           (lnum, pos), (lnum, len(line)), line)
                continue
            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]
                yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)
        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0
        while pos < max:
            pseudomatch = _compile(PseudoToken).match(line, pos)
            if pseudomatch:                                # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                if start == end:
                    continue
                token, initial = line[start:end], line[start]
                if (initial in numchars or                 # ordinary number
                    (initial == '.' and token != '.' and token != '...')):
                    yield TokenInfo(NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    yield TokenInfo(NL if parenlev > 0 else NEWLINE,
                           token, spos, epos, line)
                elif initial == '#':
                    assert not token.endswith("\n")
                    yield TokenInfo(COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = _compile(endpats[token])
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield TokenInfo(STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                    token[:2] in single_quoted or \
                    token[:3] in single_quoted:
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        endprog = _compile(endpats[initial] or
                                           endpats[token[1]] or
                                           endpats[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        yield TokenInfo(STRING, token, spos, epos, line)
                elif initial.isidentifier():               # ordinary name
                    yield TokenInfo(NAME, token, spos, epos, line)
                elif initial == '\\':                      # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    yield TokenInfo(OP, token, spos, epos, line)
            else:
                yield TokenInfo(ERRORTOKEN, line[pos],
                           (lnum, pos), (lnum, pos+1), line)
                pos += 1
    for indent in indents[1:]:                 # pop remaining indent levels
        yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
# An undocumented, backwards compatible, API for all the places in the standard
# library that expect to be able to use tokenize with strings
def generate_tokens(readline):
    """Tokenize *readline* (str lines) with no encoding detection and no
    leading ENCODING token; kept for backwards compatibility."""
    return _tokenize(readline, None)
def main():
    """Command-line entry point: print the tokenization of a file or stdin."""
    import argparse
    # Helper error handling routines
    def perror(message):
        print(message, file=sys.stderr)
    def error(message, filename=None, location=None):
        # Print a compiler-style diagnostic and exit with status 1.
        if location:
            args = (filename,) + location + (message,)
            perror("%s:%d:%d: error: %s" % args)
        elif filename:
            perror("%s: error: %s" % (filename, message))
        else:
            perror("error: %s" % message)
        sys.exit(1)
    # Parse the arguments and options
    parser = argparse.ArgumentParser(prog='python -m tokenize')
    parser.add_argument(dest='filename', nargs='?',
                        metavar='filename.py',
                        help='the file to tokenize; defaults to stdin')
    parser.add_argument('-e', '--exact', dest='exact', action='store_true',
                        help='display token names using the exact type')
    args = parser.parse_args()
    try:
        # Tokenize the input
        if args.filename:
            filename = args.filename
            with builtins.open(filename, 'rb') as f:
                tokens = list(tokenize(f.readline))
        else:
            # stdin is already text, so skip encoding detection.
            filename = "<stdin>"
            tokens = _tokenize(sys.stdin.readline, None)
        # Output the tokenization
        for token in tokens:
            token_type = token.type
            if args.exact:
                token_type = token.exact_type
            token_range = "%d,%d-%d,%d:" % (token.start + token.end)
            print("%-20s%-15s%-15r" %
                  (token_range, tok_name[token_type], token.string))
    except IndentationError as err:
        line, column = err.args[1][1:3]
        error(err.args[0], filename, (line, column))
    except TokenError as err:
        line, column = err.args[1]
        error(err.args[0], filename, (line, column))
    except SyntaxError as err:
        error(err, filename)
    except IOError as err:
        error(err)
    except KeyboardInterrupt:
        print("interrupted\n")
    except Exception as err:
        # Unexpected failure: report, then re-raise for the full traceback.
        perror("unexpected error: %s" % err)
        raise
if __name__ == "__main__":
main()
| gpl-3.0 |
tetravision/Test | test/unit/interpreter/autocomplete.py | 13 | 3302 | import unittest
from stem.interpreter.autocomplete import _get_commands, Autocompleter
from test.unit.interpreter import CONTROLLER
try:
# added in python 3.3
from unittest.mock import Mock
except ImportError:
from mock import Mock
class TestAutocompletion(unittest.TestCase):
  """Unit tests for stem.interpreter.autocomplete."""
  def test_autocomplete_results_from_config(self):
    """
    Check that we load autocompletion results from our configuration.
    """
    # A None controller yields only the static, config-provided commands.
    commands = _get_commands(None)
    self.assertTrue('PROTOCOLINFO' in commands)
    self.assertTrue('/quit' in commands)
  def test_autocomplete_results_from_tor(self):
    """
    Check our ability to determine autocompletion results based on our tor
    instance's capabilities.
    """
    # Check that when GETINFO requests fail we have base commands, but nothing
    # with arguments.
    controller = Mock()
    controller.get_info.return_value = None
    commands = _get_commands(controller)
    self.assertTrue('GETINFO ' in commands)
    self.assertTrue('GETCONF ' in commands)
    self.assertTrue('SIGNAL ' in commands)
    self.assertFalse('GETINFO info/names' in commands)
    self.assertFalse('GETCONF ExitPolicy' in commands)
    self.assertFalse('SIGNAL SHUTDOWN' in commands)
    # Now check where we should be able to determine tor's capabilities.
    commands = _get_commands(CONTROLLER)
    expected = (
      'GETINFO info/names',
      'GETINFO ip-to-country/',
      'GETINFO md/id/',
      'GETCONF ExitNodes',
      'GETCONF ExitPolicy',
      'SETCONF ExitPolicy',
      'RESETCONF ExitPolicy',
      'SETEVENTS BW',
      'SETEVENTS INFO',
      'USEFEATURE VERBOSE_NAMES',
      'USEFEATURE EXTENDED_EVENTS',
      'SIGNAL RELOAD',
      'SIGNAL SHUTDOWN',
    )
    for result in expected:
      self.assertTrue(result in commands)
    # We shouldn't include the base commands since we have results with
    # their arguments.
    self.assertFalse('GETINFO ' in commands)
    self.assertFalse('GETCONF ' in commands)
    self.assertFalse('SIGNAL ' in commands)
  def test_autocompleter_match(self):
    """
    Exercise our Autocompleter's match method.
    """
    autocompleter = Autocompleter(None)
    self.assertEqual(['/help'], autocompleter.matches('/help'))
    self.assertEqual(['/help'], autocompleter.matches('/hel'))
    self.assertEqual(['/help'], autocompleter.matches('/he'))
    self.assertEqual(['/help'], autocompleter.matches('/h'))
    self.assertEqual(['/help', '/events', '/info', '/quit'], autocompleter.matches('/'))
    # check case sensitivity
    self.assertEqual(['/help'], autocompleter.matches('/HELP'))
    self.assertEqual(['/help'], autocompleter.matches('/HeLp'))
    # check when we shouldn't have any matches
    self.assertEqual([], autocompleter.matches('blarg'))
  def test_autocompleter_complete(self):
    """
    Exercise our Autocompleter's complete method.
    """
    # complete() walks successive matches by index, returning None when done.
    autocompleter = Autocompleter(None)
    self.assertEqual('/help', autocompleter.complete('/', 0))
    self.assertEqual('/events', autocompleter.complete('/', 1))
    self.assertEqual('/info', autocompleter.complete('/', 2))
    self.assertEqual('/quit', autocompleter.complete('/', 3))
    self.assertEqual(None, autocompleter.complete('/', 4))
    self.assertEqual(None, autocompleter.complete('blarg', 0))
| lgpl-3.0 |
ingegus/tipe-corbeillator | actuators/motor.py | 1 | 3785 | import time
import threading
import serial
from actuators.interface import SerialActuator
# TODO: instead of sending a request and waiting for a reply, have the
# Arduino stream its state continuously, store it, and then simply read the
# stored variable whenever the value is needed.
# => That is where the separate thread is required!
class Motor(SerialActuator):
    """Serial-controlled DC motor with position and speed feedback.

    State (position, speed) is polled from the controller (an Arduino)
    over the serial console inherited from SerialActuator.
    """

    pwm = 0
    rotation = 0  # 0: ANTICLOCKWISE, 1: CLOCKWISE
    pos_digit_number = 6  # number of digits in a position order

    _read_thread = None
    _prev_speed_measure = (0, 0)
    _state = {
        'pos' : 0,
        'speed': 0
    }

    def __init__(self, serial_console, pos_digit_number=6, debug=False):
        """
        params:
            - serial_console: the serial.Serial object used to communicate
            - pos_digit_number : number of digits (not including the minus sign) in the position value
            - debug: if true, the object becomes verbose
        """
        self._pwm = 0
        self._rotation = 0
        # time.clock() was removed in Python 3.8; perf_counter() is the
        # documented replacement for elapsed-time measurements.
        self._prev_speed_measure = (0, time.perf_counter())
        self.pos_digit_number = pos_digit_number

        # A background reader thread was planned but is currently disabled.
        #self._read_thread = threading.Thread(target=self._update_motor_state, daemon=True)

        super().__init__(serial_console, debug)
        #self._read_thread.start()

    def _update_motor_state(self):
        """Query the controller and refresh the cached pos/speed state.

        Expects a reply of the form '[<pos> <speed>]'; malformed replies
        leave the cached state untouched.
        """
        success = False

        self.exec(b'7')  # opcode: report current state
        value = self.get_value()

        if self._debug:
            print('[SERIAL MOTOR] VALUE ', value)

        if value.startswith('[') and value.endswith(']'):
            values = value.replace('[', '').replace(']', '').split()
            if len(values) == 2:
                pos, speed = values
                # accept optional sign and decimal point in both fields
                if pos.replace('.', '').replace('-', '').isdigit() \
                   and speed.replace('.', '').replace('-', '').isdigit():
                    self._state['pos'] = int(float(pos))
                    self._state['speed'] = float(speed)
                    success = True
                    if self._debug:
                        print('[SERIAL MOTOR] STATE UPDATE : ', self._state)

        if self._debug and not success:
            print('[SERIAL MOTOR] COULD NOT UPDATE STATE')

    @property
    def speed(self):
        """Current speed of the motor in rad/s (refreshes the state first)."""
        self._update_motor_state()
        return self._state['speed']

    @speed.setter
    def speed(self, value):
        """Send a speed order to the controller.

        params:
            - value : speed as a PWM duty in [-255, 255]; the sign selects
              the rotation direction.
        Raises ValueError on non-numeric input.
        """
        if not type(value) in [int, float]:
            raise ValueError('The speed must be an int or a float, not a ' + str(type(value)))

        # Clamp to the PWM range and coerce to int: a float such as 12.5
        # previously produced '12.5' and a malformed, wrong-length order.
        pwm = int(min(abs(value), 255))
        # Zero-pad so the field is always exactly three characters.
        pwm = '0' * (3 - len(str(pwm))) + str(pwm)
        rotation = (0, 1)[value >= 0]
        order = bytes('9' + str(rotation) + pwm, 'ascii')

        if self._debug:
            print('[SERIAL MOTOR] SENT ORDER : ', order)

        self.exec(order)

    @property
    def position(self):
        """Current encoder position (refreshes the state first)."""
        self._update_motor_state()
        return self._state['pos']

    @position.setter
    def position(self, value):
        """Send a position order to the serial port."""
        if type(value) != int:
            raise TypeError('Position must be an int')

        nb_digits = len(str(abs(value)))
        if nb_digits > self.pos_digit_number:
            # The original crashed here with TypeError (str + int) while
            # building this message; format the numbers explicitly.
            raise ValueError('Position value too big: %d digits, more than %d'
                             % (nb_digits, self.pos_digit_number))

        # Order layout: '8' opcode, sign, zero-padded absolute position.
        order = '8'
        order += ('+', '-')[value < 0]
        order += '0' * (self.pos_digit_number - nb_digits)
        order += str(abs(value))
        order = bytes(order, 'ascii')

        if self._debug:
            print('[SERIAL MOTOR] SENT ORDER : ', order)

        self.exec(order)
# arduino_console = serial.Serial('COM3', 9600, timeout=1, write_timeout=2)
# belt_motor = Motor(arduino_console)
# # avoids some bugs
# time.sleep(0.5)
# belt_motor.speed = 200
# time.sleep(1)
# belt_motor.speed = 0 | mit |
dstockwell/catapult | third_party/mapreduce/mapreduce/util.py | 36 | 13063 | #!/usr/bin/env python
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for use with the mapreduce library."""
# pylint: disable=g-bad-name
__all__ = [
"create_datastore_write_config",
"for_name",
"get_queue_name",
"get_short_name",
"handler_for_name",
"is_generator",
"parse_bool",
"total_seconds",
"try_serialize_handler",
"try_deserialize_handler",
"CALLBACK_MR_ID_TASK_HEADER",
"strip_prefix_from_items"
]
import inspect
import os
import pickle
import random
import sys
import time
import types
from google.appengine.ext import ndb
from google.appengine.datastore import datastore_rpc
from mapreduce import parameters
# Taskqueue task header for mr id. Use internal by MR.
_MR_ID_TASK_HEADER = "AE-MR-ID"
_MR_SHARD_ID_TASK_HEADER = "AE-MR-SHARD-ID"
# Callback task MR ID task header
CALLBACK_MR_ID_TASK_HEADER = "Mapreduce-Id"
# Ridiculous future UNIX epoch time, 500 years from now.
_FUTURE_TIME = 2**34
def _get_descending_key(gettime=time.time):
    """Return a key name lexically ordered by time descending.

    Scanning such keys in lexically ascending order yields rows in time
    descending order, which lets Datastore queries skip building a
    descending index.

    Args:
        gettime: time source, injectable for testing.

    Returns:
        A string key: centiseconds remaining until a far-future epoch,
        followed by a per-request (or random) suffix for uniqueness.
    """
    centiseconds_left = int((_FUTURE_TIME - gettime()) * 100)
    suffix = os.environ.get("REQUEST_ID_HASH")
    if not suffix:
        suffix = str(random.getrandbits(32))
    return "%d%s" % (centiseconds_left, suffix)
def _get_task_host():
    """Get the Host header value for all mr tasks.

    The task Host header determines which instance the task is routed to.
    CURRENT_VERSION_ID looks like "v7.368834058928280579"; CURRENT_MODULE_ID
    is the bare module name (possibly "default"); DEFAULT_VERSION_HOSTNAME
    is "app_id.appspot.com".

    Returns:
        "version.module.app_id.appspot.com", or "version.app_id.appspot.com"
        for the default module. The module component is dropped there because
        on apps without modules "version.default.app_id" would ignore
        "version" and treat "default" as the version; if no "default" version
        exists, the URL falls back to the default version.
    """
    version = os.environ["CURRENT_VERSION_ID"].split(".")[0]
    default_host = os.environ["DEFAULT_VERSION_HOSTNAME"]
    module = os.environ["CURRENT_MODULE_ID"]
    # Consistency fix: compare the already-read local instead of re-reading
    # the environment variable.
    if module == "default":
        return "%s.%s" % (version, default_host)
    return "%s.%s.%s" % (version, module, default_host)
def _get_task_headers(map_job_id,
                      mr_id_header_key=_MR_ID_TASK_HEADER):
    """Build the common headers for all mr tasks.

    Args:
        map_job_id: map job id.
        mr_id_header_key: header name under which to report the mr id.

    Returns:
        A dict with the mr-id header and the routing Host header.
    """
    headers = {"Host": _get_task_host()}
    headers[mr_id_header_key] = map_job_id
    return headers
def _enum(**enums):
    """Build an ad-hoc enum: a class whose attributes are the given kwargs."""
    return type("Enum", (), dict(enums))
def get_queue_name(queue_name):
    """Determine which queue MR should run on.

    Selection order:
      1. An explicitly supplied queue name.
      2. The queue this request was dispatched from (inherited), unless it
         is a special internal queue such as __cron.
      3. The configured default queue.

    MR pipeline users pass "queue_name" to pipeline.start; the pipeline runs
    there and MR simply inherits it.

    Args:
        queue_name: queue_name from user. Maybe None.

    Returns:
        The queue name to run on.
    """
    if queue_name:
        return queue_name
    inherited = os.environ.get("HTTP_X_APPENGINE_QUEUENAME",
                               parameters.config.QUEUE_NAME)
    if inherited.startswith("__"):
        # Special queues (e.g. __cron) cannot host user tasks; fall back.
        return parameters.config.QUEUE_NAME
    return inherited
def total_seconds(td):
    """Convert a timedelta to whole seconds, rounded up.

    Modeled on timedelta.total_seconds, which is only available from
    Python 2.7; any non-zero microsecond remainder rounds the result up.

    Args:
        td: a timedelta object.

    Returns:
        Total seconds within the timedelta, as an int, rounded up.
    """
    whole = td.days * 24 * 3600 + td.seconds
    return whole + 1 if td.microseconds else whole
def for_name(fq_name, recursive=False):
    """Find class/function/method specified by its fully qualified name.

    Fully qualified can be specified as:
        * <module_name>.<class_name>
        * <module_name>.<function_name>
        * <module_name>.<class_name>.<method_name> (an unbound method will be
          returned in this case).

    for_name works by doing __import__ for <module_name>, and looks for
    <class_name>/<function_name> in module's __dict__/attrs. If fully qualified
    name doesn't contain '.', the current module will be used.

    Args:
        fq_name: fully qualified name of something to find.
        recursive: run recursively or not (set internally when retrying with
            the parent path; affects which exception type is raised).

    Returns:
        class object or None if fq_name is None.

    Raises:
        ImportError: when specified module could not be loaded or the class
        was not found in the module.
    """
    # if "." not in fq_name:
    #   raise ImportError("'%s' is not a full-qualified name" % fq_name)
    if fq_name is None:
        return
    fq_name = str(fq_name)
    module_name = __name__
    short_name = fq_name
    if fq_name.rfind(".") >= 0:
        # Split on the LAST dot: everything before is the module path,
        # the remainder is the attribute to fetch from it.
        (module_name, short_name) = (fq_name[:fq_name.rfind(".")],
                                     fq_name[fq_name.rfind(".") + 1:])
    try:
        result = __import__(module_name, None, None, [short_name])
        return result.__dict__[short_name]
    except KeyError:
        # If we're recursively inside a for_name() chain, then we want to raise
        # this error as a key error so we can report the actual source of the
        # problem. If we're *not* recursively being called, that means the
        # module was found and the specific item could not be loaded, and thus
        # we want to raise an ImportError directly.
        if recursive:
            raise
        else:
            raise ImportError("Could not find '%s' on path '%s'" % (
                short_name, module_name))
    except ImportError:
        # module_name is not actually a module. Try for_name for it to figure
        # out what's this (e.g. "pkg.Class.method" where "pkg.Class" is not a
        # module but an attribute of "pkg").
        try:
            module = for_name(module_name, recursive=True)
            if hasattr(module, short_name):
                return getattr(module, short_name)
            else:
                # The module was found, but the function component is missing.
                raise KeyError()
        except KeyError:
            raise ImportError("Could not find '%s' on path '%s'" % (
                short_name, module_name))
        except ImportError:
            # This means recursive import attempts failed, thus we will raise the
            # first ImportError we encountered, since it's likely the most accurate.
            pass
        # Raise the original import error that caused all of this, since it is
        # likely the real cause of the overall problem.
        raise
def handler_for_name(fq_name):
    """Resolves and instantiates handler by fully qualified name.

    First resolves the name using for_name call. Then if it resolves to a class,
    instantiates a class, if it resolves to a method - instantiates the class and
    binds method to the instance.

    NOTE: Python 2 only -- ``types.ClassType`` (old-style classes) and
    ``im_class`` (unbound-method metadata) do not exist on Python 3.

    Args:
        fq_name: fully qualified name of something to find.

    Returns:
        handler instance which is ready to be called.
    """
    resolved_name = for_name(fq_name)
    if isinstance(resolved_name, (type, types.ClassType)):
        # create new instance if this is type
        return resolved_name()
    elif isinstance(resolved_name, types.MethodType):
        # bind the method: instantiate its class and look the method up on it
        return getattr(resolved_name.im_class(), resolved_name.__name__)
    else:
        return resolved_name
def try_serialize_handler(handler):
    """Try to serialize map/reduce handler.

    Args:
        handler: handler function/instance. Handler can be a function or an
            instance of a callable class. In the latter case, the handler will
            be serialized across slices to allow users to save states.

    Returns:
        serialized handler string or None.
    """
    # NOTE(review): because `or` binds looser than `and`, the __call__ check
    # below only applies to the new-style branch -- old-style instances
    # (types.InstanceType) are pickled without the callable check. Presumably
    # intentional/harmless, but confirm before restructuring.
    if (isinstance(handler, types.InstanceType) or  # old style class
        (isinstance(handler, object) and  # new style class
         not inspect.isfunction(handler) and
         not inspect.ismethod(handler)) and
        hasattr(handler, "__call__")):
        return pickle.dumps(handler)
    return None
def try_deserialize_handler(serialized_handler):
    """Reverse function of try_serialize_handler.

    Args:
        serialized_handler: serialized handler str or None.

    Returns:
        handler instance, or None for a falsy input.
    """
    if not serialized_handler:
        return None
    return pickle.loads(serialized_handler)
def is_generator(obj):
    """Return true if the object is generator or generator function.

    Generator function objects provides same attributes as functions.
    See isfunction.__doc__ for attributes listing.

    Adapted from Python 2.6.

    NOTE: Python 2 only -- ``func_code`` is spelled ``__code__`` on Python 3.

    Args:
        obj: an object to test.

    Returns:
        true if the object is generator function.
    """
    if isinstance(obj, types.GeneratorType):
        return True
    # CO_GENERATOR is the code-object flag set on generator functions.
    CO_GENERATOR = 0x20
    return bool(((inspect.isfunction(obj) or inspect.ismethod(obj)) and
                 obj.func_code.co_flags & CO_GENERATOR))
def get_short_name(fq_name):
    """Return the last dot-separated component of the name."""
    # The original spelled this as split(".")[-1:][0], a roundabout [-1].
    return fq_name.split(".")[-1]
def parse_bool(obj):
    """Return true if the object represents a truth value, false otherwise.

    For bool and numeric objects, uses Python's built-in bool function. For
    str objects, checks string against a list of possible truth values.
    http://docs.python.org/library/stdtypes.html

    Args:
        obj: object to determine boolean value of.

    Returns:
        Boolean value according to 5.1 of Python docs if object is not a str
        object. For str objects, return True if str is in the truth-value set
        and False otherwise.
    """
    if isinstance(obj, str):
        # isinstance (rather than `type(obj) is str`) also accepts
        # str subclasses, which behave identically here.
        return obj.lower() in ("true", "1", "yes", "t", "on")
    else:
        return bool(obj)
def create_datastore_write_config(mapreduce_spec):
    """Creates datastore config to use in write operations.

    Args:
        mapreduce_spec: current mapreduce specification as MapreduceSpec.

    Returns:
        an instance of datastore_rpc.Configuration to use for all write
        operations in the mapreduce.
    """
    if parse_bool(mapreduce_spec.params.get("force_writes", "false")):
        return datastore_rpc.Configuration(force_writes=True)
    # The dev server doesn't support force_writes.
    return datastore_rpc.Configuration()
def _set_ndb_cache_policy():
    """Tell NDB to never cache anything in memcache or in-process.

    This ensures that entities fetched from Datastore input_readers via NDB
    will not bloat up the request memory size and Datastore Puts will avoid
    doing calls to memcache. Without this you get soft memory limit exits,
    which hurts overall throughput.
    """
    ndb_ctx = ndb.get_context()
    # Disable both caching layers on the current NDB context.
    ndb_ctx.set_cache_policy(lambda key: False)
    ndb_ctx.set_memcache_policy(lambda key: False)
def _obj_to_path(obj):
    """Return the fully qualified dotted path to the object.

    Args:
        obj: a new style top level class, or a top level function (no inner
            function or static method), or None.

    Returns:
        "module.name" for the object, or None when obj is None.

    Raises:
        TypeError: when argument obj has unsupported type.
        ValueError: when obj can't be discovered on the top level.
    """
    if obj is None:
        return None
    if not (inspect.isclass(obj) or inspect.isfunction(obj)):
        raise TypeError("Unexpected type %s." % type(obj))
    # The object must be reachable as an attribute of its own module,
    # otherwise it cannot be re-resolved later by path.
    if getattr(sys.modules[obj.__module__], obj.__name__, None) is None:
        raise ValueError(
            "Object %r must be defined on the top level of a module." % obj)
    return "%s.%s" % (obj.__module__, obj.__name__)
def strip_prefix_from_items(prefix, items):
    """Strip `prefix` from the start of each item that carries it.

    Args:
        prefix: the string to remove from the beginning of each item.
        items: a list of strings that may or may not start with the prefix.

    Returns:
        A new list (same order) with the prefix removed where present.
    """
    cut = len(prefix)
    return [item[cut:] if item.startswith(prefix) else item
            for item in items]
| bsd-3-clause |
wwj718/murp-edx | common/test/acceptance/fixtures/discussion.py | 5 | 3098 | """
Tools for creating discussion content fixture data.
"""
from datetime import datetime
import json
import factory
import requests
from . import COMMENTS_STUB_URL
class ContentFactory(factory.Factory):
    """Base factory for stub comments-service content dicts."""
    FACTORY_FOR = dict
    id = None
    user_id = "dummy-user-id"
    username = "dummy-username"
    course_id = "dummy-course-id"
    commentable_id = "dummy-commentable-id"
    anonymous = False
    anonymous_to_peers = False
    at_position_list = []
    abuse_flaggers = []
    # NOTE: these timestamps are evaluated once, at class-definition time,
    # so all generated objects share the same created_at/updated_at.
    created_at = datetime.utcnow().isoformat()
    updated_at = datetime.utcnow().isoformat()
    endorsed = False
    closed = False
    votes = {"up_count": 0}
class Thread(ContentFactory):
    """Factory for discussion thread dicts."""
    comments_count = 0
    unread_comments_count = 0
    title = "dummy thread title"
    body = "dummy thread body"
    type = "thread"
    group_id = None
    pinned = False
    read = False
class Comment(ContentFactory):
    """Factory for comment dicts attached to a thread."""
    thread_id = None
    depth = 0
    type = "comment"
    body = "dummy comment body"
class Response(Comment):
    """Factory for top-level responses (comments at depth 1)."""
    depth = 1
    body = "dummy response body"
class SearchResult(factory.Factory):
    """Factory for stub discussion-search result dicts."""
    FACTORY_FOR = dict
    discussion_data = []
    annotated_content_info = {}
    num_pages = 1
    page = 1
    corrected_text = None
class DiscussionContentFixture(object):
    """Abstract fixture that can push its data to the stub comments service."""

    def push(self):
        """
        Push the data to the stub comments service.
        """
        requests.put(
            '{}/set_config'.format(COMMENTS_STUB_URL),
            data=self.get_config_data()
        )

    def get_config_data(self):
        """
        return a dictionary with the fixture's data serialized for PUTting to the stub server's config endpoint.
        """
        raise NotImplementedError()
class SingleThreadViewFixture(DiscussionContentFixture):
    """Fixture describing a single discussion thread and its responses."""

    def __init__(self, thread):
        self.thread = thread

    def addResponse(self, response, comments=None):
        """Attach `response` (with optional `comments`) to the thread.

        `comments` defaults to None instead of a mutable `[]` literal so the
        default list cannot be shared -- and later mutated -- across calls.
        """
        if comments is None:
            comments = []
        response['children'] = comments
        self.thread.setdefault('children', []).append(response)
        # The response itself counts as one comment.
        self.thread['comments_count'] += len(comments) + 1

    def _get_comment_map(self):
        """
        Generate a dict mapping each response/comment in the thread
        by its `id`.
        """
        def _visit(obj):
            res = []
            for child in obj.get('children', []):
                res.append((child['id'], child))
                if 'children' in child:
                    res += _visit(child)
            return res
        return dict(_visit(self.thread))

    def get_config_data(self):
        """Serialize the thread and its comment map for the stub server."""
        return {
            "threads": json.dumps({self.thread['id']: self.thread}),
            "comments": json.dumps(self._get_comment_map())
        }
class UserProfileViewFixture(DiscussionContentFixture):
    """Fixture for the user-profile view: a list of active threads."""

    def __init__(self, threads):
        self.threads = threads

    def get_config_data(self):
        return {"active_threads": json.dumps(self.threads)}
class SearchResultFixture(DiscussionContentFixture):
    """Fixture for a stubbed discussion search result."""

    def __init__(self, result):
        self.result = result

    def get_config_data(self):
        return {"search_result": json.dumps(self.result)}
| agpl-3.0 |
wdv4758h/ZipPy | lib-python/3/ctypes/test/test_returnfuncptrs.py | 49 | 1567 | import unittest
from ctypes import *
import _ctypes_test
class ReturnFuncPtrTestCase(unittest.TestCase):
    """Tests calling a C function pointer returned from a DLL."""

    def test_with_prototype(self):
        # The _ctypes_test shared lib/dll exports quite some functions for testing.
        # The get_strchr function returns a *pointer* to the C strchr function.
        dll = CDLL(_ctypes_test.__file__)
        get_strchr = dll.get_strchr
        get_strchr.restype = CFUNCTYPE(c_char_p, c_char_p, c_char)
        strchr = get_strchr()
        self.assertEqual(strchr(b"abcdef", b"b"), b"bcdef")
        self.assertEqual(strchr(b"abcdef", b"x"), None)
        # c_char also accepts small ints (98 == ord('b'), 107 == ord('k')).
        self.assertEqual(strchr(b"abcdef", 98), b"bcdef")
        self.assertEqual(strchr(b"abcdef", 107), None)
        self.assertRaises(ArgumentError, strchr, b"abcdef", 3.0)
        self.assertRaises(TypeError, strchr, b"abcdef")

    def test_without_prototype(self):
        dll = CDLL(_ctypes_test.__file__)
        get_strchr = dll.get_strchr
        # the default 'c_int' would not work on systems where sizeof(int) != sizeof(void *)
        get_strchr.restype = c_void_p
        addr = get_strchr()
        # _CFuncPtr instances are now callable with an integer argument
        # which denotes a function address:
        strchr = CFUNCTYPE(c_char_p, c_char_p, c_char)(addr)
        self.assertTrue(strchr(b"abcdef", b"b"), "bcdef")
        self.assertEqual(strchr(b"abcdef", b"x"), None)
        self.assertRaises(ArgumentError, strchr, b"abcdef", 3.0)
        self.assertRaises(TypeError, strchr, b"abcdef")
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
xiangel/hue | desktop/core/ext-py/Django-1.6.10/django/http/utils.py | 134 | 1501 | """
Functions that modify an HTTP request or response in some way.
"""
# This group of functions are run as part of the response handling, after
# everything else, including all response middleware. Think of them as
# "compulsory response middleware". Be careful about what goes here, because
# it's a little fiddly to override this behavior, so they should be truly
# universally applicable.
def fix_location_header(request, response):
    """
    Rewrite any relative Location header into an absolute URI, as required
    by RFC 2616, section 14.30.

    Code constructing response objects is free to insert relative paths;
    this function converts them to absolute paths.
    """
    if 'Location' in response and request.get_host():
        absolute = request.build_absolute_uri(response['Location'])
        response['Location'] = absolute
    return response
def conditional_content_removal(request, response):
    """
    Strip response bodies where HTTP forbids them: 1xx, 204 and 304
    responses, and any response to a HEAD request. Ensures compliance
    with RFC 2616, section 4.3.
    """
    bodyless_status = (100 <= response.status_code < 200 or
                       response.status_code in (204, 304))
    if bodyless_status:
        if response.streaming:
            response.streaming_content = []
        else:
            response.content = b''
            response['Content-Length'] = '0'
    if request.method == 'HEAD':
        if response.streaming:
            response.streaming_content = []
        else:
            response.content = b''
    return response
| apache-2.0 |
canavandl/bokeh | examples/embed/widget.py | 43 | 4239 | class Population(object):
year = 2010
location = "World"
def __init__(self):
    """Create a Bokeh server session/document and load the population data."""
    from bokeh.document import Document
    from bokeh.session import Session
    from bokeh.models import ColumnDataSource
    from bokeh.sampledata.population import load_population
    self.document = Document()
    self.session = Session()
    self.session.use_doc('population')
    self.session.load_document(self.document)
    self.df = load_population()
    # Starts empty; filled per year/location by update_pyramid().
    self.source_pyramid = ColumnDataSource(data=dict())
def render(self):
    """Build the pyramid plot and layout, then populate the data source."""
    self.pyramid_plot()
    self.create_layout()
    self.document.add(self.layout)
    self.update_pyramid()
def pyramid_plot(self):
    """Build the population-pyramid plot: male quads left, female right."""
    from bokeh.models import (
        Plot, DataRange1d, LinearAxis, Grid, Legend,
        SingleIntervalTicker
    )
    from bokeh.models.glyphs import Quad
    xdr = DataRange1d()
    ydr = DataRange1d()
    self.plot = Plot(title=None, x_range=xdr, y_range=ydr,
                     plot_width=600, plot_height=600)
    xaxis = LinearAxis()
    self.plot.add_layout(xaxis, 'below')
    # Age groups come in 5-year buckets, so tick every 5.
    yaxis = LinearAxis(ticker=SingleIntervalTicker(interval=5))
    self.plot.add_layout(yaxis, 'left')
    self.plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
    self.plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))
    # Male percentages are negative (see update_pyramid), so their quads
    # extend left of zero; female quads extend right.
    male_quad = Quad(left="male", right=0, bottom="groups", top="shifted",
                     fill_color="#3B8686")
    male_quad_glyph = self.plot.add_glyph(self.source_pyramid, male_quad)
    female_quad = Quad(left=0, right="female", bottom="groups", top="shifted",
                       fill_color="#CFF09E")
    female_quad_glyph = self.plot.add_glyph(self.source_pyramid, female_quad)
    self.plot.add_layout(Legend(legends=dict(Male=[male_quad_glyph],
                                             Female=[female_quad_glyph])))
def on_year_change(self, obj, attr, old, new):
    """Widget callback: switch the displayed year and redraw."""
    self.year = int(new)
    self.update_pyramid()
def on_location_change(self, obj, attr, old, new):
    """Widget callback: switch the displayed location and redraw."""
    self.location = new
    self.update_pyramid()
def create_layout(self):
    """Assemble year/location selector widgets above the plot."""
    from bokeh.models.widgets import Select, HBox, VBox
    years = list(map(str, sorted(self.df.Year.unique())))
    locations = sorted(self.df.Location.unique())
    year_select = Select(title="Year:", value="2010", options=years)
    location_select = Select(title="Location:", value="World", options=locations)
    year_select.on_change('value', self.on_year_change)
    location_select.on_change('value', self.on_location_change)
    controls = HBox(year_select, location_select)
    self.layout = VBox(controls, self.plot)
def update_pyramid(self):
    """Recompute the pyramid data for the current year/location and push it."""
    pyramid = self.df[(self.df.Location == self.location) & (self.df.Year == self.year)]
    male = pyramid[pyramid.Sex == "Male"]
    female = pyramid[pyramid.Sex == "Female"]
    total = male.Value.sum() + female.Value.sum()
    # Male shares are negated so their quads render on the left of zero.
    male_percent = -male.Value / total
    female_percent = female.Value / total
    groups = male.AgeGrpStart.tolist()
    # Each quad spans from its group start to the next group start
    # (the last group is extended by the 5-year bucket width).
    shifted = groups[1:] + [groups[-1] + 5]
    self.source_pyramid.data = dict(
        groups=groups,
        shifted=shifted,
        male=male_percent,
        female=female_percent,
    )
    self.session.store_document(self.document)
import bokeh.embed as embed

# Build the app, push it to the Bokeh server, and grab an autoload tag.
pop = Population()
pop.render()
tag = embed.autoload_server(pop.layout, pop.session)
html = """
<html>
<head></head>
<body>
%s
</body>
</html>
"""
html = html % (tag)
# Write a minimal host page that embeds the server-backed plot.
with open("population_embed.html", "w+") as f:
    f.write(html)
print("""
To view this example, run
python -m SimpleHTTPServer (or http.server on python 3)
in this directory, then navigate to
http://localhost:8000/population_embed.html
""")
import time
link = pop.session.object_link(pop.document.context)
print("""You can also go to
%s
to see the plots on the Bokeh server directly""" % link)
# Poll the server document so widget callbacks keep firing until Ctrl-C.
try:
    while True:
        pop.session.load_document(pop.document)
        time.sleep(0.1)
except KeyboardInterrupt:
    print()
antoine-de/navitia | source/jormungandr/jormungandr/interfaces/v1/serializer/fields.py | 1 | 7355 | # This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
import operator
import serpy
from flask import g
from jormungandr.interfaces.v1.make_links import create_internal_link
from jormungandr.interfaces.v1.serializer import jsonschema
from jormungandr.interfaces.v1.serializer.base import EnumField, PbNestedSerializer, DoubleToStringField
from jormungandr.interfaces.v1.serializer.jsonschema import IntField
from jormungandr.interfaces.v1.serializer.jsonschema.fields import StrField, BoolField, Field, DateTimeType
from navitiacommon import response_pb2
point_2D_schema = {
'type': 'array',
'items': {
'type': 'array',
'items': {
'type': 'number',
'format': 'float'
}
}
}
class MultiLineStringField(jsonschema.Field):
    """Serialize a protobuf multi-line geometry as a GeoJSON MultiLineString."""

    class MultiLineStringSchema(serpy.Serializer):
        """used not as a serializer, but only for the schema"""
        type = StrField()
        coordinates = jsonschema.Field(schema_metadata={
            'type': 'array',
            'items': point_2D_schema
        })

    def __init__(self, **kwargs):
        super(MultiLineStringField, self).__init__(schema_type=MultiLineStringField.MultiLineStringSchema,
                                                   **kwargs)

    def to_value(self, value):
        # GeoJSON output can be disabled per-request via the flask global;
        # in that case the field is omitted (None).
        if getattr(g, 'disable_geojson', False):
            return None
        lines = []
        for l in value.lines:
            lines.append([[c.lon, c.lat] for c in l.coordinates])
        return {
            "type": "MultiLineString",
            "coordinates": lines,
        }
class PropertySerializer(serpy.Serializer):
    """Serialize a generic name/value property pair."""
    name = jsonschema.Field(schema_type=str)
    value = jsonschema.Field(schema_type=str)
class FeedPublisherSerializer(PbNestedSerializer):
    """Serialize a data feed publisher (id, name, url, license)."""
    id = StrField(display_none=True)
    name = StrField()
    url = StrField()
    license = StrField()
class ErrorSerializer(PbNestedSerializer):
    """Serialize an API error: enum id plus human-readable message."""
    id = EnumField(attr='id', display_none=True)
    message = StrField()
class CoordSerializer(serpy.Serializer):
    """Serialize a coordinate; lon/lat doubles are rendered as strings."""
    lon = DoubleToStringField()
    lat = DoubleToStringField()
class CodeSerializer(serpy.Serializer):
    """Serialize an external code (type/value pair) of a pt object."""
    type = jsonschema.Field(schema_type=str)
    value = jsonschema.Field(schema_type=str)
class CommentSerializer(serpy.Serializer):
    """Serialize a single comment (value/type pair)."""
    value = jsonschema.Field(schema_type=str)
    type = jsonschema.Field(schema_type=str)
class FirstCommentField(jsonschema.Field):
    """Expose the first comment of a comments list as a single field.

    For compatibility issue we want to continue to output a 'comment' field
    even if now we have a list of comments, so we take the first one.
    (This text was previously a stray string statement between the methods,
    where it was dead code; it is now the class docstring.)
    """

    def __init__(self, **kwargs):
        super(FirstCommentField, self).__init__(schema_type=str, **kwargs)

    def as_getter(self, serializer_field_name, serializer_cls):
        op = operator.attrgetter(self.attr or serializer_field_name)

        def getter(v):
            # First element of the comments list, or None when empty.
            return next(iter(op(v)), None)
        return getter

    def to_value(self, item):
        if item:
            return item.value
        else:
            return None
class RoundedField(IntField):
    """Integer field that rounds its input to the nearest whole number."""
    def to_value(self, value):
        # Preserve None so absent values stay absent in the output.
        if value is None:
            return None
        return int(round(value))
class LinkSchema(serpy.Serializer):
    """This Class is not used as a serializer, but here only to get the schema of a link"""
    id = StrField()
    title = StrField()
    rel = StrField()
    templated = BoolField()
    internal = BoolField()
    type = StrField()
class DisruptionLinkSerializer(jsonschema.Field):
    """
    Add link to disruptions on a pt object
    """
    def __init__(self, **kwargs):
        super(DisruptionLinkSerializer, self).__init__(schema_type=LinkSchema(many=True), **kwargs)

    def to_value(self, value):
        # 'value' is a list of disruption uris; each becomes an internal link.
        return [create_internal_link(_type="disruption", rel="disruptions", id=uri)
                for uri in value]
class PaginationSerializer(serpy.Serializer):
    """Serialize pagination metadata (totals and per-page counts)."""
    total_result = IntField(attr='totalResult', display_none=True)
    start_page = IntField(attr='startPage', display_none=True)
    items_per_page = IntField(attr='itemsPerPage', display_none=True)
    items_on_page = IntField(attr='itemsOnPage', display_none=True)
class SectionGeoJsonField(jsonschema.Field):
    """Serialize a journey section's shape as a GeoJSON LineString."""

    class SectionGeoJsonSchema(serpy.Serializer):
        """used not as a serializer, but only for the schema"""
        type = StrField()
        coordinates = jsonschema.Field(schema_metadata=point_2D_schema)

    def __init__(self, **kwargs):
        super(SectionGeoJsonField, self).__init__(schema_type=SectionGeoJsonField.SectionGeoJsonSchema,
                                                  **kwargs)

    def as_getter(self, serializer_field_name, serializer_cls):
        # The whole section object is needed, not one attribute of it.
        def getter(v):
            return v
        return getter

    def to_value(self, value):
        if not hasattr(value, 'type'):
            return None
        coords = []
        # Pick the coordinate source according to the section type; unknown
        # types produce no geojson at all (implicit None).
        if value.type == response_pb2.STREET_NETWORK:
            coords = value.street_network.coordinates
        elif value.type == response_pb2.CROW_FLY and len(value.shape) != 0:
            coords = value.shape
        elif value.type == response_pb2.PUBLIC_TRANSPORT:
            coords = value.shape
        elif value.type == response_pb2.TRANSFER:
            coords.append(value.origin.stop_point.coord)
            coords.append(value.destination.stop_point.coord)
        else:
            return
        response = {
            "type": "LineString",
            "coordinates": [],
            "properties": [{
                # str() keeps HasField's argument a native str on py2 and py3.
                "length": 0 if not value.HasField(str("length")) else value.length
            }]
        }
        for coord in coords:
            response["coordinates"].append([coord.lon, coord.lat])
        return response
class NoteSerializer(serpy.Serializer):
    """Serialize a note attached to a pt object."""
    type = jsonschema.Field(schema_type=str)
    id = jsonschema.Field(schema_type=str, display_none=True)
    value = jsonschema.Field(schema_type=str)
    category = jsonschema.Field(schema_type=str, schema_metadata={'enum': ['comment', 'terminus']})
class ExceptionSerializer(serpy.Serializer):
    """Serialize a calendar exception (dated service modification)."""
    type = jsonschema.Field(schema_type=str)
    id = jsonschema.Field(schema_type=str, display_none=True)
    date = Field(attr='date', schema_type=DateTimeType)
| agpl-3.0 |
geekboxzone/lollipop_external_chromium_org | tools/memory_inspector/memory_inspector/backends/memdump_parser.py | 36 | 3232 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This parser turns the am memdump output into a |memory_map.Map| instance."""
import base64
import logging
import re
from memory_inspector.core import memory_map
def Parse(lines):
    """Parses the output of memdump.

    memdump (see chrome/src/tools/memdump) is a Linux/Android binary meant to be
    executed on the target device which extracts memory map information about one
    or more processes. In principle is can be seen as an alternative to cat-ing
    /proc/PID/smaps, but with extra features (multiprocess accounting and resident
    pages reporting).

    The expected memdump output looks like this:
    ----------------------------------------------------------------------------
    [ PID=1234]
    1000-2000 r-xp 0 private_unevictable=4096 private=8192 shared_app=[] \
        shared_other_unevictable=4096 shared_other=4096 "/lib/foo.so" [v///fv0D]
    ... other entries like the one above.
    ----------------------------------------------------------------------------
    The output is extremely similar to /proc/PID/smaps, with the following notes:
     - unevictable has pretty much the same meaning of "dirty", in VM terms.
     - private and shared_other are cumulative. This means the the "clean" part
       must be calculated as difference of (private - private_unevictable).
     - The final field [v///fv0D] is a base64 encoded bitmap which contains the
       information about which pages inside the mapping are resident (present).

    Args:
        lines: array of strings containing memdump output.

    Returns:
        An instance of |memory_map.Map|.
    """
    RE = (r'^([0-9a-f]+)-([0-9a-f]+)\s+'
          r'([rwxps-]{4})\s+'
          r'([0-9a-f]+)\s+'
          r'private_unevictable=(\d+) private=(\d+) '
          r'shared_app=(.*?) '
          r'shared_other_unevictable=(\d+) shared_other=(\d+) '
          r'\"(.*)\" '
          # Bug fix: the original class was [a-zA-Z0-9+/=-_:], where '=-_' is
          # the character RANGE '=' (0x3D) through '_' (0x5F), silently
          # matching unintended characters like '>', '?', '@' and '[', while
          # NOT matching a literal '-'. Escaping the hyphen matches exactly
          # the intended literals '=', '-' and '_'.
          r'\[([a-zA-Z0-9+/=\-_:]*)\]$')
    map_re = re.compile(RE)
    skip_first_n_lines = 1  # Skip the "[ PID=...]" banner line.
    maps = memory_map.Map()

    for line in lines:
        line = line.rstrip('\r\n')

        if skip_first_n_lines > 0:
            skip_first_n_lines -= 1
            continue

        m = map_re.match(line)
        if not m:
            logging.warning('Skipping unrecognized memdump line "%s"' % line)
            continue

        start = int(m.group(1), 16)
        end = int(m.group(2), 16) - 1  # end addr is inclusive in memdump output.
        if (start > end):
            # Sadly, this actually happened. Probably a kernel bug, see b/17402069.
            logging.warning('Skipping unfeasible mmap "%s"' % line)
            continue
        entry = memory_map.MapEntry(
            start=start,
            end=end,
            prot_flags=m.group(3),
            mapped_file=m.group(10),
            mapped_offset=int(m.group(4), 16))
        # private/shared_other are cumulative: derive "clean" by subtraction.
        entry.priv_dirty_bytes = int(m.group(5))
        entry.priv_clean_bytes = int(m.group(6)) - entry.priv_dirty_bytes
        entry.shared_dirty_bytes = int(m.group(8))
        entry.shared_clean_bytes = int(m.group(9)) - entry.shared_dirty_bytes
        # Decode the base64 resident-pages bitmap into a list of byte values.
        entry.resident_pages = [ord(c) for c in base64.b64decode(m.group(11))]
        maps.Add(entry)

    return maps
| bsd-3-clause |
berrange/nova | nova/tests/network/test_manager.py | 4 | 152192 | # Copyright 2011 Rackspace
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import fixtures
import mock
import mox
import netaddr
from oslo.config import cfg
from oslo.db import exception as db_exc
from oslo import messaging
import six
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova import ipv6
from nova.network import floating_ips
from nova.network import linux_net
from nova.network import manager as network_manager
from nova.network import model as net_model
from nova import objects
from nova.objects import quotas as quotas_obj
from nova.objects import virtual_interface as vif_obj
from nova.openstack.common import importutils
from nova.openstack.common import log as logging
from nova.openstack.common import processutils
from nova import quota
from nova import test
from nova.tests import fake_instance
from nova.tests import fake_ldap
from nova.tests import fake_network
from nova.tests import matchers
from nova.tests.objects import test_fixed_ip
from nova.tests.objects import test_floating_ip
from nova.tests.objects import test_network
from nova.tests.objects import test_service
from nova import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)

# Host name shared by the fixture networks and the fake instances below.
HOST = "testhost"
# Canonical instance uuid used throughout the tests; it matches the uuid of
# networks[0] so uuid-vs-id lookups can be exercised against the same row.
FAKEUUID = "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa"


# Short alias for building fake DB instance dicts.
fake_inst = fake_instance.fake_db_instance


# Two fake network rows (192.168.0.0/24 and 192.168.1.0/24) in the shape of
# the nova `networks` DB table; tests merge these into
# test_network.fake_network to fake db-layer returns.
networks = [{'id': 0,
             'uuid': FAKEUUID,
             'label': 'test0',
             'injected': False,
             'multi_host': False,
             'cidr': '192.168.0.0/24',
             'cidr_v6': '2001:db8::/64',
             'gateway_v6': '2001:db8::1',
             'netmask_v6': '64',
             'netmask': '255.255.255.0',
             'bridge': 'fa0',
             'bridge_interface': 'fake_fa0',
             'gateway': '192.168.0.1',
             'dhcp_server': '192.168.0.1',
             'broadcast': '192.168.0.255',
             'dns1': '192.168.0.1',
             'dns2': '192.168.0.2',
             'vlan': None,
             'host': HOST,
             'project_id': 'fake_project',
             'vpn_public_address': '192.168.0.2',
             'vpn_public_port': '22',
             'vpn_private_address': '10.0.0.2'},
            {'id': 1,
             'uuid': 'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
             'label': 'test1',
             'injected': False,
             'multi_host': False,
             'cidr': '192.168.1.0/24',
             'cidr_v6': '2001:db9::/64',
             'gateway_v6': '2001:db9::1',
             'netmask_v6': '64',
             'netmask': '255.255.255.0',
             'bridge': 'fa1',
             'bridge_interface': 'fake_fa1',
             'gateway': '192.168.1.1',
             'dhcp_server': '192.168.1.1',
             'broadcast': '192.168.1.255',
             'dns1': '192.168.0.1',
             'dns2': '192.168.0.2',
             'vlan': None,
             'host': HOST,
             'project_id': 'fake_project',
             'vpn_public_address': '192.168.1.2',
             'vpn_public_port': '22',
             'vpn_private_address': '10.0.0.2'}]


# Fake fixed-ip rows: one v4 address on each network above plus one v6
# address on networks[1].  All share id 0 — the tests never look ips up
# by primary key.
fixed_ips = [{'id': 0,
              'network_id': 0,
              'address': '192.168.0.100',
              'instance_uuid': 0,
              'allocated': False,
              'virtual_interface_id': 0,
              'floating_ips': []},
             {'id': 0,
              'network_id': 1,
              'address': '192.168.1.100',
              'instance_uuid': 0,
              'allocated': False,
              'virtual_interface_id': 0,
              'floating_ips': []},
             {'id': 0,
              'network_id': 1,
              'address': '2001:db9:0:1::10',
              'instance_uuid': 0,
              'allocated': False,
              'virtual_interface_id': 0,
              'floating_ips': []}]


# Minimal flavor row; only rxtx_cap is consulted by the network code.
flavor = {'id': 0,
          'rxtx_cap': 3}


# Template for a floating-ip row attached to fixed_ips[0].
floating_ip_fields = {'id': 0,
                      'address': '192.168.10.100',
                      'pool': 'nova',
                      'interface': 'eth0',
                      'fixed_ip_id': 0,
                      'project_id': None,
                      'auto_assigned': False}

# One virtual interface per fixture network (plus one on a third network),
# all owned by the same fake instance.
vifs = [{'id': 0,
         'created_at': None,
         'updated_at': None,
         'deleted_at': None,
         'deleted': 0,
         'address': 'DE:AD:BE:EF:00:00',
         'uuid': '00000000-0000-0000-0000-0000000000000000',
         'network_id': 0,
         'instance_uuid': 0},
        {'id': 1,
         'created_at': None,
         'updated_at': None,
         'deleted_at': None,
         'deleted': 0,
         'address': 'DE:AD:BE:EF:00:01',
         'uuid': '00000000-0000-0000-0000-0000000000000001',
         'network_id': 1,
         'instance_uuid': 0},
        {'id': 2,
         'created_at': None,
         'updated_at': None,
         'deleted_at': None,
         'deleted': 0,
         'address': 'DE:AD:BE:EF:00:02',
         'uuid': '00000000-0000-0000-0000-0000000000000002',
         'network_id': 2,
         'instance_uuid': 0}]
class FlatNetworkTestCase(test.TestCase):
    """Tests for the flat (no-DHCP, no-VLAN) network manager.

    These tests drive ``network_manager.FlatManager`` against the
    module-level ``networks``/``fixed_ips``/``vifs`` fixtures.  Most of
    them stub the db layer with mox record/replay expectations, so the
    exact order of the ``db.*`` calls inside each test is significant.
    """

    def setUp(self):
        super(FlatNetworkTestCase, self).setUp()
        self.tempdir = self.useFixture(fixtures.TempDir()).path
        self.flags(log_dir=self.tempdir)
        self.flags(use_local=True, group='conductor')
        self.network = network_manager.FlatManager(host=HOST)
        # Empty dns domain so instance entries are created under ''.
        self.network.instance_dns_domain = ''
        self.network.db = db
        self.context = context.RequestContext('testuser', 'testproject',
                                              is_admin=False)

    def test_get_instance_nw_info(self):
        fake_get_instance_nw_info = fake_network.fake_get_instance_nw_info

        # An instance with no allocated networks yields empty nw_info.
        nw_info = fake_get_instance_nw_info(self.stubs, 0, 2)
        self.assertFalse(nw_info)

        nw_info = fake_get_instance_nw_info(self.stubs, 1, 2)

        # Compare every vif in the network model against the values the
        # fake_network generator is expected to produce for network `nid`.
        for i, vif in enumerate(nw_info):
            nid = i + 1
            check = {'bridge': 'fake_br%d' % nid,
                     'cidr': '192.168.%s.0/24' % nid,
                     'cidr_v6': '2001:db8:0:%x::/64' % nid,
                     'id': '00000000-0000-0000-0000-00000000000000%02d' % nid,
                     'multi_host': False,
                     'injected': False,
                     'bridge_interface': None,
                     'vlan': None,
                     'broadcast': '192.168.%d.255' % nid,
                     'dhcp_server': '192.168.1.1',
                     'dns': ['192.168.%d.3' % nid, '192.168.%d.4' % nid],
                     'gateway': '192.168.%d.1' % nid,
                     'gateway_v6': '2001:db8:0:1::1',
                     'label': 'test%d' % nid,
                     'mac': 'DE:AD:BE:EF:00:%02x' % nid,
                     'rxtx_cap': 30,
                     'vif_type': net_model.VIF_TYPE_BRIDGE,
                     'vif_devname': None,
                     'vif_uuid':
                        '00000000-0000-0000-0000-00000000000000%02d' % nid,
                     'ovs_interfaceid': None,
                     'qbh_params': None,
                     'qbg_params': None,
                     'should_create_vlan': False,
                     'should_create_bridge': False,
                     'ip': '192.168.%d.%03d' % (nid, nid + 99),
                     'ip_v6': '2001:db8:0:1::%x' % nid,
                     'netmask': '255.255.255.0',
                     'netmask_v6': 64,
                     'physical_network': None,
                     }

            network = vif['network']
            net_v4 = vif['network']['subnets'][0]
            net_v6 = vif['network']['subnets'][1]

            # Flatten the network model into a plain dict so it can be
            # compared with `check` via DictMatches.
            vif_dict = dict(bridge=network['bridge'],
                            cidr=net_v4['cidr'],
                            cidr_v6=net_v6['cidr'],
                            id=vif['id'],
                            multi_host=network.get_meta('multi_host', False),
                            injected=network.get_meta('injected', False),
                            bridge_interface=
                                network.get_meta('bridge_interface'),
                            vlan=network.get_meta('vlan'),
                            broadcast=str(net_v4.as_netaddr().broadcast),
                            dhcp_server=network.get_meta('dhcp_server',
                                net_v4['gateway']['address']),
                            dns=[ip['address'] for ip in net_v4['dns']],
                            gateway=net_v4['gateway']['address'],
                            gateway_v6=net_v6['gateway']['address'],
                            label=network['label'],
                            mac=vif['address'],
                            rxtx_cap=vif.get_meta('rxtx_cap'),
                            vif_type=vif['type'],
                            vif_devname=vif.get('devname'),
                            vif_uuid=vif['id'],
                            ovs_interfaceid=vif.get('ovs_interfaceid'),
                            qbh_params=vif.get('qbh_params'),
                            qbg_params=vif.get('qbg_params'),
                            should_create_vlan=
                                network.get_meta('should_create_vlan', False),
                            should_create_bridge=
                                network.get_meta('should_create_bridge',
                                                 False),
                            ip=net_v4['ips'][i]['address'],
                            ip_v6=net_v6['ips'][i]['address'],
                            netmask=str(net_v4.as_netaddr().netmask),
                            netmask_v6=net_v6.as_netaddr()._prefixlen,
                            physical_network=
                                network.get_meta('physical_network', None))

            self.assertThat(vif_dict, matchers.DictMatches(check))

    def test_validate_networks(self):
        # Both requested fixed ips exist, are unassigned, and live on the
        # requested networks, so validation must pass.
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address')

        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               '192.168.1.100'),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                               '192.168.0.100')]
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                mox.IgnoreArg()).AndReturn(
                    [dict(test_network.fake_network, **net)
                     for net in networks])

        # NOTE: the lookups are expected in request order — networks[1]
        # first, then networks[0].
        ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[1])
        ip['network'] = dict(test_network.fake_network,
                             **networks[1])
        ip['instance_uuid'] = None
        db.fixed_ip_get_by_address(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   columns_to_join=mox.IgnoreArg()
                                   ).AndReturn(ip)
        ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[0])
        ip['network'] = dict(test_network.fake_network,
                             **networks[0])
        ip['instance_uuid'] = None
        db.fixed_ip_get_by_address(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   columns_to_join=mox.IgnoreArg()
                                   ).AndReturn(ip)

        self.mox.ReplayAll()
        self.network.validate_networks(self.context, requested_networks)

    def test_validate_networks_valid_fixed_ipv6(self):
        # A v6 fixed address is validated the same way as v4.
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        self.mox.StubOutWithMock(db, 'fixed_ip_get_by_address')

        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               '2001:db9:0:1::10')]
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                mox.IgnoreArg()).AndReturn(
                    [dict(test_network.fake_network, **networks[1])])

        ip = dict(test_fixed_ip.fake_fixed_ip, **fixed_ips[2])
        ip['network'] = dict(test_network.fake_network,
                             **networks[1])
        ip['instance_uuid'] = None
        db.fixed_ip_get_by_address(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   columns_to_join=mox.IgnoreArg()
                                   ).AndReturn(ip)

        self.mox.ReplayAll()
        self.network.validate_networks(self.context, requested_networks)

    def test_validate_reserved(self):
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        nets = self.network.create_networks(context_admin, 'fake',
                                            '192.168.0.0/24', False, 1,
                                            256, None, None, None, None, None)
        self.assertEqual(1, len(nets))
        network = nets[0]
        # Default reservation: network addr, gateway, dhcp, broadcast.
        self.assertEqual(4, db.network_count_reserved_ips(context_admin,
                         network['id']))

    def test_validate_reserved_start_end(self):
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        nets = self.network.create_networks(context_admin, 'fake',
                                            '192.168.0.0/24', False, 1,
                                            256, dhcp_server='192.168.0.11',
                                            allowed_start='192.168.0.10',
                                            allowed_end='192.168.0.245')
        self.assertEqual(1, len(nets))
        network = nets[0]
        # gateway defaults to beginning of allowed_start
        self.assertEqual('192.168.0.10', network['gateway'])
        # vpn_server doesn't conflict with dhcp_start
        self.assertEqual('192.168.0.12', network['vpn_private_address'])
        # dhcp_start doesn't conflict with dhcp_server
        self.assertEqual('192.168.0.13', network['dhcp_start'])
        # NOTE(vish): 10 from the beginning, 10 from the end, and
        #             1 for the gateway, 1 for the dhcp server,
        #             1 for the vpn server
        self.assertEqual(23, db.network_count_reserved_ips(context_admin,
                         network['id']))

    def test_validate_reserved_start_out_of_range(self):
        # allowed_start outside the network cidr must be rejected.
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        self.assertRaises(exception.AddressOutOfRange,
                          self.network.create_networks,
                          context_admin, 'fake', '192.168.0.0/24', False,
                          1, 256, allowed_start='192.168.1.10')

    def test_validate_reserved_end_invalid(self):
        # A non-parseable allowed_end must be rejected.
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        self.assertRaises(exception.InvalidAddress,
                          self.network.create_networks,
                          context_admin, 'fake', '192.168.0.0/24', False,
                          1, 256, allowed_end='invalid')

    def test_validate_cidr_invalid(self):
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        self.assertRaises(exception.InvalidCidr,
                          self.network.create_networks,
                          context_admin, 'fake', 'invalid', False,
                          1, 256)

    def test_validate_non_int_size(self):
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        self.assertRaises(exception.InvalidIntValue,
                          self.network.create_networks,
                          context_admin, 'fake', '192.168.0.0/24', False,
                          1, 'invalid')

    def test_validate_networks_none_requested_networks(self):
        # None means "no specific networks requested" and is always valid.
        self.network.validate_networks(self.context, None)

    def test_validate_networks_empty_requested_networks(self):
        requested_networks = []
        self.mox.ReplayAll()

        self.network.validate_networks(self.context, requested_networks)

    def test_validate_networks_invalid_fixed_ip(self):
        # Malformed addresses (five octets) must raise FixedIpInvalid.
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               '192.168.1.100.1'),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                               '192.168.0.100.1')]
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                mox.IgnoreArg()).AndReturn(
                    [dict(test_network.fake_network, **net)
                     for net in networks])
        self.mox.ReplayAll()

        self.assertRaises(exception.FixedIpInvalid,
                          self.network.validate_networks, self.context,
                          requested_networks)

    def test_validate_networks_empty_fixed_ip(self):
        # An empty-string fixed ip is invalid.
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')

        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               ''),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                               '')]
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                mox.IgnoreArg()).AndReturn(
                    [dict(test_network.fake_network, **net)
                     for net in networks])
        self.mox.ReplayAll()

        self.assertRaises(exception.FixedIpInvalid,
                          self.network.validate_networks,
                          self.context, requested_networks)

    def test_validate_networks_none_fixed_ip(self):
        # None for the fixed ip means "auto-assign" and is valid.
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')

        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               None),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                               None)]
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                mox.IgnoreArg()).AndReturn(
                    [dict(test_network.fake_network, **net)
                     for net in networks])
        self.mox.ReplayAll()

        self.network.validate_networks(self.context, requested_networks)

    @mock.patch('nova.objects.quotas.Quotas.reserve')
    def test_add_fixed_ip_instance_using_id_without_vpn(self, reserve):
        # add_fixed_ip_to_instance by *numeric network id* must allocate
        # from the pool and reserve one fixed-ip quota unit.
        self.stubs.Set(self.network,
                '_do_trigger_security_group_members_refresh_for_instance',
                lambda *a, **kw: None)
        self.mox.StubOutWithMock(db, 'network_get')
        self.mox.StubOutWithMock(db, 'network_update')
        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
        self.mox.StubOutWithMock(db,
                                 'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')

        fixed = dict(test_fixed_ip.fake_fixed_ip,
                     address='192.168.0.101')
        db.fixed_ip_associate_pool(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   instance_uuid=mox.IgnoreArg(),
                                   host=None).AndReturn(fixed)
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
        db.fixed_ip_update(mox.IgnoreArg(),
                           mox.IgnoreArg(),
                           mox.IgnoreArg())
        inst = fake_inst(display_name=HOST, uuid=FAKEUUID)
        db.instance_get_by_uuid(self.context,
                                mox.IgnoreArg(), use_slave=False,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(inst)
        db.network_get(mox.IgnoreArg(),
                       mox.IgnoreArg(),
                       project_only=mox.IgnoreArg()
                       ).AndReturn(dict(test_network.fake_network,
                                        **networks[0]))
        db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())

        self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
                                          mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
                                              networks[0]['id'])
        exp_project, exp_user = quotas_obj.ids_from_instance(self.context,
                                                             inst)
        reserve.assert_called_once_with(self.context, fixed_ips=1,
                                        project_id=exp_project,
                                        user_id=exp_user)

    @mock.patch('nova.objects.quotas.Quotas.reserve')
    def test_add_fixed_ip_instance_using_uuid_without_vpn(self, reserve):
        # Same as the test above but the network is looked up by *uuid*
        # (db.network_get_by_uuid instead of db.network_get).
        self.stubs.Set(self.network,
                '_do_trigger_security_group_members_refresh_for_instance',
                lambda *a, **kw: None)
        self.mox.StubOutWithMock(db, 'network_get_by_uuid')
        self.mox.StubOutWithMock(db, 'network_update')
        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
        self.mox.StubOutWithMock(db,
                                 'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')

        fixed = dict(test_fixed_ip.fake_fixed_ip,
                     address='192.168.0.101')
        db.fixed_ip_associate_pool(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   instance_uuid=mox.IgnoreArg(),
                                   host=None).AndReturn(fixed)
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
        db.fixed_ip_update(mox.IgnoreArg(),
                           mox.IgnoreArg(),
                           mox.IgnoreArg())
        inst = fake_inst(display_name=HOST, uuid=FAKEUUID)
        db.instance_get_by_uuid(self.context,
                                mox.IgnoreArg(), use_slave=False,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(inst)
        db.network_get_by_uuid(mox.IgnoreArg(),
                               mox.IgnoreArg()
                               ).AndReturn(dict(test_network.fake_network,
                                                **networks[0]))
        db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())

        self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
                                          mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
                                              networks[0]['uuid'])
        exp_project, exp_user = quotas_obj.ids_from_instance(self.context,
                                                             inst)
        reserve.assert_called_once_with(self.context, fixed_ips=1,
                                        project_id=exp_project,
                                        user_id=exp_user)

    def test_mini_dns_driver(self):
        # Exercise create/delete/modify/lookup of the in-memory dns driver,
        # including per-zone isolation and rejection of unknown entry types.
        zone1 = "example.org"
        zone2 = "example.com"
        driver = self.network.instance_dns_manager
        driver.create_entry("hostone", "10.0.0.1", "A", zone1)
        driver.create_entry("hosttwo", "10.0.0.2", "A", zone1)
        driver.create_entry("hostthree", "10.0.0.3", "A", zone1)
        driver.create_entry("hostfour", "10.0.0.4", "A", zone1)
        driver.create_entry("hostfive", "10.0.0.5", "A", zone2)

        driver.delete_entry("hostone", zone1)
        driver.modify_address("hostfour", "10.0.0.1", zone1)
        driver.modify_address("hostthree", "10.0.0.1", zone1)
        names = driver.get_entries_by_address("10.0.0.1", zone1)
        self.assertEqual(len(names), 2)
        self.assertIn('hostthree', names)
        self.assertIn('hostfour', names)

        names = driver.get_entries_by_address("10.0.0.5", zone2)
        self.assertEqual(len(names), 1)
        self.assertIn('hostfive', names)

        addresses = driver.get_entries_by_name("hosttwo", zone1)
        self.assertEqual(len(addresses), 1)
        self.assertIn('10.0.0.2', addresses)

        self.assertRaises(exception.InvalidInput,
                driver.create_entry,
                "hostname",
                "10.10.10.10",
                "invalidtype",
                zone1)

    def test_mini_dns_driver_with_mixed_case(self):
        # Lookups must find entries regardless of the case used at creation.
        zone1 = "example.org"
        driver = self.network.instance_dns_manager
        driver.create_entry("HostTen", "10.0.0.10", "A", zone1)
        addresses = driver.get_entries_by_address("10.0.0.10", zone1)
        self.assertEqual(len(addresses), 1)
        for n in addresses:
            driver.delete_entry(n, zone1)
        addresses = driver.get_entries_by_address("10.0.0.10", zone1)
        self.assertEqual(len(addresses), 0)

    @mock.patch('nova.objects.quotas.Quotas.reserve')
    def test_instance_dns(self, reserve):
        # Allocating a fixed ip must register dns entries for both the
        # instance display name and its uuid, and reserve quota.
        self.stubs.Set(self.network,
                '_do_trigger_security_group_members_refresh_for_instance',
                lambda *a, **kw: None)
        fixedip = dict(test_fixed_ip.fake_fixed_ip,
                       address='192.168.0.101')
        self.mox.StubOutWithMock(db, 'network_get_by_uuid')
        self.mox.StubOutWithMock(db, 'network_update')
        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
        self.mox.StubOutWithMock(db,
                                 'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')

        db.fixed_ip_associate_pool(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   instance_uuid=mox.IgnoreArg(),
                                   host=None
                                   ).AndReturn(fixedip)
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
        db.fixed_ip_update(mox.IgnoreArg(),
                           mox.IgnoreArg(),
                           mox.IgnoreArg())
        inst = fake_inst(display_name=HOST, uuid=FAKEUUID)
        db.instance_get_by_uuid(self.context,
                                mox.IgnoreArg(), use_slave=False,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(inst)
        db.network_get_by_uuid(mox.IgnoreArg(),
                               mox.IgnoreArg()
                               ).AndReturn(dict(test_network.fake_network,
                                                **networks[0]))
        db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())

        self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
                                          mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
                                              networks[0]['uuid'])

        instance_manager = self.network.instance_dns_manager
        addresses = instance_manager.get_entries_by_name(HOST,
                                             self.network.instance_dns_domain)
        self.assertEqual(len(addresses), 1)
        self.assertEqual(addresses[0], fixedip['address'])
        addresses = instance_manager.get_entries_by_name(FAKEUUID,
                                             self.network.instance_dns_domain)
        self.assertEqual(len(addresses), 1)
        self.assertEqual(addresses[0], fixedip['address'])
        exp_project, exp_user = quotas_obj.ids_from_instance(self.context,
                                                             inst)
        reserve.assert_called_once_with(self.context, fixed_ips=1,
                                        project_id=exp_project,
                                        user_id=exp_user)

    # Floating-ip operations are no-ops (return None) on the flat manager.
    def test_allocate_floating_ip(self):
        self.assertIsNone(self.network.allocate_floating_ip(self.context,
                                                            1, None))

    def test_deallocate_floating_ip(self):
        self.assertIsNone(self.network.deallocate_floating_ip(self.context,
                                                              1, None))

    def test_associate_floating_ip(self):
        self.assertIsNone(self.network.associate_floating_ip(self.context,
                                                             None, None))

    def test_disassociate_floating_ip(self):
        self.assertIsNone(self.network.disassociate_floating_ip(self.context,
                                                                None, None))

    def test_get_networks_by_uuids_ordering(self):
        # Results must come back in the order the uuids were requested,
        # not in db order.
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')

        requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                              'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa']
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                mox.IgnoreArg()).AndReturn(
                    [dict(test_network.fake_network, **net)
                     for net in networks])

        self.mox.ReplayAll()
        res = self.network._get_networks_by_uuids(self.context,
                                                  requested_networks)

        self.assertEqual(res[0]['id'], 1)
        self.assertEqual(res[1]['id'], 0)

    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    @mock.patch('nova.objects.quotas.Quotas.reserve')
    @mock.patch('nova.objects.quotas.ids_from_instance')
    def test_allocate_calculates_quota_auth(self, util_method, reserve,
                                            get_by_uuid):
        # Quota over-limit on reserve must surface as FixedIpLimitExceeded,
        # and the project/user ids must be derived from the instance.
        inst = objects.Instance()
        inst['uuid'] = 'nosuch'
        get_by_uuid.return_value = inst
        reserve.side_effect = exception.OverQuota(overs='testing')
        util_method.return_value = ('foo', 'bar')
        self.assertRaises(exception.FixedIpLimitExceeded,
                          self.network.allocate_fixed_ip,
                          self.context, 123, {'uuid': 'nosuch'})
        util_method.assert_called_once_with(self.context, inst)

    @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_address')
    @mock.patch('nova.objects.quotas.Quotas.reserve')
    @mock.patch('nova.objects.quotas.ids_from_instance')
    def test_deallocate_calculates_quota_auth(self, util_method, reserve,
                                              get_by_address):
        inst = objects.Instance(uuid='fake-uuid')
        fip = objects.FixedIP(instance_uuid='fake-uuid',
                              virtual_interface_id=1)
        get_by_address.return_value = fip
        util_method.return_value = ('foo', 'bar')
        # This will fail right after the reserve call when it tries
        # to look up the fake instance we created above
        self.assertRaises(exception.InstanceNotFound,
                          self.network.deallocate_fixed_ip,
                          self.context, '1.2.3.4', instance=inst)
        util_method.assert_called_once_with(self.context, inst)

    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    @mock.patch('nova.objects.fixed_ip.FixedIP.associate')
    def test_allocate_fixed_ip_passes_string_address(self, mock_associate,
                                                     mock_get):
        # A netaddr.IPAddress must be converted to str before being
        # handed to FixedIP.associate.
        mock_associate.side_effect = test.TestingException
        instance = objects.Instance(context=self.context)
        instance.create()
        mock_get.return_value = instance
        self.assertRaises(test.TestingException,
                          self.network.allocate_fixed_ip,
                          self.context, instance.uuid,
                          {'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
                          address=netaddr.IPAddress('1.2.3.4'))
        mock_associate.assert_called_once_with(self.context,
                                               '1.2.3.4',
                                               instance.uuid,
                                               1)

    @mock.patch('nova.objects.instance.Instance.get_by_uuid')
    @mock.patch('nova.objects.virtual_interface.VirtualInterface'
                '.get_by_instance_and_network')
    @mock.patch('nova.objects.fixed_ip.FixedIP.disassociate')
    @mock.patch('nova.objects.fixed_ip.FixedIP.associate')
    @mock.patch('nova.objects.fixed_ip.FixedIP.save')
    def test_allocate_fixed_ip_cleanup(self,
                                       mock_fixedip_save,
                                       mock_fixedip_associate,
                                       mock_fixedip_disassociate,
                                       mock_vif_get,
                                       mock_instance_get):
        # If network setup fails mid-allocation, the dns entries must be
        # removed and the fixed ip disassociated again (rollback path).
        address = netaddr.IPAddress('1.2.3.4')

        fip = objects.FixedIP(instance_uuid='fake-uuid',
                              address=address,
                              virtual_interface_id=1)
        mock_fixedip_associate.return_value = fip

        instance = objects.Instance(context=self.context)
        instance.create()
        mock_instance_get.return_value = instance

        mock_vif_get.return_value = vif_obj.VirtualInterface(
            instance_uuid='fake-uuid', id=1)

        with contextlib.nested(
            mock.patch.object(self.network, '_setup_network_on_host'),
            mock.patch.object(self.network, 'instance_dns_manager'),
            mock.patch.object(self.network,
                '_do_trigger_security_group_members_refresh_for_instance')
        ) as (mock_setup_network, mock_dns_manager, mock_ignored):
            mock_setup_network.side_effect = test.TestingException
            self.assertRaises(test.TestingException,
                              self.network.allocate_fixed_ip,
                              self.context, instance.uuid,
                              {'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
                              address=address)

            mock_dns_manager.delete_entry.assert_has_calls([
                mock.call(instance.display_name, ''),
                mock.call(instance.uuid, '')
            ])

        mock_fixedip_disassociate.assert_called_once_with(self.context)
class FlatDHCPNetworkTestCase(test.TestCase):
    """Tests for the flat manager variant that also runs DHCP."""

    def setUp(self):
        super(FlatDHCPNetworkTestCase, self).setUp()
        # SampleNetworks pre-populates the test db with networks.
        self.useFixture(test.SampleNetworks())
        self.flags(use_local=True, group='conductor')
        self.network = network_manager.FlatDHCPManager(host=HOST)
        self.network.db = db
        self.context = context.RequestContext('testuser', 'testproject',
                                              is_admin=False)
        self.context_admin = context.RequestContext('testuser', 'testproject',
                                                    is_admin=True)

    @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_id')
    @mock.patch('nova.objects.floating_ip.FloatingIPList.get_by_host')
    @mock.patch('nova.network.linux_net.iptables_manager._apply')
    def test_init_host_iptables_defer_apply(self, iptable_apply,
                                            floating_get_by_host,
                                            fixed_get_by_id):
        # init_host must batch all iptables changes behind defer_apply so
        # that _apply runs exactly once, even with two floating ips.
        def get_by_id(context, fixed_ip_id, **kwargs):
            net = objects.Network(bridge='testbridge',
                                  cidr='192.168.1.0/24')
            # Map the two fixed_ip_ids used by the floats below to fixed
            # ips on the same fake network.
            if fixed_ip_id == 1:
                return objects.FixedIP(address='192.168.1.4',
                                       network=net)
            elif fixed_ip_id == 2:
                return objects.FixedIP(address='192.168.1.5',
                                       network=net)

        def fake_apply():
            # Count invocations instead of touching real iptables.
            fake_apply.count += 1

        fake_apply.count = 0
        ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
        float1 = objects.FloatingIP(address='1.2.3.4', fixed_ip_id=1)
        float2 = objects.FloatingIP(address='1.2.3.5', fixed_ip_id=2)
        float1._context = ctxt
        float2._context = ctxt

        iptable_apply.side_effect = fake_apply
        floating_get_by_host.return_value = [float1, float2]
        fixed_get_by_id.side_effect = get_by_id

        self.network.init_host()
        self.assertEqual(1, fake_apply.count)
class VlanNetworkTestCase(test.TestCase):
    def setUp(self):
        """Build a VlanManager wired to the real test db."""
        super(VlanNetworkTestCase, self).setUp()
        # SampleNetworks pre-creates networks; the tests below rely on the
        # VLAN ids it takes (100 and 101 per the comments in those tests).
        self.useFixture(test.SampleNetworks())
        self.flags(use_local=True, group='conductor')
        self.network = network_manager.VlanManager(host=HOST)
        self.network.db = db
        self.context = context.RequestContext('testuser', 'testproject',
                                              is_admin=False)
        self.context_admin = context.RequestContext('testuser', 'testproject',
                                                    is_admin=True)
def test_quota_driver_type(self):
self.assertEqual(objects.QuotasNoOp,
self.network.quotas_cls)
    def test_vpn_allocate_fixed_ip(self):
        """vpn=True allocations must use fixed_ip_associate with reserved=True
        (the network's vpn_private_address), not the general pool.
        """
        self.mox.StubOutWithMock(db, 'fixed_ip_associate')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        self.mox.StubOutWithMock(db,
                                 'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')

        fixed = dict(test_fixed_ip.fake_fixed_ip,
                     address='192.168.0.1')
        db.fixed_ip_associate(mox.IgnoreArg(),
                              mox.IgnoreArg(),
                              mox.IgnoreArg(),
                              network_id=mox.IgnoreArg(),
                              reserved=True).AndReturn(fixed)
        db.fixed_ip_update(mox.IgnoreArg(),
                           mox.IgnoreArg(),
                           mox.IgnoreArg())
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
        db.instance_get_by_uuid(mox.IgnoreArg(),
                                mox.IgnoreArg(), use_slave=False,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(fake_inst(display_name=HOST,
                                                      uuid=FAKEUUID))
        self.mox.ReplayAll()

        network = objects.Network._from_db_object(
            self.context, objects.Network(),
            dict(test_network.fake_network, **networks[0]))
        network.vpn_private_address = '192.168.0.2'
        self.network.allocate_fixed_ip(self.context, FAKEUUID, network,
                                       vpn=True)
def test_vpn_allocate_fixed_ip_no_network_id(self):
network = dict(networks[0])
network['vpn_private_address'] = '192.168.0.2'
network['id'] = None
instance = db.instance_create(self.context, {})
self.assertRaises(exception.FixedIpNotFoundForNetwork,
self.network.allocate_fixed_ip,
self.context_admin,
instance['uuid'],
network,
vpn=True)
    def test_allocate_fixed_ip(self):
        """A normal (non-vpn) allocation must draw from the pool via
        fixed_ip_associate_pool.
        """
        self.stubs.Set(self.network,
                '_do_trigger_security_group_members_refresh_for_instance',
                lambda *a, **kw: None)
        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        self.mox.StubOutWithMock(db,
                                 'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')

        fixed = dict(test_fixed_ip.fake_fixed_ip,
                     address='192.168.0.1')
        db.fixed_ip_associate_pool(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   instance_uuid=mox.IgnoreArg(),
                                   host=None).AndReturn(fixed)
        db.fixed_ip_update(mox.IgnoreArg(),
                           mox.IgnoreArg(),
                           mox.IgnoreArg())
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
        db.instance_get_by_uuid(mox.IgnoreArg(),
                                mox.IgnoreArg(), use_slave=False,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(fake_inst(display_name=HOST,
                                                      uuid=FAKEUUID))
        self.mox.ReplayAll()

        network = objects.Network._from_db_object(
            self.context, objects.Network(),
            dict(test_network.fake_network, **networks[0]))
        network.vpn_private_address = '192.168.0.2'
        self.network.allocate_fixed_ip(self.context, FAKEUUID, network)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate')
def test_allocate_fixed_ip_passes_string_address(self, mock_associate,
mock_get):
mock_associate.side_effect = test.TestingException
instance = objects.Instance(context=self.context)
instance.create()
mock_get.return_value = instance
self.assertRaises(test.TestingException,
self.network.allocate_fixed_ip,
self.context, instance.uuid,
{'cidr': '24', 'id': 1, 'uuid': 'nosuch'},
address=netaddr.IPAddress('1.2.3.4'))
mock_associate.assert_called_once_with(self.context,
'1.2.3.4',
instance.uuid,
1)
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
@mock.patch('nova.objects.fixed_ip.FixedIP.associate')
def test_allocate_fixed_ip_passes_string_address_vpn(self, mock_associate,
mock_get):
mock_associate.side_effect = test.TestingException
instance = objects.Instance(context=self.context)
instance.create()
mock_get.return_value = instance
self.assertRaises(test.TestingException,
self.network.allocate_fixed_ip,
self.context, instance.uuid,
{'cidr': '24', 'id': 1, 'uuid': 'nosuch',
'vpn_private_address': netaddr.IPAddress('1.2.3.4')
}, vpn=1)
mock_associate.assert_called_once_with(self.context,
'1.2.3.4',
instance.uuid,
1, reserved=True)
def test_create_networks_too_big(self):
self.assertRaises(ValueError, self.network.create_networks, None,
num_networks=4094, vlan_start=1)
def test_create_networks_too_many(self):
self.assertRaises(ValueError, self.network.create_networks, None,
num_networks=100, vlan_start=1,
cidr='192.168.0.1/24', network_size=100)
def test_duplicate_vlan_raises(self):
# VLAN 100 is already used and we force the network to be created
# in that vlan (vlan=100).
self.assertRaises(exception.DuplicateVlan,
self.network.create_networks,
self.context_admin, label="fake", num_networks=1,
vlan=100, cidr='192.168.0.1/24', network_size=100)
def test_vlan_start(self):
# VLAN 100 and 101 are used, so this network shoud be created in 102
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=1,
vlan_start=100, cidr='192.168.3.1/24',
network_size=100)
self.assertEqual(networks[0]["vlan"], 102)
def test_vlan_start_multiple(self):
# VLAN 100 and 101 are used, so these networks shoud be created in 102
# and 103
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=2,
vlan_start=100, cidr='192.168.3.1/24',
network_size=100)
self.assertEqual(networks[0]["vlan"], 102)
self.assertEqual(networks[1]["vlan"], 103)
def test_vlan_start_used(self):
# VLAN 100 and 101 are used, but vlan_start=99.
networks = self.network.create_networks(
self.context_admin, label="fake", num_networks=1,
vlan_start=99, cidr='192.168.3.1/24',
network_size=100)
self.assertEqual(networks[0]["vlan"], 102)
def test_vlan_parameter(self):
# vlan parameter could not be greater than 4094
exc = self.assertRaises(ValueError,
self.network.create_networks,
self.context_admin, label="fake",
num_networks=1,
vlan=4095, cidr='192.168.0.1/24')
error_msg = 'The vlan number cannot be greater than 4094'
self.assertIn(error_msg, six.text_type(exc))
# vlan parameter could not be less than 1
exc = self.assertRaises(ValueError,
self.network.create_networks,
self.context_admin, label="fake",
num_networks=1,
vlan=0, cidr='192.168.0.1/24')
error_msg = 'The vlan number cannot be less than 1'
self.assertIn(error_msg, six.text_type(exc))
def test_vlan_be_integer(self):
# vlan must be an integer
exc = self.assertRaises(ValueError,
self.network.create_networks,
self.context_admin, label="fake",
num_networks=1,
vlan='fake', cidr='192.168.0.1/24')
error_msg = 'vlan must be an integer'
self.assertIn(error_msg, six.text_type(exc))
    @mock.patch('nova.db.network_get')
    def test_validate_networks(self, net_get):
        """Both requested fixed ips exist, are unassigned and live on the
        requested networks, so validation must pass.  Mixes a mock patch
        (network_get) with mox record/replay for the remaining db calls.
        """
        def network_get(_context, network_id, project_only='allow_none'):
            return dict(test_network.fake_network, **networks[network_id])

        net_get.side_effect = network_get
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        self.mox.StubOutWithMock(db, "fixed_ip_get_by_address")

        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               '192.168.1.100'),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                               '192.168.0.100')]
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                mox.IgnoreArg()).AndReturn(
                    [dict(test_network.fake_network, **net)
                     for net in networks])

        # NOTE: the lookups are expected in request order — networks[1]
        # first, then networks[0].
        db_fixed1 = dict(test_fixed_ip.fake_fixed_ip,
                         network_id=networks[1]['id'],
                         network=dict(test_network.fake_network,
                                      **networks[1]),
                         instance_uuid=None)
        db.fixed_ip_get_by_address(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   columns_to_join=mox.IgnoreArg()
                                   ).AndReturn(db_fixed1)
        db_fixed2 = dict(test_fixed_ip.fake_fixed_ip,
                         network_id=networks[0]['id'],
                         network=dict(test_network.fake_network,
                                      **networks[0]),
                         instance_uuid=None)
        db.fixed_ip_get_by_address(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   columns_to_join=mox.IgnoreArg()
                                   ).AndReturn(db_fixed2)

        self.mox.ReplayAll()
        self.network.validate_networks(self.context, requested_networks)
    def test_validate_networks_none_requested_networks(self):
        """validate_networks accepts None for the requested-networks arg."""
        self.network.validate_networks(self.context, None)
    def test_validate_networks_empty_requested_networks(self):
        """validate_networks accepts an empty requested-networks list."""
        requested_networks = []
        self.mox.ReplayAll()
        self.network.validate_networks(self.context, requested_networks)
    def test_validate_networks_invalid_fixed_ip(self):
        """A malformed fixed-IP string (five octets) raises FixedIpInvalid."""
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                               '192.168.1.100.1'),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa',
                               '192.168.0.100.1')]
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(
                                        [dict(test_network.fake_network, **net)
                                         for net in networks])
        self.mox.ReplayAll()
        self.assertRaises(exception.FixedIpInvalid,
                          self.network.validate_networks, self.context,
                          requested_networks)
    def test_validate_networks_empty_fixed_ip(self):
        """An empty-string fixed IP raises FixedIpInvalid."""
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', ''),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', '')]
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(
                                        [dict(test_network.fake_network, **net)
                                         for net in networks])
        self.mox.ReplayAll()
        self.assertRaises(exception.FixedIpInvalid,
                          self.network.validate_networks,
                          self.context, requested_networks)
    def test_validate_networks_none_fixed_ip(self):
        """A None fixed IP is allowed (no specific address requested)."""
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        requested_networks = [('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb', None),
                              ('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa', None)]
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(
                                        [dict(test_network.fake_network, **net)
                                         for net in networks])
        self.mox.ReplayAll()
        self.network.validate_networks(self.context, requested_networks)
    def test_floating_ip_owned_by_project(self):
        """_floating_ip_owned_by_project raises Forbidden for a non-admin
        context unless the floating IP's project matches; admins always pass.
        """
        ctxt = context.RequestContext('testuser', 'testproject',
                                      is_admin=False)
        # raises because floating_ip project_id is None
        floating_ip = objects.FloatingIP(address='10.0.0.1',
                                         project_id=None)
        self.assertRaises(exception.Forbidden,
                          self.network._floating_ip_owned_by_project,
                          ctxt,
                          floating_ip)
        # raises because floating_ip project_id is not equal to ctxt project_id
        floating_ip = objects.FloatingIP(address='10.0.0.1',
                                         project_id=ctxt.project_id + '1')
        self.assertRaises(exception.Forbidden,
                          self.network._floating_ip_owned_by_project,
                          ctxt,
                          floating_ip)
        # does not raise (floating ip is owned by ctxt project)
        floating_ip = objects.FloatingIP(address='10.0.0.1',
                                         project_id=ctxt.project_id)
        self.network._floating_ip_owned_by_project(ctxt, floating_ip)
        # Switch to an admin context: ownership is no longer checked.
        ctxt = context.RequestContext(None, None,
                                      is_admin=True)
        # does not raise (ctxt is admin)
        floating_ip = objects.FloatingIP(address='10.0.0.1',
                                         project_id=None)
        self.network._floating_ip_owned_by_project(ctxt, floating_ip)
        # does not raise (ctxt is admin)
        floating_ip = objects.FloatingIP(address='10.0.0.1',
                                         project_id='testproject')
        self.network._floating_ip_owned_by_project(ctxt, floating_ip)
def test_allocate_floating_ip(self):
ctxt = context.RequestContext('testuser', 'testproject',
is_admin=False)
def fake_allocate_address(*args, **kwargs):
return {'address': '10.0.0.1', 'project_id': ctxt.project_id}
self.stubs.Set(self.network.db, 'floating_ip_allocate_address',
fake_allocate_address)
self.network.allocate_floating_ip(ctxt, ctxt.project_id)
    @mock.patch('nova.quota.QUOTAS.reserve')
    @mock.patch('nova.quota.QUOTAS.commit')
    def test_deallocate_floating_ip(self, mock_commit, mock_reserve):
        """deallocate_floating_ip refuses an associated IP, then succeeds
        (and commits the quota reservation) once the IP is unassociated.
        """
        ctxt = context.RequestContext('testuser', 'testproject',
                                      is_admin=False)
        def fake1(*args, **kwargs):
            return dict(test_floating_ip.fake_floating_ip)
        def fake2(*args, **kwargs):
            # Floating IP still bound to a fixed IP.
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1', fixed_ip_id=1)
        def fake3(*args, **kwargs):
            # Floating IP free and owned by the caller's project.
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1', fixed_ip_id=None,
                        project_id=ctxt.project_id)
        self.stubs.Set(self.network.db, 'floating_ip_deallocate', fake1)
        self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
        # this time should raise because floating ip is associated to fixed_ip
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
        self.assertRaises(exception.FloatingIpAssociated,
                          self.network.deallocate_floating_ip,
                          ctxt,
                          mox.IgnoreArg())
        mock_reserve.return_value = 'reserve'
        # this time should not raise
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
        self.network.deallocate_floating_ip(ctxt, ctxt.project_id)
        mock_commit.assert_called_once_with(ctxt, 'reserve',
                                            project_id='testproject')
    @mock.patch('nova.db.fixed_ip_get')
    def test_associate_floating_ip(self, fixed_get):
        """Exercise associate_floating_ip across its main branches:
        missing interface, already-associated address, remote-host RPC
        dispatch, and local association.
        """
        ctxt = context.RequestContext('testuser', 'testproject',
                                      is_admin=False)
        def fake1(*args, **kwargs):
            return dict(test_fixed_ip.fake_fixed_ip,
                        address='10.0.0.1',
                        network=test_network.fake_network)
        # floating ip that's already associated
        def fake2(*args, **kwargs):
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1',
                        pool='nova',
                        interface='eth0',
                        fixed_ip_id=1)
        # floating ip that isn't associated
        def fake3(*args, **kwargs):
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1',
                        pool='nova',
                        interface='eth0',
                        fixed_ip_id=None)
        # fixed ip with remote host
        def fake4(*args, **kwargs):
            return dict(test_fixed_ip.fake_fixed_ip,
                        address='10.0.0.1',
                        pool='nova',
                        instance_uuid=FAKEUUID,
                        interface='eth0',
                        network_id=123)
        def fake4_network(*args, **kwargs):
            return dict(test_network.fake_network,
                        multi_host=False, host='jibberjabber')
        # fixed ip with local host
        def fake5(*args, **kwargs):
            return dict(test_fixed_ip.fake_fixed_ip,
                        address='10.0.0.1',
                        pool='nova',
                        instance_uuid=FAKEUUID,
                        interface='eth0',
                        network_id=1234)
        def fake5_network(*args, **kwargs):
            return dict(test_network.fake_network,
                        multi_host=False, host='testhost')
        def fake6(ctxt, method, **kwargs):
            # RPC 'call' stub: records that the work went remote.
            self.local = False
        def fake7(*args, **kwargs):
            # Local-association stub: records that the work stayed local.
            self.local = True
        def fake8(*args, **kwargs):
            raise processutils.ProcessExecutionError('',
                    'Cannot find device "em0"\n')
        def fake9(*args, **kwargs):
            raise test.TestingException()
        # raises because interface doesn't exist
        self.stubs.Set(self.network.db,
                       'floating_ip_fixed_ip_associate',
                       fake1)
        self.stubs.Set(self.network.db, 'floating_ip_disassociate', fake1)
        self.stubs.Set(self.network.driver, 'ensure_floating_forward', fake8)
        self.assertRaises(exception.NoFloatingIpInterface,
                          self.network._associate_floating_ip,
                          ctxt,
                          '1.2.3.4',
                          '1.2.3.5',
                          mox.IgnoreArg(),
                          mox.IgnoreArg())
        self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
        # raises because floating_ip is already associated to a fixed_ip
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
        self.stubs.Set(self.network, 'disassociate_floating_ip', fake9)
        fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                      address='1.2.3.4',
                                      instance_uuid='fake_uuid',
                                      network=test_network.fake_network)
        # doesn't raise because we exit early if the address is the same
        self.network.associate_floating_ip(ctxt, mox.IgnoreArg(), '1.2.3.4')
        # raises because we call disassociate which is mocked
        self.assertRaises(test.TestingException,
                          self.network.associate_floating_ip,
                          ctxt,
                          mox.IgnoreArg(),
                          'new')
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
        # does not raise and makes call remotely
        self.local = True
        self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake4)
        self.stubs.Set(self.network.db, 'network_get', fake4_network)
        self.stubs.Set(self.network.network_rpcapi.client, 'prepare',
                       lambda **kw: self.network.network_rpcapi.client)
        self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6)
        self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
                                           mox.IgnoreArg())
        self.assertFalse(self.local)
        # does not raise and makes call locally
        self.local = False
        self.stubs.Set(self.network.db, 'fixed_ip_get_by_address', fake5)
        self.stubs.Set(self.network.db, 'network_get', fake5_network)
        self.stubs.Set(self.network, '_associate_floating_ip', fake7)
        self.network.associate_floating_ip(ctxt, mox.IgnoreArg(),
                                           mox.IgnoreArg())
        self.assertTrue(self.local)
    def test_add_floating_ip_nat_before_bind(self):
        """Verify NAT forwarding rules are installed before the floating IP
        is bound to the interface (ordering checked via a shared flag).
        """
        # Tried to verify order with documented mox record/verify
        # functionality, but it doesn't seem to work since I can't make it
        # fail. I'm using stubs and a flag for now, but if this mox feature
        # can be made to work, it would be a better way to test this.
        #
        # self.mox.StubOutWithMock(self.network.driver,
        #                          'ensure_floating_forward')
        # self.mox.StubOutWithMock(self.network.driver, 'bind_floating_ip')
        #
        # self.network.driver.ensure_floating_forward(mox.IgnoreArg(),
        #                                             mox.IgnoreArg(),
        #                                             mox.IgnoreArg(),
        #                                             mox.IgnoreArg())
        # self.network.driver.bind_floating_ip(mox.IgnoreArg(),
        #                                      mox.IgnoreArg())
        # self.mox.ReplayAll()
        nat_called = [False]
        def fake_nat(*args, **kwargs):
            nat_called[0] = True
        def fake_bind(*args, **kwargs):
            # Fails the test if bind happens before NAT setup.
            self.assertTrue(nat_called[0])
        self.stubs.Set(self.network.driver,
                       'ensure_floating_forward',
                       fake_nat)
        self.stubs.Set(self.network.driver, 'bind_floating_ip', fake_bind)
        self.network.l3driver.add_floating_ip('fakefloat',
                                              'fakefixed',
                                              'fakeiface',
                                              'fakenet')
    @mock.patch('nova.db.floating_ip_get_all_by_host')
    @mock.patch('nova.db.fixed_ip_get')
    def _test_floating_ip_init_host(self, fixed_get, floating_get,
                                    public_interface, expected_arg):
        """Shared driver for the init-host floating-IP tests: of three
        floating IPs, only the one whose fixed IP resolves gets re-added,
        with *expected_arg* as the interface passed to the l3 driver.
        """
        floating_get.return_value = [
            dict(test_floating_ip.fake_floating_ip,
                 interface='foo',
                 address='1.2.3.4'),
            dict(test_floating_ip.fake_floating_ip,
                 interface='fakeiface',
                 address='1.2.3.5',
                 fixed_ip_id=1),
            dict(test_floating_ip.fake_floating_ip,
                 interface='bar',
                 address='1.2.3.6',
                 fixed_ip_id=2),
            ]
        def fixed_ip_get(_context, fixed_ip_id, get_network):
            # Only fixed IP id 1 exists; id 2 raises FixedIpNotFound.
            if fixed_ip_id == 1:
                return dict(test_fixed_ip.fake_fixed_ip,
                            address='1.2.3.4',
                            network=test_network.fake_network)
            raise exception.FixedIpNotFound(id=fixed_ip_id)
        fixed_get.side_effect = fixed_ip_get
        self.mox.StubOutWithMock(self.network.l3driver, 'add_floating_ip')
        self.flags(public_interface=public_interface)
        self.network.l3driver.add_floating_ip(netaddr.IPAddress('1.2.3.5'),
                                              netaddr.IPAddress('1.2.3.4'),
                                              expected_arg,
                                              mox.IsA(objects.Network))
        self.mox.ReplayAll()
        self.network.init_host_floating_ips()
        self.mox.UnsetStubs()
        self.mox.VerifyAll()
def test_floating_ip_init_host_without_public_interface(self):
self._test_floating_ip_init_host(public_interface=False,
expected_arg='fakeiface')
def test_floating_ip_init_host_with_public_interface(self):
self._test_floating_ip_init_host(public_interface='fooiface',
expected_arg='fooiface')
    def test_disassociate_floating_ip(self):
        """Exercise disassociate_floating_ip: unassociated IP raises,
        remote-host work goes over RPC, local work runs in-process, and
        auto-assigned IPs cannot be disassociated.
        """
        ctxt = context.RequestContext('testuser', 'testproject',
                                      is_admin=False)
        def fake1(*args, **kwargs):
            pass
        # floating ip that isn't associated
        def fake2(*args, **kwargs):
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1',
                        pool='nova',
                        interface='eth0',
                        fixed_ip_id=None)
        # floating ip that is associated
        def fake3(*args, **kwargs):
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1',
                        pool='nova',
                        interface='eth0',
                        fixed_ip_id=1,
                        project_id=ctxt.project_id)
        # fixed ip with remote host
        def fake4(*args, **kwargs):
            return dict(test_fixed_ip.fake_fixed_ip,
                        address='10.0.0.1',
                        pool='nova',
                        instance_uuid=FAKEUUID,
                        interface='eth0',
                        network_id=123)
        def fake4_network(*args, **kwargs):
            return dict(test_network.fake_network,
                        multi_host=False,
                        host='jibberjabber')
        # fixed ip with local host
        def fake5(*args, **kwargs):
            return dict(test_fixed_ip.fake_fixed_ip,
                        address='10.0.0.1',
                        pool='nova',
                        instance_uuid=FAKEUUID,
                        interface='eth0',
                        network_id=1234)
        def fake5_network(*args, **kwargs):
            return dict(test_network.fake_network,
                        multi_host=False, host='testhost')
        def fake6(ctxt, method, **kwargs):
            # RPC 'call' stub: records that the work went remote.
            self.local = False
        def fake7(*args, **kwargs):
            # Local-disassociation stub: records the work stayed local.
            self.local = True
        def fake8(*args, **kwargs):
            # Auto-assigned floating IP: disassociation must be refused.
            return dict(test_floating_ip.fake_floating_ip,
                        address='10.0.0.1',
                        pool='nova',
                        interface='eth0',
                        fixed_ip_id=1,
                        auto_assigned=True,
                        project_id=ctxt.project_id)
        self.stubs.Set(self.network, '_floating_ip_owned_by_project', fake1)
        # raises because floating_ip is not associated to a fixed_ip
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake2)
        self.assertRaises(exception.FloatingIpNotAssociated,
                          self.network.disassociate_floating_ip,
                          ctxt,
                          mox.IgnoreArg())
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake3)
        # does not raise and makes call remotely
        self.local = True
        self.stubs.Set(self.network.db, 'fixed_ip_get', fake4)
        self.stubs.Set(self.network.db, 'network_get', fake4_network)
        self.stubs.Set(self.network.network_rpcapi.client, 'prepare',
                       lambda **kw: self.network.network_rpcapi.client)
        self.stubs.Set(self.network.network_rpcapi.client, 'call', fake6)
        self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
        self.assertFalse(self.local)
        # does not raise and makes call locally
        self.local = False
        self.stubs.Set(self.network.db, 'fixed_ip_get', fake5)
        self.stubs.Set(self.network.db, 'network_get', fake5_network)
        self.stubs.Set(self.network, '_disassociate_floating_ip', fake7)
        self.network.disassociate_floating_ip(ctxt, mox.IgnoreArg())
        self.assertTrue(self.local)
        # raises because auto_assigned floating IP cannot be disassociated
        self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake8)
        self.assertRaises(exception.CannotDisassociateAutoAssignedFloatingIP,
                          self.network.disassociate_floating_ip,
                          ctxt,
                          mox.IgnoreArg())
    def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
        """add_fixed_ip_to_instance drives the expected DB calls when no
        VPN/requested networks are involved (verified via mox replay).
        """
        self.stubs.Set(self.network,
                '_do_trigger_security_group_members_refresh_for_instance',
                lambda *a, **kw: None)
        self.mox.StubOutWithMock(db, 'network_get')
        self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
        self.mox.StubOutWithMock(db,
                                 'virtual_interface_get_by_instance_and_network')
        self.mox.StubOutWithMock(db, 'fixed_ip_update')
        self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
        self.mox.StubOutWithMock(self.network, 'get_instance_nw_info')
        db.fixed_ip_update(mox.IgnoreArg(),
                           mox.IgnoreArg(),
                           mox.IgnoreArg())
        db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
                mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(vifs[0])
        fixed = dict(test_fixed_ip.fake_fixed_ip,
                     address='192.168.0.101')
        db.fixed_ip_associate_pool(mox.IgnoreArg(),
                                   mox.IgnoreArg(),
                                   instance_uuid=mox.IgnoreArg(),
                                   host=None).AndReturn(fixed)
        db.network_get(mox.IgnoreArg(),
                       mox.IgnoreArg(),
                       project_only=mox.IgnoreArg()
                       ).AndReturn(dict(test_network.fake_network,
                                        **networks[0]))
        db.instance_get_by_uuid(mox.IgnoreArg(),
                                mox.IgnoreArg(), use_slave=False,
                                columns_to_join=['info_cache',
                                                 'security_groups']
                                ).AndReturn(fake_inst(display_name=HOST,
                                                      uuid=FAKEUUID))
        self.network.get_instance_nw_info(mox.IgnoreArg(), mox.IgnoreArg(),
                                          mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.network.add_fixed_ip_to_instance(self.context, FAKEUUID, HOST,
                                              networks[0]['id'])
    @mock.patch('nova.db.fixed_ip_get_by_address')
    @mock.patch('nova.db.network_get')
    def test_ip_association_and_allocation_of_other_project(self, net_get,
                                                            fixed_get):
        """Makes sure that we cannot deallocate or disassociate
        a public ip of other project.
        """
        net_get.return_value = dict(test_network.fake_network,
                                    **networks[1])
        context1 = context.RequestContext('user', 'project1')
        context2 = context.RequestContext('user', 'project2')
        float_ip = db.floating_ip_create(context1.elevated(),
                                         {'address': '1.2.3.4',
                                          'project_id': context1.project_id})
        float_addr = float_ip['address']
        instance = db.instance_create(context1,
                                      {'project_id': 'project1'})
        fix_addr = db.fixed_ip_associate_pool(context1.elevated(),
                                              1, instance['uuid']).address
        fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                      address=fix_addr,
                                      instance_uuid=instance.uuid,
                                      network=dict(test_network.fake_network,
                                                   **networks[1]))
        # Associate the IP with non-admin user context
        self.assertRaises(exception.Forbidden,
                          self.network.associate_floating_ip,
                          context2,
                          float_addr,
                          fix_addr)
        # Deallocate address from other project
        self.assertRaises(exception.Forbidden,
                          self.network.deallocate_floating_ip,
                          context2,
                          float_addr)
        # Now Associates the address to the actual project
        self.network.associate_floating_ip(context1, float_addr, fix_addr)
        # Now try dis-associating from other project
        self.assertRaises(exception.Forbidden,
                          self.network.disassociate_floating_ip,
                          context2,
                          float_addr)
        # Clean up the ip addresses
        self.network.disassociate_floating_ip(context1, float_addr)
        self.network.deallocate_floating_ip(context1, float_addr)
        self.network.deallocate_fixed_ip(context1, fix_addr, 'fake')
        db.floating_ip_destroy(context1.elevated(), float_addr)
        db.fixed_ip_disassociate(context1.elevated(), fix_addr)
    @mock.patch('nova.db.fixed_ip_get_by_address')
    @mock.patch('nova.db.network_get')
    @mock.patch('nova.db.fixed_ip_update')
    def test_deallocate_fixed(self, fixed_update, net_get, fixed_get):
        """Verify that release is called properly.

        Ensures https://bugs.launchpad.net/nova/+bug/973442 doesn't return
        """
        net_get.return_value = dict(test_network.fake_network,
                                    **networks[1])
        def vif_get(_context, _vif_id):
            return vifs[0]
        self.stubs.Set(db, 'virtual_interface_get', vif_get)
        context1 = context.RequestContext('user', 'project1')
        instance = db.instance_create(context1,
                                      {'project_id': 'project1'})
        elevated = context1.elevated()
        fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
        fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                      address=fix_addr.address,
                                      instance_uuid=instance.uuid,
                                      allocated=True,
                                      virtual_interface_id=3,
                                      network=dict(test_network.fake_network,
                                                   **networks[1]))
        # With force_dhcp_release on, the DHCP lease must be released.
        self.flags(force_dhcp_release=True)
        self.mox.StubOutWithMock(linux_net, 'release_dhcp')
        linux_net.release_dhcp(networks[1]['bridge'], fix_addr.address,
                               'DE:AD:BE:EF:00:00')
        self.mox.ReplayAll()
        self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake')
        fixed_update.assert_called_once_with(context1, fix_addr.address,
                                             {'allocated': False})
    @mock.patch('nova.db.fixed_ip_get_by_address')
    @mock.patch('nova.db.network_get')
    @mock.patch('nova.db.fixed_ip_update')
    def test_deallocate_fixed_with_dhcp_exception(self, fixed_update, net_get,
                                                  fixed_get):
        """A failing dhcp_release command must not abort deallocation:
        the IP is still marked unallocated.
        """
        net_get.return_value = dict(test_network.fake_network,
                                    **networks[1])
        def vif_get(_context, _vif_id):
            return vifs[0]
        with contextlib.nested(
            mock.patch.object(db, 'virtual_interface_get', vif_get),
            mock.patch.object(
                utils, 'execute',
                side_effect=processutils.ProcessExecutionError()),
        ) as (_vif_get, _execute):
            context1 = context.RequestContext('user', 'project1')
            instance = db.instance_create(context1,
                                          {'project_id': 'project1'})
            elevated = context1.elevated()
            fix_addr = db.fixed_ip_associate_pool(elevated, 1,
                                                  instance['uuid'])
            fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                          address=fix_addr.address,
                                          instance_uuid=instance.uuid,
                                          allocated=True,
                                          virtual_interface_id=3,
                                          network=dict(
                                              test_network.fake_network,
                                              **networks[1]))
            self.flags(force_dhcp_release=True)
            self.network.deallocate_fixed_ip(context1, fix_addr.address,
                                             'fake')
            fixed_update.assert_called_once_with(context1, fix_addr.address,
                                                 {'allocated': False})
            _execute.assert_called_once_with('dhcp_release',
                                             networks[1]['bridge'],
                                             fix_addr.address,
                                             'DE:AD:BE:EF:00:00',
                                             run_as_root=True)
def test_deallocate_fixed_deleted(self):
# Verify doesn't deallocate deleted fixed_ip from deleted network.
def teardown_network_on_host(_context, network):
if network['id'] == 0:
raise test.TestingException()
self.stubs.Set(self.network, '_teardown_network_on_host',
teardown_network_on_host)
context1 = context.RequestContext('user', 'project1')
elevated = context1.elevated()
instance = db.instance_create(context1,
{'project_id': 'project1'})
network = db.network_create_safe(elevated, networks[0])
_fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
fix_addr = _fix_addr.address
db.fixed_ip_update(elevated, fix_addr, {'deleted': 1})
elevated.read_deleted = 'yes'
delfixed = db.fixed_ip_get_by_address(elevated, fix_addr)
values = {'address': fix_addr,
'network_id': network.id,
'instance_uuid': delfixed['instance_uuid']}
db.fixed_ip_create(elevated, values)
elevated.read_deleted = 'no'
elevated.read_deleted = 'yes'
deallocate = self.network.deallocate_fixed_ip
self.assertRaises(test.TestingException, deallocate, context1,
fix_addr, 'fake')
    @mock.patch('nova.db.fixed_ip_get_by_address')
    @mock.patch('nova.db.network_get')
    @mock.patch('nova.db.fixed_ip_update')
    def test_deallocate_fixed_no_vif(self, fixed_update, net_get, fixed_get):
        """Verify that deallocate doesn't raise when no vif is returned.

        Ensures https://bugs.launchpad.net/nova/+bug/968457 doesn't return
        """
        net_get.return_value = dict(test_network.fake_network,
                                    **networks[1])
        def vif_get(_context, _vif_id):
            # Simulate a missing virtual interface row.
            return None
        self.stubs.Set(db, 'virtual_interface_get', vif_get)
        context1 = context.RequestContext('user', 'project1')
        instance = db.instance_create(context1,
                                      {'project_id': 'project1'})
        elevated = context1.elevated()
        fix_addr = db.fixed_ip_associate_pool(elevated, 1, instance['uuid'])
        fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                      address=fix_addr.address,
                                      allocated=True,
                                      virtual_interface_id=3,
                                      instance_uuid=instance.uuid,
                                      network=dict(test_network.fake_network,
                                                   **networks[1]))
        self.flags(force_dhcp_release=True)
        fixed_update.return_value = fixed_get.return_value
        self.network.deallocate_fixed_ip(context1, fix_addr.address, 'fake')
        fixed_update.assert_called_once_with(context1, fix_addr.address,
                                             {'allocated': False})
    @mock.patch('nova.db.fixed_ip_get_by_address')
    @mock.patch('nova.db.network_get')
    @mock.patch('nova.db.fixed_ip_update')
    def test_fixed_ip_cleanup_fail(self, fixed_update, net_get, fixed_get):
        """IP must not be marked deallocated when the security-group
        members refresh fails.
        """
        net_get.return_value = dict(test_network.fake_network,
                                    **networks[1])
        context1 = context.RequestContext('user', 'project1')
        instance = db.instance_create(context1,
                                      {'project_id': 'project1'})
        elevated = context1.elevated()
        fix_addr = objects.FixedIP.associate_pool(elevated, 1,
                                                  instance['uuid'])
        def fake_refresh(instance_uuid):
            raise test.TestingException()
        self.stubs.Set(self.network,
                '_do_trigger_security_group_members_refresh_for_instance',
                fake_refresh)
        fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                      address=fix_addr.address,
                                      allocated=True,
                                      virtual_interface_id=3,
                                      instance_uuid=instance.uuid,
                                      network=dict(test_network.fake_network,
                                                   **networks[1]))
        self.assertRaises(test.TestingException,
                          self.network.deallocate_fixed_ip,
                          context1, str(fix_addr.address), 'fake')
        # The refresh failed, so the allocation flag must be untouched.
        self.assertFalse(fixed_update.called)
    def test_get_networks_by_uuids_ordering(self):
        """Results from _get_networks_by_uuids follow the requested UUID
        order, not the DB return order.
        """
        self.mox.StubOutWithMock(db, 'network_get_all_by_uuids')
        requested_networks = ['bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                              'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa']
        db.network_get_all_by_uuids(mox.IgnoreArg(), mox.IgnoreArg(),
                                    mox.IgnoreArg()).AndReturn(
                                        [dict(test_network.fake_network, **net)
                                         for net in networks])
        self.mox.ReplayAll()
        res = self.network._get_networks_by_uuids(self.context,
                                                  requested_networks)
        self.assertEqual(res[0]['id'], 1)
        self.assertEqual(res[1]['id'], 0)
    @mock.patch('nova.objects.fixed_ip.FixedIP.get_by_id')
    @mock.patch('nova.objects.floating_ip.FloatingIPList.get_by_host')
    @mock.patch('nova.network.linux_net.iptables_manager._apply')
    def test_init_host_iptables_defer_apply(self, iptable_apply,
                                            floating_get_by_host,
                                            fixed_get_by_id):
        """init_host batches iptables changes: _apply fires exactly once
        even though two floating IPs are processed.
        """
        def get_by_id(context, fixed_ip_id, **kwargs):
            net = objects.Network(bridge='testbridge',
                                  cidr='192.168.1.0/24')
            if fixed_ip_id == 1:
                return objects.FixedIP(address='192.168.1.4',
                                       network=net)
            elif fixed_ip_id == 2:
                return objects.FixedIP(address='192.168.1.5',
                                       network=net)
        def fake_apply():
            # Count how many times iptables rules are actually applied.
            fake_apply.count += 1
        fake_apply.count = 0
        ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
        float1 = objects.FloatingIP(address='1.2.3.4', fixed_ip_id=1)
        float2 = objects.FloatingIP(address='1.2.3.5', fixed_ip_id=2)
        float1._context = ctxt
        float2._context = ctxt
        iptable_apply.side_effect = fake_apply
        floating_get_by_host.return_value = [float1, float2]
        fixed_get_by_id.side_effect = get_by_id
        self.network.init_host()
        self.assertEqual(1, fake_apply.count)
class _TestDomainObject(object):
def __init__(self, **kwargs):
for k, v in kwargs.iteritems():
self.__setattr__(k, v)
class FakeNetwork(object):
    """Attribute bag that mimics a network record.

    Supports both attribute access and dict-style subscripting, with
    ``vlan`` defaulting to None.
    """
    def __init__(self, **kwargs):
        # vlan is always present so callers may read it unconditionally.
        self.vlan = None
        # .items() instead of the Python-2-only .iteritems() so the helper
        # also works under Python 3; setattr is the idiomatic spelling.
        for k, v in kwargs.items():
            setattr(self, k, v)

    def __getitem__(self, item):
        # Allow net['key'] as an alias for net.key.
        return getattr(self, item)
class CommonNetworkTestCase(test.TestCase):
    def setUp(self):
        """Set up a fake request context and the rfc2462 ipv6 backend."""
        super(CommonNetworkTestCase, self).setUp()
        self.context = context.RequestContext('fake', 'fake')
        self.flags(ipv6_backend='rfc2462')
        self.flags(use_local=True, group='conductor')
        ipv6.reset_backend()
    def test_validate_instance_zone_for_dns_domain(self):
        """Validation passes when the instance AZ matches the DNS domain's
        configured availability zone.
        """
        domain = 'example.com'
        az = 'test_az'
        domains = {
            domain: _TestDomainObject(
                domain=domain,
                availability_zone=az)}
        def dnsdomain_get(context, instance_domain):
            return domains.get(instance_domain)
        self.stubs.Set(db, 'dnsdomain_get', dnsdomain_get)
        fake_instance = {'uuid': FAKEUUID,
                         'availability_zone': az}
        manager = network_manager.NetworkManager()
        res = manager._validate_instance_zone_for_dns_domain(self.context,
                                                             fake_instance)
        self.assertTrue(res)
    def fake_create_fixed_ips(self, context, network_id, fixed_cidr=None,
                              extra_reserved=None, bottom_reserved=0,
                              top_reserved=0):
        """No-op stand-in for fixed-IP creation; always returns None."""
        return None
    def test_get_instance_nw_info_client_exceptions(self):
        """InstanceNotFound from the DB layer surfaces to RPC clients as a
        messaging.ExpectedException.
        """
        manager = network_manager.NetworkManager()
        self.mox.StubOutWithMock(manager.db,
                                 'virtual_interface_get_by_instance')
        manager.db.virtual_interface_get_by_instance(
            self.context, FAKEUUID,
            use_slave=False).AndRaise(exception.InstanceNotFound(
                                      instance_id=FAKEUUID))
        self.mox.ReplayAll()
        self.assertRaises(messaging.ExpectedException,
                          manager.get_instance_nw_info,
                          self.context, FAKEUUID, 'fake_rxtx_factor', HOST)
    @mock.patch('nova.db.instance_get')
    @mock.patch('nova.db.fixed_ip_get_by_instance')
    def test_deallocate_for_instance_passes_host_info(self, fixed_get,
                                                      instance_get):
        """deallocate_for_instance forwards the host to each per-IP
        deallocate_fixed_ip call.
        """
        manager = fake_network.FakeNetworkManager()
        db = manager.db
        instance_get.return_value = fake_inst(uuid='ignoreduuid')
        db.virtual_interface_delete_by_instance = lambda _x, _y: None
        ctx = context.RequestContext('igonre', 'igonre')
        fixed_get.return_value = [dict(test_fixed_ip.fake_fixed_ip,
                                       address='1.2.3.4',
                                       network_id=123)]
        manager.deallocate_for_instance(
            ctx, instance=objects.Instance._from_db_object(self.context,
                objects.Instance(), instance_get.return_value))
        self.assertEqual([
            (ctx, '1.2.3.4', 'fake-host')
        ], manager.deallocate_fixed_ip_calls)
    def test_deallocate_for_instance_with_requested_networks(self):
        """With explicit requested networks, every requested fixed IP is
        deallocated with the instance's host.
        """
        manager = fake_network.FakeNetworkManager()
        db = manager.db
        db.virtual_interface_delete_by_instance = mock.Mock()
        ctx = context.RequestContext('igonre', 'igonre')
        requested_networks = objects.NetworkRequestList(
            objects=[objects.NetworkRequest.from_tuple(t)
                     for t in [('123', '1.2.3.4'), ('123', '4.3.2.1')]])
        manager.deallocate_for_instance(
            ctx,
            instance=fake_instance.fake_instance_obj(ctx),
            requested_networks=requested_networks)
        self.assertEqual([
            (ctx, '1.2.3.4', 'fake-host'), (ctx, '4.3.2.1', 'fake-host')
        ], manager.deallocate_fixed_ip_calls)
    @mock.patch('nova.db.fixed_ip_get_by_instance')
    @mock.patch('nova.db.fixed_ip_disassociate')
    def test_remove_fixed_ip_from_instance(self, disassociate, get):
        """Removing a fixed IP deallocates and disassociates that address."""
        manager = fake_network.FakeNetworkManager()
        get.return_value = [
            dict(test_fixed_ip.fake_fixed_ip, **x)
            for x in manager.db.fixed_ip_get_by_instance(None,
                                                         FAKEUUID)]
        manager.remove_fixed_ip_from_instance(self.context, FAKEUUID,
                                              HOST,
                                              '10.0.0.1')
        self.assertEqual(manager.deallocate_called, '10.0.0.1')
        disassociate.assert_called_once_with(self.context, '10.0.0.1')
    @mock.patch('nova.db.fixed_ip_get_by_instance')
    def test_remove_fixed_ip_from_instance_bad_input(self, get):
        """An address not owned by the instance raises
        FixedIpNotFoundForSpecificInstance.
        """
        manager = fake_network.FakeNetworkManager()
        get.return_value = []
        self.assertRaises(exception.FixedIpNotFoundForSpecificInstance,
                          manager.remove_fixed_ip_from_instance,
                          self.context, 99, HOST, 'bad input')
def test_validate_cidrs(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/24',
False, 1, 256, None, None, None,
None, None)
self.assertEqual(1, len(nets))
cidrs = [str(net['cidr']) for net in nets]
self.assertIn('192.168.0.0/24', cidrs)
def test_validate_cidrs_split_exact_in_half(self):
manager = fake_network.FakeNetworkManager()
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.0.0/24',
False, 2, 128, None, None, None,
None, None)
self.assertEqual(2, len(nets))
cidrs = [str(net['cidr']) for net in nets]
self.assertIn('192.168.0.0/25', cidrs)
self.assertIn('192.168.0.128/25', cidrs)
    @mock.patch('nova.db.network_get_all')
    def test_validate_cidrs_split_cidr_in_use_middle_of_range(self, get_all):
        """A /24 already in use mid-range is skipped; the four requested
        subnets come from the remaining space.
        """
        manager = fake_network.FakeNetworkManager()
        get_all.return_value = [dict(test_network.fake_network,
                                     id=1, cidr='192.168.2.0/24')]
        nets = manager.create_networks(self.context.elevated(), 'fake',
                                       '192.168.0.0/16',
                                       False, 4, 256, None, None, None,
                                       None, None)
        self.assertEqual(4, len(nets))
        cidrs = [str(net['cidr']) for net in nets]
        exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
                     '192.168.4.0/24']
        for exp_cidr in exp_cidrs:
            self.assertIn(exp_cidr, cidrs)
        self.assertNotIn('192.168.2.0/24', cidrs)
    @mock.patch('nova.db.network_get_all')
    def test_validate_cidrs_smaller_subnet_in_use(self, get_all):
        """A request that would contain an existing smaller subnet fails."""
        manager = fake_network.FakeNetworkManager()
        get_all.return_value = [dict(test_network.fake_network,
                                     id=1, cidr='192.168.2.9/25')]
        # CidrConflict: requested cidr (192.168.2.0/24) conflicts with
        #               existing smaller cidr
        args = (self.context.elevated(), 'fake', '192.168.2.0/24', False,
                1, 256, None, None, None, None, None)
        self.assertRaises(exception.CidrConflict,
                          manager.create_networks, *args)
    @mock.patch('nova.db.network_get_all')
    def test_validate_cidrs_split_smaller_cidr_in_use(self, get_all):
        """When splitting a /16, a /24 containing an in-use /25 is skipped."""
        manager = fake_network.FakeNetworkManager()
        get_all.return_value = [dict(test_network.fake_network,
                                     id=1, cidr='192.168.2.0/25')]
        nets = manager.create_networks(self.context.elevated(), 'fake',
                                       '192.168.0.0/16',
                                       False, 4, 256, None, None, None, None,
                                       None)
        self.assertEqual(4, len(nets))
        cidrs = [str(net['cidr']) for net in nets]
        exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24',
                     '192.168.4.0/24']
        for exp_cidr in exp_cidrs:
            self.assertIn(exp_cidr, cidrs)
        self.assertNotIn('192.168.2.0/24', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_smaller_cidr_in_use2(self, get_all):
manager = fake_network.FakeNetworkManager()
self.mox.StubOutWithMock(manager.db, 'network_get_all')
get_all.return_value = [dict(test_network.fake_network, id=1,
cidr='192.168.2.9/29')]
nets = manager.create_networks(self.context.elevated(), 'fake',
'192.168.2.0/24',
False, 3, 32, None, None, None, None,
None)
self.assertEqual(3, len(nets))
cidrs = [str(net['cidr']) for net in nets]
exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27']
for exp_cidr in exp_cidrs:
self.assertIn(exp_cidr, cidrs)
self.assertNotIn('192.168.2.0/27', cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_split_all_in_use(self, get_all):
    """With every candidate subnet occupied, allocation must fail."""
    net_manager = fake_network.FakeNetworkManager()
    occupied = [{'id': 1, 'cidr': '192.168.2.9/29'},
                {'id': 2, 'cidr': '192.168.2.64/26'},
                {'id': 3, 'cidr': '192.168.2.128/26'}]
    get_all.return_value = [dict(test_network.fake_network, **values)
                            for values in occupied]
    # CidrConflict: Not enough subnets avail to satisfy requested num_
    # networks - some subnets in requested range already
    # in use
    self.assertRaises(exception.CidrConflict,
                      net_manager.create_networks,
                      self.context.elevated(), 'fake', '192.168.2.0/24',
                      False, 3, 64, None, None, None, None, None)
def test_validate_cidrs_one_in_use(self):
    """network_size * num_networks exceeding the cidr raises ValueError."""
    net_manager = fake_network.FakeNetworkManager()
    # Two networks of 256 addresses cannot fit inside a single /24.
    self.assertRaises(ValueError, net_manager.create_networks,
                      None, 'fake', '192.168.0.0/24', False, 2, 256,
                      None, None, None, None, None)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_already_used(self, get_all):
    """A cidr that exactly matches an existing one must conflict."""
    net_manager = fake_network.FakeNetworkManager()
    get_all.return_value = [dict(test_network.fake_network,
                                 cidr='192.168.0.0/24')]
    # CidrConflict: cidr already in use
    self.assertRaises(exception.CidrConflict,
                      net_manager.create_networks,
                      self.context.elevated(), 'fake', '192.168.0.0/24',
                      False, 1, 256, None, None, None, None, None)
def test_validate_cidrs_too_many(self):
    """Requesting more subnets than the cidr can hold raises ValueError."""
    net_manager = fake_network.FakeNetworkManager()
    # ValueError: Not enough subnets avail to satisfy requested
    # num_networks (200 x 256 addresses will not fit in a /24).
    self.assertRaises(ValueError, net_manager.create_networks,
                      None, 'fake', '192.168.0.0/24', False, 200, 256,
                      None, None, None, None, None)
def test_validate_cidrs_split_partial(self):
    """Splitting a /16 into two networks yields the first two /24s."""
    net_manager = fake_network.FakeNetworkManager()
    nets = net_manager.create_networks(self.context.elevated(), 'fake',
                                       '192.168.0.0/16', False, 2, 256,
                                       None, None, None, None, None)
    returned_cidrs = [str(net['cidr']) for net in nets]
    self.assertIn('192.168.0.0/24', returned_cidrs)
    self.assertIn('192.168.1.0/24', returned_cidrs)
@mock.patch('nova.db.network_get_all')
def test_validate_cidrs_conflict_existing_supernet(self, get_all):
    """A requested cidr nested inside an existing supernet conflicts."""
    net_manager = fake_network.FakeNetworkManager()
    get_all.return_value = [dict(test_network.fake_network,
                                 id=1, cidr='192.168.0.0/8')]
    # CidrConflict: requested cidr (192.168.0.0/24) conflicts
    # with existing supernet
    self.assertRaises(exception.CidrConflict,
                      net_manager.create_networks,
                      self.context.elevated(), 'fake', '192.168.0.0/24',
                      False, 1, 256, None, None, None, None, None)
def test_create_networks(self):
    """create_networks succeeds for a single dual-stack network."""
    net_manager = fake_network.FakeNetworkManager()
    self.stubs.Set(net_manager, '_create_fixed_ips',
                   self.fake_create_fixed_ips)
    self.assertTrue(net_manager.create_networks(
        self.context.elevated(), 'foo', '192.168.0.0/24', None, 1, 256,
        'fd00::/48', None, None, None, None, None))
@mock.patch('nova.db.network_get_all')
def test_create_networks_cidr_already_used(self, get_all):
    """Creating a network over an in-use cidr raises CidrConflict."""
    net_manager = fake_network.FakeNetworkManager()
    get_all.return_value = [dict(test_network.fake_network,
                                 id=1, cidr='192.168.0.0/24')]
    self.assertRaises(exception.CidrConflict,
                      net_manager.create_networks,
                      self.context.elevated(), 'foo', '192.168.0.0/24',
                      None, 1, 256, 'fd00::/48', None, None, None, None,
                      None)
def test_create_networks_many(self):
    """create_networks can split a /16 into ten networks at once."""
    net_manager = fake_network.FakeNetworkManager()
    self.stubs.Set(net_manager, '_create_fixed_ips',
                   self.fake_create_fixed_ips)
    self.assertTrue(net_manager.create_networks(
        self.context.elevated(), 'foo', '192.168.0.0/16', None, 10, 256,
        'fd00::/48', None, None, None, None, None))
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ips_by_virtual_interface')
def test_get_instance_uuids_by_ip_regex(self, fixed_get, network_get):
    """Filter instance uuids by regex matches on fixed IPv4 addresses."""
    manager = fake_network.FakeNetworkManager(self.stubs)
    # Route the mocked db call through the fake manager's in-memory data.
    fixed_get.side_effect = manager.db.fixed_ips_by_virtual_interface
    _vifs = manager.db.virtual_interface_get_all(None)
    fake_context = context.RequestContext('user', 'project')
    network_get.return_value = dict(test_network.fake_network,
                                    **manager.db.network_get(None, 1))
    # Greedy get everything
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip': '.*'})
    self.assertEqual(len(res), len(_vifs))
    # Doesn't exist
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip': '10.0.0.1'})
    self.assertFalse(res)
    # Get instance 1
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip': '172.16.0.2'})
    self.assertTrue(res)
    self.assertEqual(len(res), 1)
    self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
    # Get instance 2
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip': '173.16.0.2'})
    self.assertTrue(res)
    self.assertEqual(len(res), 1)
    self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
    # Get instance 0 and 1 (prefix wildcard matches both 172.16.0.x IPs)
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip': '172.16.0.*'})
    self.assertTrue(res)
    self.assertEqual(len(res), 2)
    self.assertEqual(res[0]['instance_uuid'], _vifs[0]['instance_uuid'])
    self.assertEqual(res[1]['instance_uuid'], _vifs[1]['instance_uuid'])
    # Get instance 1 and 2 ('.' wildcard matches both 172.x and 173.x)
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip': '17..16.0.2'})
    self.assertTrue(res)
    self.assertEqual(len(res), 2)
    self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
    self.assertEqual(res[1]['instance_uuid'], _vifs[2]['instance_uuid'])
@mock.patch('nova.db.network_get')
def test_get_instance_uuids_by_ipv6_regex(self, network_get):
    """Filter instance uuids by regex matches on IPv6 addresses."""
    manager = fake_network.FakeNetworkManager(self.stubs)
    _vifs = manager.db.virtual_interface_get_all(None)
    fake_context = context.RequestContext('user', 'project')

    def _network_get(context, network_id, **args):
        # Wrap the fake db rows into complete network dicts.
        return dict(test_network.fake_network,
                    **manager.db.network_get(context, network_id))
    network_get.side_effect = _network_get
    # Greedy get everything
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip6': '.*'})
    self.assertEqual(len(res), len(_vifs))
    # Doesn't exist
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip6': '.*1034.*'})
    self.assertFalse(res)
    # Get instance 1
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip6': '2001:.*2'})
    self.assertTrue(res)
    self.assertEqual(len(res), 1)
    self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
    # Get instance 2 (full literal address)
    ip6 = '2001:db8:69:1f:dead:beff:feff:ef03'
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip6': ip6})
    self.assertTrue(res)
    self.assertEqual(len(res), 1)
    self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
    # Get instance 0 and 1 (character class matches ...ef01 and ...ef02)
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip6': '.*ef0[1,2]'})
    self.assertTrue(res)
    self.assertEqual(len(res), 2)
    self.assertEqual(res[0]['instance_uuid'], _vifs[0]['instance_uuid'])
    self.assertEqual(res[1]['instance_uuid'], _vifs[1]['instance_uuid'])
    # Get instance 1 and 2 ('.' wildcards in two hextets)
    ip6 = '2001:db8:69:1.:dead:beff:feff:ef0.'
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'ip6': ip6})
    self.assertTrue(res)
    self.assertEqual(len(res), 2)
    self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
    self.assertEqual(res[1]['instance_uuid'], _vifs[2]['instance_uuid'])
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.fixed_ips_by_virtual_interface')
def test_get_instance_uuids_by_ip(self, fixed_get, network_get):
    """The 'fixed_ip' filter matches whole addresses only, never regexes."""
    manager = fake_network.FakeNetworkManager(self.stubs)
    # Route the mocked db call through the fake manager's in-memory data.
    fixed_get.side_effect = manager.db.fixed_ips_by_virtual_interface
    _vifs = manager.db.virtual_interface_get_all(None)
    fake_context = context.RequestContext('user', 'project')
    network_get.return_value = dict(test_network.fake_network,
                                    **manager.db.network_get(None, 1))
    # No regex for you!
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'fixed_ip': '.*'})
    self.assertFalse(res)
    # Doesn't exist
    ip = '10.0.0.1'
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'fixed_ip': ip})
    self.assertFalse(res)
    # Get instance 1
    ip = '172.16.0.2'
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'fixed_ip': ip})
    self.assertTrue(res)
    self.assertEqual(len(res), 1)
    self.assertEqual(res[0]['instance_uuid'], _vifs[1]['instance_uuid'])
    # Get instance 2
    ip = '173.16.0.2'
    res = manager.get_instance_uuids_by_ip_filter(fake_context,
                                                  {'fixed_ip': ip})
    self.assertTrue(res)
    self.assertEqual(len(res), 1)
    self.assertEqual(res[0]['instance_uuid'], _vifs[2]['instance_uuid'])
@mock.patch('nova.db.network_get_by_uuid')
def test_get_network(self, get):
    """get_network returns the network matching the requested uuid."""
    net_manager = fake_network.FakeNetworkManager()
    user_ctxt = context.RequestContext('user', 'project')
    get.return_value = dict(test_network.fake_network, **networks[0])
    requested_uuid = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
    found = net_manager.get_network(user_ctxt, requested_uuid)
    self.assertEqual(found['uuid'], requested_uuid)
@mock.patch('nova.db.network_get_by_uuid')
def test_get_network_not_found(self, get):
    """An unknown uuid surfaces as NetworkNotFound."""
    net_manager = fake_network.FakeNetworkManager()
    user_ctxt = context.RequestContext('user', 'project')
    # The db layer raises NetworkNotFoundForUUID; callers see the
    # generic NetworkNotFound.
    get.side_effect = exception.NetworkNotFoundForUUID(uuid='foo')
    self.assertRaises(exception.NetworkNotFound,
                      net_manager.get_network, user_ctxt,
                      'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee')
@mock.patch('nova.db.network_get_all')
def test_get_all_networks(self, get_all):
    """get_all_networks returns every network the db reports, in order."""
    manager = fake_network.FakeNetworkManager()
    fake_context = context.RequestContext('user', 'project')
    get_all.return_value = [dict(test_network.fake_network, **net)
                            for net in networks]
    output = manager.get_all_networks(fake_context)
    # Assert on the manager's output, not the fixture list: the old
    # `len(networks)` check only re-verified the test data itself.
    self.assertEqual(2, len(output))
    self.assertEqual(output[0]['uuid'],
                     'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
    self.assertEqual(output[1]['uuid'],
                     'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb')
@mock.patch('nova.db.network_get_by_uuid')
@mock.patch('nova.db.network_disassociate')
def test_disassociate_network(self, disassociate, get):
    """disassociate_network succeeds for an existing network uuid."""
    net_manager = fake_network.FakeNetworkManager()
    disassociate.return_value = True
    user_ctxt = context.RequestContext('user', 'project')
    get.return_value = dict(test_network.fake_network, **networks[0])
    net_manager.disassociate_network(
        user_ctxt, 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
@mock.patch('nova.db.network_get_by_uuid')
def test_disassociate_network_not_found(self, get):
    """Disassociating an unknown network raises NetworkNotFound."""
    net_manager = fake_network.FakeNetworkManager()
    user_ctxt = context.RequestContext('user', 'project')
    get.side_effect = exception.NetworkNotFoundForUUID(uuid='fake')
    self.assertRaises(exception.NetworkNotFound,
                      net_manager.disassociate_network, user_ctxt,
                      'eeeeeeee-eeee-eeee-eeee-eeeeeeeeeeee')
def _test_init_host_dynamic_fixed_range(self, net_manager):
    """Verify init_host NATs the networks found via db lookup, and that a
    network added afterwards through _setup_network_on_host gets NAT/ACCEPT
    rules as well (CONF.fixed_range unset -> dynamic determination).
    """
    self.flags(fake_network=True,
               routing_source_ip='172.16.0.1',
               metadata_host='172.16.0.1',
               public_interface='eth1',
               dmz_cidr=['10.0.3.0/24'])
    binary_name = linux_net.get_binary_name()
    # Stub out calls we don't want to really run, mock the db
    self.stubs.Set(linux_net.iptables_manager, '_apply', lambda: None)
    self.stubs.Set(floating_ips.FloatingIP, 'init_host_floating_ips',
                   lambda *args: None)
    self.stubs.Set(net_manager.l3driver, 'initialize_gateway',
                   lambda *args: None)
    self.mox.StubOutWithMock(db, 'network_get_all_by_host')
    fake_networks = [dict(test_network.fake_network, **n)
                     for n in networks]
    db.network_get_all_by_host(mox.IgnoreArg(),
                               mox.IgnoreArg()
                               ).MultipleTimes().AndReturn(fake_networks)
    self.mox.ReplayAll()
    net_manager.init_host()
    # Get the iptables rules that got created
    current_lines = []
    new_lines = linux_net.iptables_manager._modify_rules(
        current_lines,
        linux_net.iptables_manager.ipv4['nat'],
        table_name='nat')
    # For each known network: one SNAT rule plus ACCEPT rules for the
    # metadata host, the dmz cidr and intra-network traffic.
    expected_lines = ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
                      '-j SNAT --to-source %s -o %s'
                      % (binary_name, networks[0]['cidr'],
                         CONF.routing_source_ip,
                         CONF.public_interface),
                      '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
                      % (binary_name, networks[0]['cidr'],
                         CONF.metadata_host),
                      '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
                      % (binary_name, networks[0]['cidr'],
                         CONF.dmz_cidr[0]),
                      '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
                      '--ctstate DNAT -j ACCEPT' % (binary_name,
                                                    networks[0]['cidr'],
                                                    networks[0]['cidr']),
                      '[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
                      '-j SNAT --to-source %s -o %s'
                      % (binary_name, networks[1]['cidr'],
                         CONF.routing_source_ip,
                         CONF.public_interface),
                      '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
                      % (binary_name, networks[1]['cidr'],
                         CONF.metadata_host),
                      '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
                      % (binary_name, networks[1]['cidr'],
                         CONF.dmz_cidr[0]),
                      '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! '
                      '--ctstate DNAT -j ACCEPT' % (binary_name,
                                                    networks[1]['cidr'],
                                                    networks[1]['cidr'])]
    # Compare the expected rules against the actual ones
    for line in expected_lines:
        self.assertIn(line, new_lines)
    # Add an additional network and ensure the rules get configured
    new_network = {'id': 2,
                   'uuid': 'cccccccc-cccc-cccc-cccc-cccccccc',
                   'label': 'test2',
                   'injected': False,
                   'multi_host': False,
                   'cidr': '192.168.2.0/24',
                   'cidr_v6': '2001:dba::/64',
                   'gateway_v6': '2001:dba::1',
                   'netmask_v6': '64',
                   'netmask': '255.255.255.0',
                   'bridge': 'fa1',
                   'bridge_interface': 'fake_fa1',
                   'gateway': '192.168.2.1',
                   'dhcp_server': '192.168.2.1',
                   'broadcast': '192.168.2.255',
                   'dns1': '192.168.2.1',
                   'dns2': '192.168.2.2',
                   'vlan': None,
                   'host': HOST,
                   'project_id': 'fake_project',
                   'vpn_public_address': '192.168.2.2',
                   'vpn_public_port': '22',
                   'vpn_private_address': '10.0.0.2'}
    new_network_obj = objects.Network._from_db_object(
        self.context, objects.Network(),
        dict(test_network.fake_network, **new_network))
    ctxt = context.get_admin_context()
    net_manager._setup_network_on_host(ctxt, new_network_obj)
    # Get the new iptables rules that got created from adding a new network
    current_lines = []
    new_lines = linux_net.iptables_manager._modify_rules(
        current_lines,
        linux_net.iptables_manager.ipv4['nat'],
        table_name='nat')
    # Add the new expected rules to the old ones
    expected_lines += ['[0:0] -A %s-snat -s %s -d 0.0.0.0/0 '
                       '-j SNAT --to-source %s -o %s'
                       % (binary_name, new_network['cidr'],
                          CONF.routing_source_ip,
                          CONF.public_interface),
                       '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT'
                       % (binary_name, new_network['cidr'],
                          CONF.metadata_host),
                       '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT'
                       % (binary_name, new_network['cidr'],
                          CONF.dmz_cidr[0]),
                       '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack '
                       '! --ctstate DNAT -j ACCEPT' % (binary_name,
                                                       new_network['cidr'],
                                                       new_network['cidr'])]
    # Compare the expected rules (with new network) against the actual ones
    for line in expected_lines:
        self.assertIn(line, new_lines)
def test_flatdhcpmanager_dynamic_fixed_range(self):
    """Test FlatDHCPManager NAT rules for fixed_range."""
    # CONF.fixed_range is not set (defaults to None), so the manager
    # determines the networks to NAT via db lookup instead.
    self.network = network_manager.FlatDHCPManager(host=HOST)
    self.network.db = db
    self._test_init_host_dynamic_fixed_range(self.network)
def test_vlanmanager_dynamic_fixed_range(self):
    """Test VlanManager NAT rules for fixed_range."""
    # CONF.fixed_range is not set (defaults to None), so the manager
    # determines the networks to NAT via db lookup instead.
    self.network = network_manager.VlanManager(host=HOST)
    self.network.db = db
    self._test_init_host_dynamic_fixed_range(self.network)
class TestRPCFixedManager(network_manager.RPCAllocateFixedIP,
                          network_manager.NetworkManager):
    """Dummy manager that implements RPCAllocateFixedIP.

    Mixes RPCAllocateFixedIP into a concrete NetworkManager so the RPC
    fixed-IP allocation path can be exercised in isolation.
    """
class RPCAllocateTestCase(test.TestCase):
    """Tests nova.network.manager.RPCAllocateFixedIP."""

    def setUp(self):
        super(RPCAllocateTestCase, self).setUp()
        self.flags(use_local=True, group='conductor')
        self.rpc_fixed = TestRPCFixedManager()
        self.context = context.RequestContext('fake', 'fake')

    def test_rpc_allocate(self):
        """Test to verify bug 855030 doesn't resurface.

        Makes sure _rpc_allocate_fixed_ip returns a value so the call
        returns properly and the greenpool completes.
        """
        address = '10.10.10.10'

        def fake_allocate(*args, **kwargs):
            return address

        def fake_network_get(*args, **kwargs):
            return test_network.fake_network
        self.stubs.Set(self.rpc_fixed, 'allocate_fixed_ip', fake_allocate)
        self.stubs.Set(self.rpc_fixed.db, 'network_get', fake_network_get)
        rval = self.rpc_fixed._rpc_allocate_fixed_ip(self.context,
                                                     'fake_instance',
                                                     'fake_network')
        # The stubbed allocation's address must be passed through.
        self.assertEqual(rval, address)
class TestFloatingIPManager(floating_ips.FloatingIP,
                            network_manager.NetworkManager):
    """Dummy manager that implements FloatingIP.

    Mixes the FloatingIP mixin into a concrete NetworkManager so the
    floating-IP code paths can be exercised in isolation.
    """
class AllocateTestCase(test.TestCase):
    """Exercises allocate_for_instance against real services backed by
    the test database and sample networks."""

    def setUp(self):
        super(AllocateTestCase, self).setUp()
        dns = 'nova.network.noop_dns_driver.NoopDNSDriver'
        self.flags(instance_dns_manager=dns)
        self.useFixture(test.SampleNetworks())
        # Start real conductor/compute/network services for the test.
        self.conductor = self.start_service(
            'conductor', manager=CONF.conductor.manager)
        self.compute = self.start_service('compute')
        self.network = self.start_service('network')
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id,
                                              is_admin=True)
        self.user_context = context.RequestContext('testuser',
                                                   'testproject')

    def test_allocate_for_instance(self):
        """Allocation hands out exactly one valid fixed IPv4 address."""
        address = "10.10.10.10"
        self.flags(auto_assign_floating_ip=True)
        db.floating_ip_create(self.context,
                              {'address': address,
                               'pool': 'nova'})
        inst = objects.Instance()
        inst.host = self.compute.host
        inst.display_name = HOST
        inst.instance_type_id = 1
        inst.uuid = FAKEUUID
        inst.create(self.context)
        # Point every sample network at our network service's host.
        networks = db.network_get_all(self.context)
        for network in networks:
            db.network_update(self.context, network['id'],
                              {'host': self.network.host})
        project_id = self.user_context.project_id
        nw_info = self.network.allocate_for_instance(self.user_context,
            instance_id=inst['id'], instance_uuid=inst['uuid'],
            host=inst['host'], vpn=None, rxtx_factor=3,
            project_id=project_id, macs=None)
        self.assertEqual(1, len(nw_info))
        fixed_ip = nw_info.fixed_ips()[0]['address']
        self.assertTrue(utils.is_valid_ipv4(fixed_ip))
        self.network.deallocate_for_instance(self.context,
                                             instance=inst)

    def test_allocate_for_instance_illegal_network(self):
        """Requesting networks owned by another project must fail."""
        networks = db.network_get_all(self.context)
        requested_networks = []
        for network in networks:
            # set all networks to other projects
            db.network_update(self.context, network['id'],
                              {'host': self.network.host,
                               'project_id': 'otherid'})
            requested_networks.append((network['uuid'], None))
        # set the first network to our project
        db.network_update(self.context, networks[0]['id'],
                          {'project_id': self.user_context.project_id})
        inst = objects.Instance()
        inst.host = self.compute.host
        inst.display_name = HOST
        inst.instance_type_id = 1
        inst.uuid = FAKEUUID
        inst.create(self.context)
        # The request still names other projects' networks, so the
        # allocation is rejected with NetworkNotFoundForProject.
        self.assertRaises(exception.NetworkNotFoundForProject,
            self.network.allocate_for_instance, self.user_context,
            instance_id=inst['id'], instance_uuid=inst['uuid'],
            host=inst['host'], vpn=None, rxtx_factor=3,
            project_id=self.context.project_id, macs=None,
            requested_networks=requested_networks)

    def test_allocate_for_instance_with_mac(self):
        """A caller-supplied MAC is used for the allocated interface."""
        available_macs = set(['ca:fe:de:ad:be:ef'])
        inst = db.instance_create(self.context, {'host': self.compute.host,
                                                 'display_name': HOST,
                                                 'instance_type_id': 1})
        networks = db.network_get_all(self.context)
        for network in networks:
            db.network_update(self.context, network['id'],
                              {'host': self.network.host})
        project_id = self.context.project_id
        nw_info = self.network.allocate_for_instance(self.user_context,
            instance_id=inst['id'], instance_uuid=inst['uuid'],
            host=inst['host'], vpn=None, rxtx_factor=3,
            project_id=project_id, macs=available_macs)
        assigned_macs = [vif['address'] for vif in nw_info]
        self.assertEqual(1, len(assigned_macs))
        # The single provided MAC must have been consumed and assigned.
        self.assertEqual(available_macs.pop(), assigned_macs[0])
        self.network.deallocate_for_instance(self.context,
                                             instance_id=inst['id'],
                                             host=self.network.host,
                                             project_id=project_id)

    def test_allocate_for_instance_not_enough_macs(self):
        """An empty MAC pool makes virtual interface creation fail."""
        available_macs = set()
        inst = db.instance_create(self.context, {'host': self.compute.host,
                                                 'display_name': HOST,
                                                 'instance_type_id': 1})
        networks = db.network_get_all(self.context)
        for network in networks:
            db.network_update(self.context, network['id'],
                              {'host': self.network.host})
        project_id = self.context.project_id
        self.assertRaises(exception.VirtualInterfaceCreateException,
                          self.network.allocate_for_instance,
                          self.user_context,
                          instance_id=inst['id'], instance_uuid=inst['uuid'],
                          host=inst['host'], vpn=None, rxtx_factor=3,
                          project_id=project_id, macs=available_macs)
class FloatingIPTestCase(test.TestCase):
"""Tests nova.network.manager.FloatingIP."""
def setUp(self):
    """Set up a TestFloatingIPManager against the test db, with logs
    redirected into a throwaway temp directory."""
    super(FloatingIPTestCase, self).setUp()
    self.tempdir = self.useFixture(fixtures.TempDir()).path
    self.flags(log_dir=self.tempdir)
    self.flags(use_local=True, group='conductor')
    self.network = TestFloatingIPManager()
    self.network.db = db
    self.project_id = 'testproject'
    self.context = context.RequestContext('testuser', self.project_id,
                                          is_admin=False)
@mock.patch('nova.db.fixed_ip_get')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.instance_get_by_uuid')
@mock.patch('nova.db.service_get_by_host_and_topic')
@mock.patch('nova.db.floating_ip_get_by_address')
def test_disassociate_floating_ip_multi_host_calls(self, floating_get,
                                                   service_get,
                                                   inst_get, net_get,
                                                   fixed_get):
    """On a multi-host network with a live remote service, disassociation
    is delegated via RPC to the host running the instance."""
    floating_ip = dict(test_floating_ip.fake_floating_ip,
                       fixed_ip_id=12)
    fixed_ip = dict(test_fixed_ip.fake_fixed_ip,
                    network_id=None,
                    instance_uuid='instance-uuid')
    network = dict(test_network.fake_network,
                   multi_host=True)
    # The instance lives on a different host than the one handling this
    # request, which is what forces the RPC path.
    instance = dict(fake_instance.fake_db_instance(host='some-other-host'))
    ctxt = context.RequestContext('testuser', 'testproject',
                                  is_admin=False)
    self.stubs.Set(self.network,
                   '_floating_ip_owned_by_project',
                   lambda _x, _y: True)
    floating_get.return_value = floating_ip
    fixed_get.return_value = fixed_ip
    net_get.return_value = network
    inst_get.return_value = instance
    service_get.return_value = test_service.fake_service
    # Report the remote network service as alive so the call is routed
    # to it instead of being handled locally.
    self.stubs.Set(self.network.servicegroup_api,
                   'service_is_up',
                   lambda _x: True)
    self.mox.StubOutWithMock(
        self.network.network_rpcapi, '_disassociate_floating_ip')
    # Expect exactly one RPC aimed at the instance's host.
    self.network.network_rpcapi._disassociate_floating_ip(
        ctxt, 'fl_ip', mox.IgnoreArg(), 'some-other-host', 'instance-uuid')
    self.mox.ReplayAll()
    self.network.disassociate_floating_ip(ctxt, 'fl_ip', True)
@mock.patch('nova.db.fixed_ip_get_by_address')
@mock.patch('nova.db.network_get')
@mock.patch('nova.db.instance_get_by_uuid')
@mock.patch('nova.db.floating_ip_get_by_address')
def test_associate_floating_ip_multi_host_calls(self, floating_get,
                                                inst_get, net_get,
                                                fixed_get):
    """On a multi-host network, association is delegated via RPC to the
    host running the instance."""
    floating_ip = dict(test_floating_ip.fake_floating_ip,
                       fixed_ip_id=None)
    fixed_ip = dict(test_fixed_ip.fake_fixed_ip,
                    network_id=None,
                    instance_uuid='instance-uuid')
    network = dict(test_network.fake_network,
                   multi_host=True)
    # The instance runs elsewhere, so the manager must RPC to its host.
    instance = dict(fake_instance.fake_db_instance(host='some-other-host'))
    ctxt = context.RequestContext('testuser', 'testproject',
                                  is_admin=False)
    self.stubs.Set(self.network,
                   '_floating_ip_owned_by_project',
                   lambda _x, _y: True)
    floating_get.return_value = floating_ip
    fixed_get.return_value = fixed_ip
    net_get.return_value = network
    inst_get.return_value = instance
    self.mox.StubOutWithMock(
        self.network.network_rpcapi, '_associate_floating_ip')
    # Expect exactly one RPC aimed at the instance's host.
    self.network.network_rpcapi._associate_floating_ip(
        ctxt, 'fl_ip', 'fix_ip', mox.IgnoreArg(), 'some-other-host',
        'instance-uuid')
    self.mox.ReplayAll()
    self.network.associate_floating_ip(ctxt, 'fl_ip', 'fix_ip', True)
def test_double_deallocation(self):
    """Deallocating an instance without fixed networks must not fault."""
    instance_ref = db.instance_create(self.context,
                                      {"project_id": self.project_id})
    # Run the deallocation twice: if either call faults, the manager
    # cannot handle instances that hold no addresses.
    for _ in range(2):
        self.network.deallocate_for_instance(
            self.context, instance_id=instance_ref['id'])
def test_deallocate_floating_ip_quota_rollback(self):
    """The quota reservation is rolled back when floating_ip_deallocate
    reports nothing deallocated (returns None)."""
    ctxt = context.RequestContext('testuser', 'testproject',
                                  is_admin=False)

    def fake(*args, **kwargs):
        return dict(test_floating_ip.fake_floating_ip,
                    address='10.0.0.1', fixed_ip_id=None,
                    project_id=ctxt.project_id)
    self.stubs.Set(self.network.db, 'floating_ip_get_by_address', fake)
    self.mox.StubOutWithMock(db, 'floating_ip_deallocate')
    self.mox.StubOutWithMock(self.network,
                             '_floating_ip_owned_by_project')
    self.mox.StubOutWithMock(quota.QUOTAS, 'reserve')
    self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')
    # Expected call order: reserve -> ownership check -> deallocate
    # -> rollback (since deallocate returned None, not a row count).
    quota.QUOTAS.reserve(self.context,
                         floating_ips=-1,
                         project_id='testproject').AndReturn('fake-rsv')
    self.network._floating_ip_owned_by_project(self.context,
                                               mox.IgnoreArg())
    db.floating_ip_deallocate(mox.IgnoreArg(),
                              mox.IgnoreArg()).AndReturn(None)
    quota.QUOTAS.rollback(self.context, 'fake-rsv',
                          project_id='testproject')
    self.mox.ReplayAll()
    self.network.deallocate_floating_ip(self.context, '10.0.0.1')
def test_deallocation_deleted_instance(self):
    """Deallocation copes with an instance already marked deleted."""
    self.stubs.Set(self.network, '_teardown_network_on_host',
                   lambda *args, **kwargs: None)
    instance = objects.Instance()
    instance.project_id = self.project_id
    instance.deleted = True
    instance.create(self.context)
    net_ref = db.network_create_safe(
        self.context.elevated(),
        {'project_id': self.project_id, 'host': CONF.host, 'label': 'foo'})
    fixed_ref = db.fixed_ip_create(
        self.context,
        {'allocated': True, 'instance_uuid': instance.uuid,
         'address': '10.1.1.1', 'network_id': net_ref['id']})
    db.floating_ip_create(
        self.context,
        {'address': '10.10.10.10', 'instance_uuid': instance.uuid,
         'fixed_ip_id': fixed_ref['id'], 'project_id': self.project_id})
    self.network.deallocate_for_instance(self.context, instance=instance)
def test_deallocation_duplicate_floating_ip(self):
    """A soft-deleted duplicate floating IP must not break deallocation."""
    self.stubs.Set(self.network, '_teardown_network_on_host',
                   lambda *args, **kwargs: None)
    instance = objects.Instance()
    instance.project_id = self.project_id
    instance.create(self.context)
    net_ref = db.network_create_safe(
        self.context.elevated(),
        {'project_id': self.project_id, 'host': CONF.host, 'label': 'foo'})
    fixed_ref = db.fixed_ip_create(
        self.context,
        {'allocated': True, 'instance_uuid': instance.uuid,
         'address': '10.1.1.1', 'network_id': net_ref['id']})
    # A previously deleted record shares the address with the live one.
    db.floating_ip_create(self.context,
                          {'address': '10.10.10.10', 'deleted': True})
    db.floating_ip_create(
        self.context,
        {'address': '10.10.10.10', 'instance_uuid': instance.uuid,
         'fixed_ip_id': fixed_ref['id'], 'project_id': self.project_id})
    self.network.deallocate_for_instance(self.context, instance=instance)
@mock.patch('nova.db.fixed_ip_get')
@mock.patch('nova.db.floating_ip_get_by_address')
@mock.patch('nova.db.floating_ip_update')
def test_migrate_instance_start(self, floating_update, floating_get,
                                fixed_get):
    """migrate_instance_start tears down floating IPs on the source."""
    called = {'count': 0}

    def fake_floating_ip_get_by_address(context, address):
        return dict(test_floating_ip.fake_floating_ip,
                    address=address,
                    fixed_ip_id=0)

    def fake_is_stale_floating_ip_address(context, floating_ip):
        # Single out exactly one of the three addresses in the stale
        # check; the other two go through remove_floating_ip.
        return str(floating_ip.address) == '172.24.4.23'
    floating_get.side_effect = fake_floating_ip_get_by_address
    fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                  instance_uuid='fake_uuid',
                                  address='10.0.0.2',
                                  network=test_network.fake_network)
    floating_update.return_value = fake_floating_ip_get_by_address(
        None, '1.2.3.4')

    def fake_remove_floating_ip(floating_addr, fixed_addr, interface,
                                network):
        called['count'] += 1

    def fake_clean_conntrack(fixed_ip):
        # Guard: conntrack cleanup must only see the expected fixed IP.
        if not str(fixed_ip) == "10.0.0.2":
            raise exception.FixedIpInvalid(address=fixed_ip)
    self.stubs.Set(self.network, '_is_stale_floating_ip_address',
                   fake_is_stale_floating_ip_address)
    self.stubs.Set(self.network.l3driver, 'remove_floating_ip',
                   fake_remove_floating_ip)
    self.stubs.Set(self.network.driver, 'clean_conntrack',
                   fake_clean_conntrack)
    self.mox.ReplayAll()
    addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
    self.network.migrate_instance_start(self.context,
                                        instance_uuid=FAKEUUID,
                                        floating_addresses=addresses,
                                        rxtx_factor=3,
                                        project_id=self.project_id,
                                        source='fake_source',
                                        dest='fake_dest')
    # remove_floating_ip ran for two of the three addresses.
    self.assertEqual(called['count'], 2)
@mock.patch('nova.db.fixed_ip_get')
@mock.patch('nova.db.floating_ip_update')
def test_migrate_instance_finish(self, floating_update, fixed_get):
    """migrate_instance_finish re-plumbs floating IPs on the destination."""
    called = {'count': 0}

    def fake_floating_ip_get_by_address(context, address):
        return dict(test_floating_ip.fake_floating_ip,
                    address=address,
                    fixed_ip_id=0)

    def fake_is_stale_floating_ip_address(context, floating_ip):
        # Single out exactly one of the three addresses in the stale
        # check; the other two go through add_floating_ip.
        return str(floating_ip.address) == '172.24.4.23'
    fixed_get.return_value = dict(test_fixed_ip.fake_fixed_ip,
                                  instance_uuid='fake_uuid',
                                  address='10.0.0.2',
                                  network=test_network.fake_network)
    floating_update.return_value = fake_floating_ip_get_by_address(
        None, '1.2.3.4')

    def fake_add_floating_ip(floating_addr, fixed_addr, interface,
                             network):
        called['count'] += 1
    self.stubs.Set(self.network.db, 'floating_ip_get_by_address',
                   fake_floating_ip_get_by_address)
    self.stubs.Set(self.network, '_is_stale_floating_ip_address',
                   fake_is_stale_floating_ip_address)
    self.stubs.Set(self.network.l3driver, 'add_floating_ip',
                   fake_add_floating_ip)
    self.mox.ReplayAll()
    addresses = ['172.24.4.23', '172.24.4.24', '172.24.4.25']
    self.network.migrate_instance_finish(self.context,
                                         instance_uuid=FAKEUUID,
                                         floating_addresses=addresses,
                                         host='fake_dest',
                                         rxtx_factor=3,
                                         project_id=self.project_id,
                                         source='fake_source')
    # add_floating_ip ran for two of the three addresses.
    self.assertEqual(called['count'], 2)
def test_floating_dns_create_conflict(self):
    """Adding the same DNS entry twice raises FloatingIpDNSExists."""
    zone = "example.org"
    address = "10.10.10.11"
    name = "foo"
    self.network.add_dns_entry(self.context, address, name, "A", zone)
    self.assertRaises(exception.FloatingIpDNSExists,
                      self.network.add_dns_entry, self.context,
                      address, name, "A", zone)
def test_floating_create_and_get(self):
    """Entries added for one address are retrievable by address and name."""
    zone = "example.org"
    address = "10.10.10.11"
    names = ["foo", "bar"]
    # Nothing exists for the address before anything is added.
    self.assertFalse(self.network.get_dns_entries_by_address(
        self.context, address, zone))
    for name in names:
        self.network.add_dns_entry(self.context, address, name, "A", zone)
    by_address = self.network.get_dns_entries_by_address(self.context,
                                                         address, zone)
    self.assertEqual(len(by_address), 2)
    self.assertEqual(by_address[0], names[0])
    self.assertEqual(by_address[1], names[1])
    by_name = self.network.get_dns_entries_by_name(self.context,
                                                   names[0], zone)
    self.assertEqual(len(by_name), 1)
    self.assertEqual(by_name[0], address)
def test_floating_dns_delete(self):
    """Deleting one of two entries leaves the other; re-delete raises."""
    zone = "example.org"
    address = "10.10.10.11"
    self.network.add_dns_entry(self.context, address, "foo", "A", zone)
    self.network.add_dns_entry(self.context, address, "bar", "A", zone)
    self.network.delete_dns_entry(self.context, "foo", zone)
    remaining = self.network.get_dns_entries_by_address(self.context,
                                                        address, zone)
    self.assertEqual(len(remaining), 1)
    self.assertEqual(remaining[0], "bar")
    # The entry is already gone, so a second delete must raise NotFound.
    self.assertRaises(exception.NotFound,
                      self.network.delete_dns_entry, self.context,
                      "foo", zone)
def test_floating_dns_domains_public(self):
    """Public DNS domain create/delete requires admin; deleting a domain
    also removes the entries registered in it."""
    zone1 = "testzone"
    domain1 = "example.org"
    domain2 = "example.com"
    address1 = '10.10.10.10'
    entryname = 'testentry'
    context_admin = context.RequestContext('testuser', 'testproject',
                                           is_admin=True)
    # Creating a public domain as non-admin must be rejected.
    self.assertRaises(exception.AdminRequired,
                      self.network.create_public_dns_domain, self.context,
                      domain1, zone1)
    self.network.create_public_dns_domain(context_admin, domain1,
                                          'testproject')
    self.network.create_public_dns_domain(context_admin, domain2,
                                          'fakeproject')
    domains = self.network.get_dns_domains(self.context)
    self.assertEqual(len(domains), 2)
    self.assertEqual(domains[0]['domain'], domain1)
    self.assertEqual(domains[1]['domain'], domain2)
    self.assertEqual(domains[0]['project'], 'testproject')
    self.assertEqual(domains[1]['project'], 'fakeproject')
    self.network.add_dns_entry(self.context, address1, entryname,
                               'A', domain1)
    entries = self.network.get_dns_entries_by_name(self.context,
                                                   entryname, domain1)
    self.assertEqual(len(entries), 1)
    self.assertEqual(entries[0], address1)
    # Deleting a public domain is admin-only as well.
    self.assertRaises(exception.AdminRequired,
                      self.network.delete_dns_domain, self.context,
                      domain1)
    self.network.delete_dns_domain(context_admin, domain1)
    self.network.delete_dns_domain(context_admin, domain2)
    # Verify that deleting the domain deleted the associated entry
    entries = self.network.get_dns_entries_by_name(self.context,
                                                   entryname, domain1)
    self.assertFalse(entries)
    def test_delete_all_by_ip(self):
        """_delete_all_entries_for_ip removes the address's entries from
        every known domain (domain list stubbed to include a subdomain).
        """
        domain1 = "example.org"
        domain2 = "example.com"
        address = "10.10.10.10"
        name1 = "foo"
        name2 = "bar"
        def fake_domains(context):
            # Stubbed domain listing; includes a subdomain of domain1.
            return [{'domain': 'example.org', 'scope': 'public'},
                    {'domain': 'example.com', 'scope': 'public'},
                    {'domain': 'test.example.org', 'scope': 'public'}]
        self.stubs.Set(self.network, 'get_dns_domains', fake_domains)
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        self.network.create_public_dns_domain(context_admin, domain1,
                                              'testproject')
        self.network.create_public_dns_domain(context_admin, domain2,
                                              'fakeproject')
        domains = self.network.get_dns_domains(self.context)
        # Register two names for the address in every domain.
        for domain in domains:
            self.network.add_dns_entry(self.context, address,
                                       name1, "A", domain['domain'])
            self.network.add_dns_entry(self.context, address,
                                       name2, "A", domain['domain'])
            entries = self.network.get_dns_entries_by_address(self.context,
                                                              address,
                                                              domain['domain'])
            self.assertEqual(len(entries), 2)
        self.network._delete_all_entries_for_ip(self.context, address)
        # All entries for the address must now be gone in every domain.
        for domain in domains:
            entries = self.network.get_dns_entries_by_address(self.context,
                                                              address,
                                                              domain['domain'])
            self.assertFalse(entries)
        self.network.delete_dns_domain(context_admin, domain1)
        self.network.delete_dns_domain(context_admin, domain2)
    def test_mac_conflicts(self):
        # Make sure MAC collisions are retried.
        self.flags(create_unique_mac_address_attempts=3)
        ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
        # macs is consumed from the end: the colliding address is handed
        # out first, then the non-colliding one.
        macs = ['bb:bb:bb:bb:bb:bb', 'aa:aa:aa:aa:aa:aa']
        # Create a VIF with aa:aa:aa:aa:aa:aa
        crash_test_dummy_vif = {
            'address': macs[1],
            'instance_uuid': 'fake_uuid',
            'network_id': 123,
            'uuid': 'fake_uuid',
            }
        self.network.db.virtual_interface_create(ctxt, crash_test_dummy_vif)
        # Hand out a collision first, then a legit MAC
        def fake_gen_mac():
            return macs.pop()
        self.stubs.Set(utils, 'generate_mac_address', fake_gen_mac)
        # SQLite doesn't seem to honor the uniqueness constraint on the
        # address column, so fake the collision-avoidance here
        def fake_vif_save(vif):
            if vif.address == crash_test_dummy_vif['address']:
                raise db_exc.DBError("If you're smart, you'll retry!")
            # NOTE(russellb) The VirtualInterface object requires an ID to be
            # set, and we expect it to get set automatically when we do the
            # save.
            vif.id = 1
        self.stubs.Set(models.VirtualInterface, 'save', fake_vif_save)
        # Attempt to add another and make sure that both MACs are consumed
        # by the retry loop
        self.network._add_virtual_interface(ctxt, 'fake_uuid', 123)
        self.assertEqual(macs, [])
    def test_deallocate_client_exceptions(self):
        # Ensure that FloatingIpNotFoundForAddress is wrapped.
        # The RPC layer re-raises known client errors as
        # messaging.ExpectedException rather than letting them leak raw.
        self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
        self.network.db.floating_ip_get_by_address(
            self.context, '1.2.3.4').AndRaise(
            exception.FloatingIpNotFoundForAddress(address='fake'))
        self.mox.ReplayAll()
        self.assertRaises(messaging.ExpectedException,
                          self.network.deallocate_floating_ip,
                          self.context, '1.2.3.4')
    def test_associate_client_exceptions(self):
        # Ensure that FloatingIpNotFoundForAddress is wrapped.
        # associate_floating_ip must convert the error into the RPC-safe
        # messaging.ExpectedException wrapper.
        self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
        self.network.db.floating_ip_get_by_address(
            self.context, '1.2.3.4').AndRaise(
            exception.FloatingIpNotFoundForAddress(address='fake'))
        self.mox.ReplayAll()
        self.assertRaises(messaging.ExpectedException,
                          self.network.associate_floating_ip,
                          self.context, '1.2.3.4', '10.0.0.1')
    def test_disassociate_client_exceptions(self):
        # Ensure that FloatingIpNotFoundForAddress is wrapped.
        # Same RPC wrapping contract as the associate/deallocate cases.
        self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
        self.network.db.floating_ip_get_by_address(
            self.context, '1.2.3.4').AndRaise(
            exception.FloatingIpNotFoundForAddress(address='fake'))
        self.mox.ReplayAll()
        self.assertRaises(messaging.ExpectedException,
                          self.network.disassociate_floating_ip,
                          self.context, '1.2.3.4')
    def test_get_floating_ip_client_exceptions(self):
        # Ensure that FloatingIpNotFoundForAddress is wrapped.
        # Here the lookup is by id rather than address, so the stubbed DB
        # call raises FloatingIpNotFound instead.
        self.mox.StubOutWithMock(self.network.db, 'floating_ip_get')
        self.network.db.floating_ip_get(self.context, 'fake-id').AndRaise(
            exception.FloatingIpNotFound(id='fake'))
        self.mox.ReplayAll()
        self.assertRaises(messaging.ExpectedException,
                          self.network.get_floating_ip,
                          self.context, 'fake-id')
    def _test_associate_floating_ip_failure(self, stdout, expected_exception):
        """Helper: make the l3 driver's add_floating_ip fail with the given
        stdout, and assert _associate_floating_ip raises
        expected_exception.
        """
        def _fake_catchall(*args, **kwargs):
            # Stand-in for both DB calls: returns a plausible fixed-ip row.
            return dict(test_fixed_ip.fake_fixed_ip,
                        network=test_network.fake_network)
        def _fake_add_floating_ip(*args, **kwargs):
            raise processutils.ProcessExecutionError(stdout)
        self.stubs.Set(self.network.db, 'floating_ip_fixed_ip_associate',
                       _fake_catchall)
        self.stubs.Set(self.network.db, 'floating_ip_disassociate',
                       _fake_catchall)
        self.stubs.Set(self.network.l3driver, 'add_floating_ip',
                       _fake_add_floating_ip)
        self.assertRaises(expected_exception,
                          self.network._associate_floating_ip, self.context,
                          '1.2.3.4', '1.2.3.5', '', '')
def test_associate_floating_ip_failure(self):
self._test_associate_floating_ip_failure(None,
processutils.ProcessExecutionError)
def test_associate_floating_ip_failure_interface_not_found(self):
self._test_associate_floating_ip_failure('Cannot find device',
exception.NoFloatingIpInterface)
    @mock.patch('nova.objects.FloatingIP.get_by_address')
    def test_get_floating_ip_by_address(self, mock_get):
        """The manager method is a thin delegate to the object API."""
        mock_get.return_value = mock.sentinel.floating
        self.assertEqual(mock.sentinel.floating,
                         self.network.get_floating_ip_by_address(
                             self.context,
                             mock.sentinel.address))
        mock_get.assert_called_once_with(self.context, mock.sentinel.address)
    @mock.patch('nova.objects.FloatingIPList.get_by_project')
    def test_get_floating_ips_by_project(self, mock_get):
        """Delegates to FloatingIPList using the context's project id."""
        mock_get.return_value = mock.sentinel.floatings
        self.assertEqual(mock.sentinel.floatings,
                         self.network.get_floating_ips_by_project(
                             self.context))
        mock_get.assert_called_once_with(self.context, self.context.project_id)
    @mock.patch('nova.objects.FloatingIPList.get_by_fixed_address')
    def test_get_floating_ips_by_fixed_address(self, mock_get):
        """Returns the plain address strings, not the FloatingIP objects."""
        mock_get.return_value = [objects.FloatingIP(address='1.2.3.4'),
                                 objects.FloatingIP(address='5.6.7.8')]
        self.assertEqual(['1.2.3.4', '5.6.7.8'],
                         self.network.get_floating_ips_by_fixed_address(
                             self.context, mock.sentinel.address))
        mock_get.assert_called_once_with(self.context, mock.sentinel.address)
class InstanceDNSTestCase(test.TestCase):
    """Tests nova.network.manager instance DNS."""
    def setUp(self):
        super(InstanceDNSTestCase, self).setUp()
        # Redirect logging into a throwaway temp dir for this test run.
        self.tempdir = self.useFixture(fixtures.TempDir()).path
        self.flags(log_dir=self.tempdir)
        self.flags(use_local=True, group='conductor')
        self.network = TestFloatingIPManager()
        self.network.db = db
        self.project_id = 'testproject'
        self.context = context.RequestContext('testuser', self.project_id,
                                              is_admin=False)
    def test_dns_domains_private(self):
        """Private DNS domains require admin context to create/delete and
        carry the availability zone they were created with.
        """
        zone1 = 'testzone'
        domain1 = 'example.org'
        context_admin = context.RequestContext('testuser', 'testproject',
                                               is_admin=True)
        self.assertRaises(exception.AdminRequired,
                          self.network.create_private_dns_domain, self.context,
                          domain1, zone1)
        self.network.create_private_dns_domain(context_admin, domain1, zone1)
        domains = self.network.get_dns_domains(self.context)
        self.assertEqual(len(domains), 1)
        self.assertEqual(domains[0]['domain'], domain1)
        self.assertEqual(domains[0]['availability_zone'], zone1)
        self.assertRaises(exception.AdminRequired,
                          self.network.delete_dns_domain, self.context,
                          domain1)
        self.network.delete_dns_domain(context_admin, domain1)
# Module-level domain fixtures shared by LdapDNSTestCase below.
domain1 = "example.org"
domain2 = "example.com"
class LdapDNSTestCase(test.TestCase):
    """Tests nova.network.ldapdns.LdapDNS."""
    def setUp(self):
        super(LdapDNSTestCase, self).setUp()
        # Replace the real python-ldap module with an in-memory fake.
        self.useFixture(test.ReplaceModule('ldap', fake_ldap))
        dns_class = 'nova.network.ldapdns.LdapDNS'
        self.driver = importutils.import_object(dns_class)
        # Seed the fake directory with the base ou=hosts entry the driver
        # expects to exist.
        attrs = {'objectClass': ['domainrelatedobject', 'dnsdomain',
                                 'domain', 'dcobject', 'top'],
                 'associateddomain': ['root'],
                 'dc': ['root']}
        self.driver.lobj.add_s("ou=hosts,dc=example,dc=org", attrs.items())
        # domain1/domain2 are module-level fixtures shared by all tests.
        self.driver.create_domain(domain1)
        self.driver.create_domain(domain2)
    def tearDown(self):
        self.driver.delete_domain(domain1)
        self.driver.delete_domain(domain2)
        super(LdapDNSTestCase, self).tearDown()
    def test_ldap_dns_domains(self):
        """Both fixture domains are listed by get_domains."""
        domains = self.driver.get_domains()
        self.assertEqual(len(domains), 2)
        self.assertIn(domain1, domains)
        self.assertIn(domain2, domains)
    def test_ldap_dns_create_conflict(self):
        """Creating a duplicate entry raises FloatingIpDNSExists."""
        address1 = "10.10.10.11"
        name1 = "foo"
        self.driver.create_entry(name1, address1, "A", domain1)
        self.assertRaises(exception.FloatingIpDNSExists,
                          self.driver.create_entry,
                          name1, address1, "A", domain1)
    def test_ldap_dns_create_and_get(self):
        """Entries are retrievable both by address and by name."""
        address1 = "10.10.10.11"
        name1 = "foo"
        name2 = "bar"
        entries = self.driver.get_entries_by_address(address1, domain1)
        self.assertFalse(entries)
        self.driver.create_entry(name1, address1, "A", domain1)
        self.driver.create_entry(name2, address1, "A", domain1)
        entries = self.driver.get_entries_by_address(address1, domain1)
        self.assertEqual(len(entries), 2)
        self.assertEqual(entries[0], name1)
        self.assertEqual(entries[1], name2)
        entries = self.driver.get_entries_by_name(name1, domain1)
        self.assertEqual(len(entries), 1)
        self.assertEqual(entries[0], address1)
    def test_ldap_dns_delete(self):
        """Deleting one of two names leaves the other; deleting a missing
        name raises NotFound.
        """
        address1 = "10.10.10.11"
        name1 = "foo"
        name2 = "bar"
        self.driver.create_entry(name1, address1, "A", domain1)
        self.driver.create_entry(name2, address1, "A", domain1)
        entries = self.driver.get_entries_by_address(address1, domain1)
        self.assertEqual(len(entries), 2)
        self.driver.delete_entry(name1, domain1)
        entries = self.driver.get_entries_by_address(address1, domain1)
        LOG.debug("entries: %s" % entries)
        self.assertEqual(len(entries), 1)
        self.assertEqual(entries[0], name2)
        self.assertRaises(exception.NotFound,
                          self.driver.delete_entry,
                          name1, domain1)
| apache-2.0 |
cysnake4713/server-tools | mass_editing/models/ir_model_fields.py | 42 | 1801 | # -*- coding: utf-8 -*-
##############################################################################
#
# This module uses OpenERP, Open Source Management Solution Framework.
# Copyright (C):
# 2012-Today Serpent Consulting Services (<http://www.serpentcs.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import orm
class IrModelFields(orm.Model):
    _inherit = 'ir.model.fields'
    def search(
            self, cr, uid, args, offset=0, limit=0, order=None, context=None,
            count=False):
        """Override search() to accept a textual id list for ``model_id``.

        The mass_editing views pass domains such as
        ``('model_id', 'in', '[1, 2]')`` where the value is a *string*
        rendering of a list of ids.  Rewrite that into a proper
        ``('model_id', 'in', [1, 2])`` term before delegating to the
        standard search.  All other domain terms pass through untouched.
        """
        model_domain = []
        for domain in args:
            if (len(domain) > 2 and
                    domain[0] == 'model_id' and
                    isinstance(domain[2], basestring)):
                # Strip the surrounding brackets and parse the ids.
                # NOTE(review): assumes the string always looks like
                # "[n, n, ...]" with at least one id -- an empty "[]"
                # would crash int(''); confirm callers never pass it.
                model_domain += [
                    ('model_id', 'in', map(int, domain[2][1:-1].split(',')))
                ]
            else:
                model_domain.append(domain)
        return super(IrModelFields, self).search(
            cr, uid, model_domain, offset=offset, limit=limit, order=order,
            context=context, count=count
        )
| agpl-3.0 |
zofuthan/edx-platform | common/lib/xmodule/xmodule/modulestore/edit_info.py | 201 | 2843 | """
Access methods to get EditInfo for xblocks
"""
from xblock.fields import XBlockMixin
from abc import ABCMeta, abstractmethod
class EditInfoMixin(XBlockMixin):
    """
    Provides the interfaces for getting the edit info from XBlocks.

    Every property delegates to the runtime, which is expected to also
    mix in :class:`EditInfoRuntimeMixin` (defined below in this module).
    """
    @property
    def edited_by(self):
        """
        The user id of the last user to change this xblock content, children, or settings.
        """
        return self.runtime.get_edited_by(self)
    @property
    def edited_on(self):
        """
        The datetime of the last change to this xblock content, children, or settings.
        """
        return self.runtime.get_edited_on(self)
    @property
    def subtree_edited_by(self):
        """
        The user id of the last user to change content, children, or settings in this xblock's subtree
        """
        return self.runtime.get_subtree_edited_by(self)
    @property
    def subtree_edited_on(self):
        """
        The datetime of the last change content, children, or settings in this xblock's subtree
        """
        return self.runtime.get_subtree_edited_on(self)
    @property
    def published_by(self):
        """
        The user id of the last user to publish this specific xblock (or a previous version of it).
        """
        return self.runtime.get_published_by(self)
    @property
    def published_on(self):
        """
        The datetime of the last time this specific xblock was published.
        """
        return self.runtime.get_published_on(self)
class EditInfoRuntimeMixin(object):
    """
    An abstract mixin class for the functions which the :class: `EditInfoMixin` methods call on the runtime
    """
    __metaclass__ = ABCMeta
    @abstractmethod
    def get_edited_by(self, xblock):
        """
        The user id of the last user to change this xblock content, children, or settings.
        """
        pass
    @abstractmethod
    def get_edited_on(self, xblock):
        """
        The datetime of the last change to this xblock content, children, or settings.
        """
        pass
    @abstractmethod
    def get_subtree_edited_by(self, xblock):
        """
        The user id of the last user to change content, children, or settings in this xblock's subtree
        """
        pass
    @abstractmethod
    def get_subtree_edited_on(self, xblock):
        """
        The datetime of the last change content, children, or settings in this xblock's subtree
        """
        pass
    @abstractmethod
    def get_published_by(self, xblock):
        """
        The user id of the last user to publish this specific xblock (or a previous version of it).
        """
        pass
    @abstractmethod
    def get_published_on(self, xblock):
        """
        The datetime of the last time this specific xblock was published.
        """
        pass
| agpl-3.0 |
whitepages/nova | nova/virt/disk/mount/api.py | 47 | 10352 | # Copyright 2011 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Support for mounting virtual image files."""
import os
import time
from oslo_log import log as logging
from oslo_utils import importutils
from nova import exception
from nova.i18n import _, _LI, _LW
from nova import utils
from nova.virt.image import model as imgmodel
LOG = logging.getLogger(__name__)
MAX_DEVICE_WAIT = 30
class Mount(object):
    """Standard mounting operations, that can be overridden by subclasses.
    The basic device operations provided are get, map and mount,
    to be called in that order.
    """
    # Short backend name ('loop', 'nbd', ...) used in error messages;
    # set by subclasses.
    mode = None  # to be overridden in subclasses
    @staticmethod
    def instance_for_format(image, mountdir, partition):
        """Get a Mount instance for the image type
        :param image: instance of nova.virt.image.model.Image
        :param mountdir: path to mount the image at
        :param partition: partition number to mount
        """
        LOG.debug("Instance for format image=%(image)s "
                  "mountdir=%(mountdir)s partition=%(partition)s",
                  {'image': image, 'mountdir': mountdir,
                   'partition': partition})
        if isinstance(image, imgmodel.LocalFileImage):
            if image.format == imgmodel.FORMAT_RAW:
                # Raw files can be attached straight to a loop device.
                LOG.debug("Using LoopMount")
                return importutils.import_object(
                    "nova.virt.disk.mount.loop.LoopMount",
                    image, mountdir, partition)
            else:
                # Any other file format (qcow2, ...) goes through qemu-nbd.
                LOG.debug("Using NbdMount")
                return importutils.import_object(
                    "nova.virt.disk.mount.nbd.NbdMount",
                    image, mountdir, partition)
        else:
            # TODO(berrange) we could mount images of
            # type LocalBlockImage directly without
            # involving loop or nbd devices
            #
            # We could also mount RBDImage directly
            # using kernel RBD block dev support.
            #
            # This is left as an enhancement for future
            # motivated developers todo, since raising
            # an exception is on par with what this
            # code did historically
            raise exception.UnsupportedImageModel(
                image.__class__.__name__)
    @staticmethod
    def instance_for_device(image, mountdir, partition, device):
        """Get a Mount instance for the device type
        :param image: instance of nova.virt.image.model.Image
        :param mountdir: path to mount the image at
        :param partition: partition number to mount
        :param device: mounted device path
        """
        LOG.debug("Instance for device image=%(image)s "
                  "mountdir=%(mountdir)s partition=%(partition)s "
                  "device=%(device)s",
                  {'image': image, 'mountdir': mountdir,
                   'partition': partition, 'device': device})
        # The device path itself tells us which backend created it.
        if "loop" in device:
            LOG.debug("Using LoopMount")
            return importutils.import_object(
                "nova.virt.disk.mount.loop.LoopMount",
                image, mountdir, partition, device)
        else:
            LOG.debug("Using NbdMount")
            return importutils.import_object(
                "nova.virt.disk.mount.nbd.NbdMount",
                image, mountdir, partition, device)
    def __init__(self, image, mount_dir, partition=None, device=None):
        """Create a new Mount instance
        :param image: instance of nova.virt.image.model.Image
        :param mount_dir: path to mount the image at
        :param partition: partition number to mount
        :param device: mounted device path
        """
        # Input
        self.image = image
        self.partition = partition
        self.mount_dir = mount_dir
        # Output
        self.error = ""
        # Internal flags tracking progress through get -> map -> mount.
        self.linked = self.mapped = self.mounted = self.automapped = False
        self.device = self.mapped_device = device
        # Reset to mounted dir if possible
        self.reset_dev()
    def reset_dev(self):
        """Reset device paths to allow unmounting."""
        if not self.device:
            return
        # A pre-supplied device implies we are already attached, mapped
        # and mounted, so mark the whole sequence as done.
        self.linked = self.mapped = self.mounted = True
        device = self.device
        if os.path.isabs(device) and os.path.exists(device):
            if device.startswith('/dev/mapper/'):
                # Split e.g. '/dev/mapper/loop0p2' back into base device
                # '/dev/loop0' and partition '2'.
                # NOTE(review): self.partition becomes a *string* here,
                # while callers normally pass an int -- confirm intended.
                device = os.path.basename(device)
                device, self.partition = device.rsplit('p', 1)
                self.device = os.path.join('/dev', device)
    def get_dev(self):
        """Make the image available as a block device in the file system."""
        self.device = None
        self.linked = True
        return True
    def _get_dev_retry_helper(self):
        """Some implementations need to retry their get_dev."""
        # NOTE(mikal): This method helps implement retries. The implementation
        # simply calls _get_dev_retry_helper from their get_dev, and implements
        # _inner_get_dev with their device acquisition logic. The NBD
        # implementation has an example.
        start_time = time.time()
        device = self._inner_get_dev()
        while not device:
            LOG.info(_LI('Device allocation failed. Will retry in 2 seconds.'))
            time.sleep(2)
            if time.time() - start_time > MAX_DEVICE_WAIT:
                LOG.warning(_LW('Device allocation failed after repeated '
                                'retries.'))
                return False
            device = self._inner_get_dev()
        return True
    def _inner_get_dev(self):
        # Subclass hook: acquire a device, return truthy on success.
        raise NotImplementedError()
    def unget_dev(self):
        """Release the block device from the file system namespace."""
        self.linked = False
    def map_dev(self):
        """Map partitions of the device to the file system namespace."""
        assert(os.path.exists(self.device))
        LOG.debug("Map dev %s", self.device)
        automapped_path = '/dev/%sp%s' % (os.path.basename(self.device),
                                          self.partition)
        if self.partition == -1:
            # -1 means "search for the partition", which only some
            # subclasses support.
            self.error = _('partition search unsupported with %s') % self.mode
        elif self.partition and not os.path.exists(automapped_path):
            map_path = '/dev/mapper/%sp%s' % (os.path.basename(self.device),
                                              self.partition)
            assert(not os.path.exists(map_path))
            # Note kpartx can output warnings to stderr and succeed
            # Also it can output failures to stderr and "succeed"
            # So we just go on the existence of the mapped device
            _out, err = utils.trycmd('kpartx', '-a', self.device,
                                     run_as_root=True, discard_warnings=True)
            # Note kpartx does nothing when presented with a raw image,
            # so given we only use it when we expect a partitioned image, fail
            if not os.path.exists(map_path):
                if not err:
                    err = _('partition %s not found') % self.partition
                self.error = _('Failed to map partitions: %s') % err
            else:
                self.mapped_device = map_path
                self.mapped = True
        elif self.partition and os.path.exists(automapped_path):
            # Note auto mapping can be enabled with the 'max_part' option
            # to the nbd or loop kernel modules. Beware of possible races
            # in the partition scanning for _loop_ devices though
            # (details in bug 1024586), which are currently uncatered for.
            self.mapped_device = automapped_path
            self.mapped = True
            self.automapped = True
        else:
            # No partition requested: expose the whole device.
            self.mapped_device = self.device
            self.mapped = True
        return self.mapped
    def unmap_dev(self):
        """Remove partitions of the device from the file system namespace."""
        if not self.mapped:
            return
        LOG.debug("Unmap dev %s", self.device)
        # Kernel-automapped partitions were not created by kpartx, so
        # there is nothing for kpartx to tear down in that case.
        if self.partition and not self.automapped:
            utils.execute('kpartx', '-d', self.device, run_as_root=True)
        self.mapped = False
        self.automapped = False
    def mnt_dev(self):
        """Mount the device into the file system."""
        LOG.debug("Mount %(dev)s on %(dir)s",
                  {'dev': self.mapped_device, 'dir': self.mount_dir})
        _out, err = utils.trycmd('mount', self.mapped_device, self.mount_dir,
                                 discard_warnings=True, run_as_root=True)
        if err:
            # Any stderr output is treated as a mount failure.
            self.error = _('Failed to mount filesystem: %s') % err
            LOG.debug(self.error)
            return False
        self.mounted = True
        return True
    def unmnt_dev(self):
        """Unmount the device from the file system."""
        if not self.mounted:
            return
        self.flush_dev()
        LOG.debug("Umount %s", self.mapped_device)
        utils.execute('umount', self.mapped_device, run_as_root=True)
        self.mounted = False
    def flush_dev(self):
        # Subclass hook: sync pending writes before unmounting.
        pass
    def do_mount(self):
        """Call the get, map and mnt operations."""
        status = False
        try:
            status = self.get_dev() and self.map_dev() and self.mnt_dev()
        finally:
            # Roll back partial progress so we do not leak devices.
            if not status:
                LOG.debug("Fail to mount, tearing back down")
                self.do_teardown()
        return status
    def do_umount(self):
        """Call the unmnt operation."""
        if self.mounted:
            self.unmnt_dev()
    def do_teardown(self):
        """Call the umnt, unmap, and unget operations."""
        if self.mounted:
            self.unmnt_dev()
        if self.mapped:
            self.unmap_dev()
        if self.linked:
            self.unget_dev()
| apache-2.0 |
PandaWei/avocado | selftests/unit/test_vm.py | 2 | 2537 | #!/usr/bin/env python
import unittest
from flexmock import flexmock, flexmock_teardown
from avocado.core.remote import VMTestRunner
from avocado.core import virt
# Sample remote-runner stdout: a one-line JSON results document surrounded
# by non-JSON noise lines, which the result parser is expected to skip.
JSON_RESULTS = ('Something other than json\n'
                '{"tests": [{"test": "sleeptest.1", "url": "sleeptest", '
                '"status": "PASS", "time": 1.23, "start": 0, "end": 1.23}],'
                '"debuglog": "/home/user/avocado/logs/run-2014-05-26-15.45.'
                '37/debug.log", "errors": 0, "skip": 0, "time": 1.4, '
                '"start": 0, "end": 1.4, "pass": 1, "failures": 0, "total": '
                '1}\nAdditional stuff other than json')
class _FakeVM(virt.VM):
    """
    Fake VM-inherited object (it's better to inherit it, than to flexmock the
    isinstance)
    """
    def __init__(self):  # don't call virt.VM.__init__ pylint: disable=W0231
        # Pretend a snapshot already exists and the libvirt domain is active.
        self.snapshot = True
        self.domain = flexmock(isActive=lambda: True)
class VMTestRunnerSetup(unittest.TestCase):
    """ Tests the VMTestRunner setup() method """
    def setUp(self):
        # Ordered flexmock script: setup() must connect, start and snapshot
        # in this exact order; tear_down() must stop and restore afterwards.
        mock_vm = flexmock(_FakeVM())
        flexmock(virt).should_receive('vm_connect').and_return(mock_vm).once().ordered()
        mock_vm.should_receive('start').and_return(True).once().ordered()
        mock_vm.should_receive('create_snapshot').once().ordered()
        # VMTestRunner()
        # Minimal fake of the parsed CLI arguments the runner reads.
        Args = flexmock(test_result_total=1,
                        url=['/tests/sleeptest', '/tests/other/test',
                             'passtest'],
                        vm_domain='domain',
                        vm_username='username',
                        vm_hostname='hostname',
                        vm_port=22,
                        vm_password='password',
                        vm_key_file=None,
                        vm_cleanup=True,
                        vm_no_copy=False,
                        vm_timeout=120,
                        vm_hypervisor_uri='my_hypervisor_uri',
                        env_keep=None)
        log = flexmock()
        log.should_receive("info")
        job = flexmock(args=Args, log=log)
        self.runner = VMTestRunner(job, None)
        # Expectations for the teardown phase, still strictly ordered.
        mock_vm.should_receive('stop').once().ordered()
        mock_vm.should_receive('restore_snapshot').once().ordered()
    def tearDown(self):
        flexmock_teardown()
    def test_setup(self):
        """ Tests VMTestRunner.setup() """
        self.runner.setup()
        self.runner.tear_down()
        flexmock_teardown()
# Allow running this test file directly.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
jplusplus/broken-promises | Sources/brokenpromises/__init__.py | 1 | 3482 | #!/usr/bin/env python
# Encoding: utf-8
# -----------------------------------------------------------------------------
# Project : Broken Promises
# -----------------------------------------------------------------------------
# Author : Edouard Richard <edou4rd@gmail.com>
# -----------------------------------------------------------------------------
# License : GNU General Public License
# -----------------------------------------------------------------------------
# Creation : 28-Oct-2013
# Last mod : 12-Nov-2013
# -----------------------------------------------------------------------------
# This file is part of Broken Promises.
#
# Broken Promises is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Broken Promises is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Broken Promises. If not, see <http://www.gnu.org/licenses/>.
from models import Article, Report
import os
import importlib
# Name of the environment variable that must point at the settings module.
ENVIRONMENT_VARIABLE = "BP_SETTINGS"
class Settings:
	"""Load configuration from the module named by $BP_SETTINGS.

	Every UPPER_CASE attribute of that module is copied onto this object,
	readable both as attributes (settings.FOO) and items (settings['FOO']).
	"""
	def __init__(self):
		# NOTE: the previous code used `except KeyError or not settings_module:`
		# which short-circuits to `except KeyError:` and never detected an
		# *empty* variable. Check explicitly instead.
		settings_module = os.environ.get(ENVIRONMENT_VARIABLE)
		if not settings_module:
			raise Exception("Settings are not configured. You must define the environment variable '%s'" % (ENVIRONMENT_VARIABLE))
		mod = importlib.import_module(settings_module)
		# Copy only ALL-CAPS names, the usual settings convention.
		for setting in dir(mod):
			if setting == setting.upper():
				setattr(self, setting, getattr(mod, setting))
	def __getitem__(self, name):
		# Dict-style access: settings['FOO'] == settings.FOO
		return getattr(self, name)
# Singleton settings object used across the package.
settings = Settings()
# launch scheduled jobs
import datetime
from brokenpromises.worker import worker
from rq_scheduler import Scheduler
from brokenpromises.operations import CollectNext7days, CollectNext2Months, CollectNext2Years, CollectToday, MrClean
import redis
conn = redis.from_url(settings.REDIS_URL)
scheduler = Scheduler(connection=conn)
scheduled_jobs = scheduler.get_jobs()
# remove all jobs with interval
for job in scheduled_jobs:
	if "RunAndReplaceIntTheQueuePeriodically" in job.description:
		scheduler.cancel(job)
today = datetime.datetime.now()
# next midnight (00:10, a small margin after the date change)
next_midnight = today + datetime.timedelta(days=1)
next_midnight = datetime.datetime(next_midnight.year, next_midnight.month, next_midnight.day, 0, 10)
# first day of next month at 00:10.
# NOTE: the previous formula `today.year + (today.month + 1) / 12` rolled
# the year over one month too early (in November it produced December of
# the *next* year). The year only increments after December.
year = today.year + today.month // 12
month = today.month % 12 + 1
next_month = datetime.datetime(year, month, 1, 0, 10)
# first day of next year at 00:20
next_year = datetime.datetime(today.year + 1, 1, 1, 0, 20)
# enqueue periodic jobs
worker.schedule_periodically(date=next_midnight, frequence="daily"  , collector=CollectToday())
worker.schedule_periodically(date=next_midnight, frequence="daily"  , collector=CollectNext7days())
worker.schedule_periodically(date=next_midnight, frequence="daily"  , collector=MrClean())
worker.schedule_periodically(date=next_month   , frequence="monthly", collector=CollectNext2Months())
worker.schedule_periodically(date=next_year    , frequence="yearly" , collector=CollectNext2Years())
# EOF
| gpl-3.0 |
blacklin/kbengine | kbe/src/lib/python/Lib/multiprocessing/popen_spawn_win32.py | 102 | 2998 | import os
import msvcrt
import signal
import sys
import _winapi
from . import context
from . import spawn
from . import reduction
from . import util
__all__ = ['Popen']
#
#
#
# Exit code passed to TerminateProcess by Popen.terminate(); mapped back
# to -SIGTERM in Popen.wait() to mimic the POSIX convention.
TERMINATE = 0x10000
# True when running a frozen (e.g. py2exe/cx_Freeze) executable on Windows.
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
# True when running inside a pythonservice.exe Windows service.
WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#
class Popen(object):
    '''
    Start a subprocess to run the code of a process object
    '''
    method = 'spawn'
    def __init__(self, process_obj):
        prep_data = spawn.get_preparation_data(process_obj._name)
        # read end of pipe will be "stolen" by the child process
        # -- see spawn_main() in spawn.py.
        rhandle, whandle = _winapi.CreatePipe(None, 0)
        wfd = msvcrt.open_osfhandle(whandle, 0)
        cmd = spawn.get_command_line(parent_pid=os.getpid(),
                                     pipe_handle=rhandle)
        cmd = ' '.join('"%s"' % x for x in cmd)
        with open(wfd, 'wb', closefd=True) as to_child:
            # start process
            try:
                hp, ht, pid, tid = _winapi.CreateProcess(
                    spawn.get_executable(), cmd,
                    None, None, False, 0, None, None, None)
                # The thread handle is never needed; close it right away.
                _winapi.CloseHandle(ht)
            except:
                # Creation failed: the child will never take the read end,
                # so close it here to avoid leaking the handle.
                _winapi.CloseHandle(rhandle)
                raise
            # set attributes of self
            self.pid = pid
            self.returncode = None
            self._handle = hp
            self.sentinel = int(hp)
            # Ensure the process handle is closed when self is collected.
            util.Finalize(self, _winapi.CloseHandle, (self.sentinel,))
            # send information to child
            context.set_spawning_popen(self)
            try:
                reduction.dump(prep_data, to_child)
                reduction.dump(process_obj, to_child)
            finally:
                context.set_spawning_popen(None)
    def duplicate_for_child(self, handle):
        # Only valid while this Popen is the one currently being spawned.
        assert self is context.get_spawning_popen()
        return reduction.duplicate(handle, self.sentinel)
    def wait(self, timeout=None):
        # Wait up to `timeout` seconds (None = forever) for the child to
        # exit; return its exit code, or None if it is still running.
        if self.returncode is None:
            if timeout is None:
                msecs = _winapi.INFINITE
            else:
                # Convert seconds to whole milliseconds, rounding.
                msecs = max(0, int(timeout * 1000 + 0.5))
            res = _winapi.WaitForSingleObject(int(self._handle), msecs)
            if res == _winapi.WAIT_OBJECT_0:
                code = _winapi.GetExitCodeProcess(self._handle)
                if code == TERMINATE:
                    # Normalise our magic terminate code to -SIGTERM so it
                    # matches the POSIX exitcode convention.
                    code = -signal.SIGTERM
                self.returncode = code
        return self.returncode
    def poll(self):
        # Non-blocking exit check.
        return self.wait(timeout=0)
    def terminate(self):
        # Forcibly stop the child; a no-op once it has already exited.
        if self.returncode is None:
            try:
                _winapi.TerminateProcess(int(self._handle), TERMINATE)
            except OSError:
                # The child may have exited concurrently; only re-raise if
                # it genuinely has not finished within a short grace wait.
                if self.wait(timeout=1.0) is None:
                    raise
| lgpl-3.0 |
titasakgm/brc-stock | openerp/addons/account/project/report/inverted_analytic_balance.py | 56 | 5710 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp import pooler
from openerp.report import report_sxw
class account_inverted_analytic_balance(report_sxw.rml_parse):
    def __init__(self, cr, uid, name, context):
        """Register the helper callables exposed to the RML report template."""
        super(account_inverted_analytic_balance, self).__init__(cr, uid, name, context=context)
        self.localcontext.update( {
            'time': time,
            'lines_g': self._lines_g,
            'lines_a': self._lines_a,
            'sum_debit': self._sum_debit,
            'sum_credit': self._sum_credit,
            'sum_balance': self._sum_balance,
            'sum_quantity': self._sum_quantity,
        })
    def _lines_g(self, accounts, date1, date2):
        """Aggregate analytic lines per general (financial) account.

        Returns one dict per active general account touched by the given
        analytic accounts in [date1, date2], with summed 'balance' and
        'quantity' plus derived 'debit'/'credit' keys (a positive balance
        counts as debit, a negative one as credit).
        """
        ids = map(lambda x: x.id, accounts)
        self.cr.execute("SELECT aa.name AS name, aa.code AS code, "
                        "sum(aal.amount) AS balance, "
                        "sum(aal.unit_amount) AS quantity, aa.id AS id \
                FROM account_analytic_line AS aal, account_account AS aa \
                WHERE (aal.general_account_id=aa.id) "
                        "AND (aal.account_id IN %s) "
                        "AND (date>=%s) AND (date<=%s) AND aa.active \
                GROUP BY aal.general_account_id, aa.name, aa.code, aal.code, aa.id "
                        "ORDER BY aal.code",
                        (tuple(ids), date1, date2))
        res = self.cr.dictfetchall()
        # Split the signed balance into separate debit/credit columns.
        for r in res:
            if r['balance'] > 0:
                r['debit'] = r['balance']
                r['credit'] = 0.0
            elif r['balance'] < 0:
                r['debit'] = 0.0
                r['credit'] = -r['balance']
            else:
                r['debit'] = 0.0
                r['credit'] = 0.0
        return res
def _lines_a(self, accounts, general_account_id, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT sum(aal.amount) AS balance, "
"sum(aal.unit_amount) AS quantity, "
"aaa.code AS code, aaa.name AS name, account_id \
FROM account_analytic_line AS aal, "
"account_analytic_account AS aaa \
WHERE aal.account_id=aaa.id AND aal.account_id IN %s "
"AND aal.general_account_id=%s AND aal.date>=%s "
"AND aal.date<=%s \
GROUP BY aal.account_id, general_account_id, aaa.code, aaa.name "
"ORDER BY aal.account_id",
(tuple(ids), general_account_id, date1, date2))
res = self.cr.dictfetchall()
aaa_obj = self.pool.get('account.analytic.account')
res2 = aaa_obj.read(self.cr, self.uid, ids, ['complete_name'])
complete_name = {}
for r in res2:
complete_name[r['id']] = r['complete_name']
for r in res:
r['complete_name'] = complete_name[r['account_id']]
if r['balance'] > 0:
r['debit'] = r['balance']
r['credit'] = 0.0
elif r['balance'] < 0:
r['debit'] = 0.0
r['credit'] = -r['balance']
else:
r['debit'] = 0.0
r['credit'] = 0.0
return res
def _sum_debit(self, accounts, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT sum(amount) \
FROM account_analytic_line \
WHERE account_id IN %s AND date>=%s AND date<=%s AND amount>0", (tuple(ids),date1, date2,))
return self.cr.fetchone()[0] or 0.0
def _sum_credit(self, accounts, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT -sum(amount) \
FROM account_analytic_line \
WHERE account_id IN %s AND date>=%s AND date<=%s AND amount<0", (tuple(ids),date1, date2,))
return self.cr.fetchone()[0] or 0.0
def _sum_balance(self, accounts, date1, date2):
debit = self._sum_debit(accounts, date1, date2)
credit = self._sum_credit(accounts, date1, date2)
return (debit-credit)
def _sum_quantity(self, accounts, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT sum(unit_amount) \
FROM account_analytic_line \
WHERE account_id IN %s AND date>=%s AND date<=%s", (tuple(ids),date1, date2,))
return self.cr.fetchone()[0] or 0.0
# Register the parser under its report service name, using the RML template
# below and the 'internal' header layout.
report_sxw.report_sxw('report.account.analytic.account.inverted.balance', 'account.analytic.account', 'addons/account/project/report/inverted_analytic_balance.rml',parser=account_inverted_analytic_balance, header="internal")

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Split-Screen/android_kernel_asus_fugu | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py | 4653 | 3596 | # EventClass.py
#
# This is a library defining some events types classes, which could
# be used by other scripts to analyzing the perf samples.
#
# Currently there are just a few classes defined for examples,
# PerfEvent is the base class for all perf event sample, PebsEvent
# is a HW base Intel x86 PEBS event, and user could add more SW/HW
# event classes based on requirements.
import struct
# Event types, user could add more here
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1 # Basic PEBS event
EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
EVTYPE_IBS = 3
#
# Currently we don't have good way to tell the event type, but by
# the size of raw buffer, raw PEBS event with load latency data's
# size is 176 bytes, while the pure PEBS event's size is 144 bytes.
#
def create_event(name, comm, dso, symbol, raw_buf):
    """Factory helper: pick the event class from the raw buffer size.

    A pure PEBS record is 144 bytes and a PEBS record carrying load-latency
    data is 176 bytes; any other size is treated as a generic perf sample.
    """
    size = len(raw_buf)
    if size == 144:
        return PebsEvent(name, comm, dso, symbol, raw_buf)
    if size == 176:
        return PebsNHM(name, comm, dso, symbol, raw_buf)
    return PerfEvent(name, comm, dso, symbol, raw_buf)
class PerfEvent(object):
    """Base class for a single perf event sample."""
    # Class-wide counter of all PerfEvent (and subclass) instances created.
    event_num = 0

    def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
        self.name = name        # event name
        self.comm = comm        # command (process) name
        self.dso = dso          # dso / binary the sample hit
        self.symbol = symbol    # resolved symbol name
        self.raw_buf = raw_buf  # raw sample payload (bytes)
        self.ev_type = ev_type  # one of the EVTYPE_* constants
        PerfEvent.event_num += 1

    def show(self):
        # Python 2 print statement: this module targets the py2 perf bindings.
        print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
    """Basic Intel PEBS event.

    The first 80 bytes of the raw buffer hold the sampled context:
    EFLAGS, the linear IP, and eight general-purpose registers.
    """
    # Class-wide counter of PebsEvent (and subclass) instances created.
    pebs_num = 0

    def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
        # Ten unsigned 64-bit values (native byte order): flags, ip, then regs.
        tmp_buf=raw_buf[0:80]
        flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
        self.flags = flags
        self.ip = ip
        self.ax = ax
        self.bx = bx
        self.cx = cx
        self.dx = dx
        self.si = si
        self.di = di
        self.bp = bp
        self.sp = sp
        PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
        PebsEvent.pebs_num += 1
        del tmp_buf
#
# Intel Nehalem and Westmere support PEBS plus Load Latency info which lie
# in the four 64 bit words write after the PEBS data:
# Status: records the IA32_PERF_GLOBAL_STATUS register value
# DLA: Data Linear Address (EIP)
# DSE: Data Source Encoding, where the latency happens, hit or miss
# in L1/L2/L3 or IO operations
# LAT: the actual latency in cycles
#
class PebsNHM(PebsEvent):
    """Nehalem/Westmere PEBS event with load-latency data.

    Four extra 64-bit words follow the basic PEBS record at bytes 144..176:
    global status, data linear address, data source encoding, and latency.
    """
    # Class-wide counter of PebsNHM instances created.
    pebs_nhm_num = 0

    def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
        # Four unsigned 64-bit values appended after the basic PEBS record.
        tmp_buf=raw_buf[144:176]
        status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
        self.status = status  # IA32_PERF_GLOBAL_STATUS register value
        self.dla = dla        # Data Linear Address
        self.dse = dse        # Data Source Encoding (where the latency happens)
        self.lat = lat        # latency, in cycles
        PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
        PebsNHM.pebs_nhm_num += 1
        del tmp_buf
| gpl-2.0 |
wkeyword/pip | pip/_vendor/requests/packages/urllib3/util/connection.py | 679 | 3293 | import socket
try:
from select import poll, POLLIN
except ImportError: # `poll` doesn't exist on OSX and other platforms
poll = False
try:
from select import select
except ImportError: # `select` doesn't exist on AppEngine.
select = False
def is_connection_dropped(conn):  # Platform-specific
    """
    Returns True if the connection is dropped and should be closed.

    :param conn:
        :class:`httplib.HTTPConnection` object.

    Note: For platforms like AppEngine, this will always return ``False`` to
    let the platform handle connection recycling transparently for us.
    """
    sock = getattr(conn, 'sock', False)
    if sock is False:
        # Platform-specific: AppEngine exposes no socket attribute at all.
        return False
    if sock is None:
        # Connection already closed (such as by httplib).
        return True

    if not poll:
        # Fallback path where poll() is unavailable (e.g. OSX).
        if not select:  # Platform-specific: AppEngine
            return False
        try:
            return select([sock], [], [], 0.0)[0]
        except socket.error:
            return True

    # Preferred path on platforms that support poll(): zero-timeout probe.
    watcher = poll()
    watcher.register(sock, POLLIN)
    for descriptor, _event in watcher.poll(0.0):
        if descriptor == sock.fileno():
            # Either data is buffered (bad), or the connection is dropped.
            return True
# This function is copied from socket.py in the Python 2.7 standard
# library test suite. Added to its signature is only `socket_options`.
def create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
source_address=None, socket_options=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
An host of '' or port 0 tells the OS to use the default.
"""
host, port = address
err = None
for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket.socket(af, socktype, proto)
# If provided, set socket level options before connecting.
# This is the only addition urllib3 makes to this function.
_set_socket_options(sock, socket_options)
if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except socket.error as _:
err = _
if sock is not None:
sock.close()
sock = None
if err is not None:
raise err
else:
raise socket.error("getaddrinfo returns an empty list")
def _set_socket_options(sock, options):
if options is None:
return
for opt in options:
sock.setsockopt(*opt)
| mit |
tacid/ajenti | ajenti/plugins/health/widget.py | 17 | 1120 | from ajenti.ui import *
from ajenti import apis
from ajenti.com import implements, Plugin
from ajenti.api import *
class HealthWidget(Plugin):
    """Dashboard widget that shows the worst-case status reported by the
    shared 'health-monitor' component."""
    implements(apis.dashboard.IWidget)
    # Metadata consumed by the dashboard framework.
    title = 'Health'
    icon = '/dl/health/icon.png'
    name = 'Health monitor'
    style = 'linear'

    def get_ui(self, cfg, id=None):
        # Resolve the shared health-monitor component each time the UI is built.
        self.mon = ComponentManager.get().find('health-monitor')
        # Status code -> display label / CSS class suffix.
        text = { 'good': 'GOOD', 'susp': 'WARNING', 'dang': 'DANGER' }
        stat = { 'good': 'info', 'susp': 'warn', 'dang': 'err' }
        # Fold the individual checks into one worst-case status:
        # any 'dang' wins; otherwise 'susp' beats 'good'.
        ostat = 'good'
        for m in sorted(self.mon.get(), key=lambda x:x.name):
            st = self.mon.get()[m]
            if st == 'susp' and ostat == 'good':
                ostat = st
            if st == 'dang':
                ostat = st
        ui = self.app.inflate('health:widget')
        ui.find('overall').text = text[ostat]
        ui.find('overall')['class'] = 'status-cell-%s'%stat[ostat]
        return ui

    def handle(self, event, params, cfg, vars=None):
        # This widget has no interactive events.
        pass

    def get_config_dialog(self):
        # This widget is not configurable.
        pass

    def process_config(self, vars):
        pass
| lgpl-3.0 |
lmazuel/azure-sdk-for-python | azure-mgmt-cdn/azure/mgmt/cdn/models/operation_display.py | 1 | 1503 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
# NOTE: AutoRest-generated model (see file header) -- manual changes here
# will be lost on regeneration.
class OperationDisplay(Model):
    """The object that represents the operation.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar provider: Service provider: Microsoft.Cdn
    :vartype provider: str
    :ivar resource: Resource on which the operation is performed: Profile,
     endpoint, etc.
    :vartype resource: str
    :ivar operation: Operation type: Read, write, delete, etc.
    :vartype operation: str
    """

    # All fields are marked read-only: they are server-populated.
    _validation = {
        'provider': {'readonly': True},
        'resource': {'readonly': True},
        'operation': {'readonly': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'provider': {'key': 'provider', 'type': 'str'},
        'resource': {'key': 'resource', 'type': 'str'},
        'operation': {'key': 'operation', 'type': 'str'},
    }

    def __init__(self):
        super(OperationDisplay, self).__init__()
        # Values are filled in by msrest deserialization of server responses.
        self.provider = None
        self.resource = None
        self.operation = None
| mit |
mazimkhan/mbed-ls | mbed_lstools/lstools_darwin.py | 2 | 7223 | """
mbed SDK
Copyright (c) 2011-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
import subprocess
import plistlib
import platform
from .lstools_base import MbedLsToolsBase
class MbedLsToolsDarwin(MbedLsToolsBase):
    """ MbedLsToolsDarwin supports mbed-enabled platforms detection on Mac OS X
    """

    # IORegistry entry names that identify an mbed-style USB mass-storage
    # interface ('mbed' disks and SEGGER J-Link MSD volumes).
    mbed_volume_name_match = re.compile(r'(\bmbed\b|\bSEGGER MSD\b)', re.I)

    def list_mbeds(self):
        """ returns mbed list with platform names if possible
        """
        result = []
        # {volume_id: {serial:, vendor_id:, product_id:, tty:}}
        volumes = self.get_mbed_volumes()
        # {volume_id: mount_point}
        mounts = self.get_mount_points()

        # Keep only mbed volumes that are actually mounted right now.
        volumes_keys = set(volumes.keys())
        mounts_keys = set(mounts.keys())
        intersection = volumes_keys & mounts_keys

        valid_volumes = {}
        for key in intersection:
            valid_volumes[key] = volumes[key]

        # put together all of that info into the expected format:
        result = [
            {
                'mount_point': mounts[v],
                'serial_port': volumes[v]['tty'],
                'target_id': self.target_id(volumes[v]),
                'platform_name': self.platform_name(self.target_id(volumes[v]))
            } for v in valid_volumes
        ]

        self.ERRORLEVEL_FLAG = 0

        # if we're missing any platform names, try to fill those in by reading
        # mbed.htm:
        for i, _ in enumerate(result):
            if None in result[i]:
                # NOTE(review): this tests the dict *keys*, which are always the
                # four string literals above, so the branch can never fire.
                # `None in result[i].values()` may have been intended -- confirm
                # with upstream before changing, as that would also skip the
                # mbed.htm lookup below for incomplete entries.
                self.ERRORLEVEL_FLAG = -1
                continue

            if result[i]['mount_point']:
                # Deducing mbed-enabled TargetID based on available targetID definition DB.
                # If TargetID from USBID is not recognized we will try to check URL in mbed.htm
                htm_target_id = self.get_mbed_htm_target_id(result[i]['mount_point'])
                if htm_target_id:
                    result[i]['target_id_usb_id'] = result[i]['target_id']
                    result[i]['target_id'] = htm_target_id
                    result[i]['platform_name'] = self.platform_name(htm_target_id[:4])
                    result[i]['target_id_mbed_htm'] = htm_target_id
        return result

    def get_mount_points(self):
        ''' Returns map {volume_id: mount_point} '''
        # list disks, this gives us disk name, and volume name + mount point:
        diskutil_ls = subprocess.Popen(['diskutil', 'list', '-plist'], stdout=subprocess.PIPE)
        disks = plistlib.readPlist(diskutil_ls.stdout)
        diskutil_ls.wait()

        r = {}
        for disk in disks['AllDisksAndPartitions']:
            mount_point = None
            if 'MountPoint' in disk:
                mount_point = disk['MountPoint']
            # Unmounted volumes map to None.
            r[disk['DeviceIdentifier']] = mount_point
        return r

    def get_mbed_volumes(self):
        ''' returns a map {volume_id: {serial:, vendor_id:, product_id:, tty:}'''
        # to find all the possible mbed volumes, we look for registry entries
        # under all possible USB bus which have a "BSD Name" that starts with
        # "disk" (i.e. this is a USB disk), and have a IORegistryEntryName that
        # matches /\cmbed/
        # Once we've found a disk, we can search up for a parent with a valid
        # serial number, and then search down again to find a tty that's part
        # of the same composite device
        # ioreg -a -r -n <usb_controller_name> -l
        usb_controllers = ['AppleUSBXHCI', 'AppleUSBUHCI', 'AppleUSBEHCI', 'AppleUSBOHCI', 'IOUSBHostDevice']

        usb_bus = []

        cmp_par = '-n'
        # For El Captain we need to list all the instances of (-c) rather than compare names (-n)
        mac_ver = float('.'.join(platform.mac_ver()[0].split('.')[:2]))  # Returns mac version as float XX.YY
        if mac_ver >= 10.11:
            cmp_par = '-c'

        for usb_controller in usb_controllers:
            ioreg_usb = subprocess.Popen(['ioreg', '-a', '-r', cmp_par, usb_controller, '-l'], stdout=subprocess.PIPE)
            try:
                usb_bus = usb_bus + plistlib.readPlist(ioreg_usb.stdout)
            except Exception:
                # ioreg may produce no/invalid plist output for a controller
                # that is absent on this machine; treat it as "no devices".
                # (Narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit are no longer swallowed.)
                pass
            ioreg_usb.wait()

        r = {}

        def findTTYRecursive(obj):
            ''' return the first tty (AKA IODialinDevice) that we can find in the
                children of the specified object, or None if no tty is present.
            '''
            if 'IODialinDevice' in obj:
                return obj['IODialinDevice']
            if 'IORegistryEntryChildren' in obj:
                for child in obj['IORegistryEntryChildren']:
                    found = findTTYRecursive(child)
                    if found:
                        return found
            return None

        def findVolumesRecursive(obj, parents):
            # Collect disks whose registry name looks like an mbed volume.
            if 'BSD Name' in obj and obj['BSD Name'].startswith('disk') and \
               self.mbed_volume_name_match.search(obj['IORegistryEntryName']):
                disk_id = obj['BSD Name']
                # now search up through our parents until we find a serial number:
                usb_info = {
                    'serial': None,
                    'vendor_id': None,
                    'product_id': None,
                    'tty': None,
                }
                for parent in [obj] + parents:
                    if 'USB Serial Number' in parent:
                        usb_info['serial'] = parent['USB Serial Number']
                    if 'idVendor' in parent and 'idProduct' in parent:
                        usb_info['vendor_id'] = parent['idVendor']
                        usb_info['product_id'] = parent['idProduct']
                    if usb_info['serial']:
                        # stop at the first one we find (or we'll pick up hubs,
                        # etc.), but first check for a tty that's also a child of
                        # this device:
                        usb_info['tty'] = findTTYRecursive(parent)
                        break
                r[disk_id] = usb_info
            if 'IORegistryEntryChildren' in obj:
                for child in obj['IORegistryEntryChildren']:
                    findVolumesRecursive(child, [obj] + parents)

        for obj in usb_bus:
            findVolumesRecursive(obj, [])
        return r

    def target_id(self, usb_info):
        # The USB serial number doubles as the mbed target ID, when present.
        if usb_info['serial'] is not None:
            return usb_info['serial']
        else:
            return None

    def platform_name(self, target_id):
        # The first four characters of the target ID encode the platform type;
        # returns None (implicitly) for unknown IDs.
        if target_id[:4] in self.manufacture_ids:
            return self.manufacture_ids[target_id[:4]]
| apache-2.0 |
ogenstad/ansible | lib/ansible/playbook/attribute.py | 39 | 4253 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from copy import deepcopy
class Attribute:

    def __init__(
        self,
        isa=None,
        private=False,
        default=None,
        required=False,
        listof=None,
        priority=0,
        class_type=None,
        always_post_validate=False,
        inherit=True,
        alias=None,
        extend=False,
        prepend=False,
    ):

        """
        :class:`Attribute` specifies constraints for attributes of objects which
        derive from playbook data.  The attributes of the object are basically
        a schema for the yaml playbook.

        :kwarg isa: The type of the attribute.  Allowable values are a string
            representation of any yaml basic datatype, python class, or percent.
            (Enforced at post-validation time).
        :kwarg private: (not used)
        :kwarg default: Default value if unspecified in the YAML document.
            Mutable defaults (when isa is 'list', 'dict', or 'set') are
            deep-copied so instances never share one underlying object.
        :kwarg required: Whether or not the YAML document must contain this field.
            If the attribute is None when post-validated, an error will be raised.
        :kwarg listof: If isa is set to "list", this can optionally be set to
            ensure that all elements in the list are of the given type. Valid
            values here are the same as those for isa.
        :kwarg priority: The order in which the fields should be parsed. Generally
            this does not need to be set, it is for rare situations where another
            field depends on the fact that another field was parsed first.
        :kwarg class_type: If isa is set to "class", this can be optionally set to
            a class (not a string name). The YAML data for this field will be
            passed to the __init__ method of that class during post validation and
            the field will be an instance of that class.
        :kwarg always_post_validate: Controls whether a field should be post
            validated or not (default: False).
        :kwarg inherit: A boolean value, which controls whether the object
            containing this field should attempt to inherit the value from its
            parent object if the local value is None.
        :kwarg alias: An alias to use for the attribute name, for situations where
            the attribute name may conflict with a Python reserved word.
        :kwarg extend: Flag stored for the field-inheritance machinery
            (semantics are defined by the consumers of this class).
        :kwarg prepend: Flag stored for the field-inheritance machinery
            (semantics are defined by the consumers of this class).
        """

        self.isa = isa
        self.private = private
        self.required = required
        self.listof = listof
        self.priority = priority
        self.class_type = class_type
        self.always_post_validate = always_post_validate
        self.inherit = inherit
        self.alias = alias
        self.extend = extend
        self.prepend = prepend

        # Deep-copy mutable defaults so every object gets its own copy;
        # assigned exactly once (a redundant earlier plain assignment that
        # was immediately overwritten has been removed).
        if default is not None and self.isa in ('list', 'dict', 'set'):
            self.default = deepcopy(default)
        else:
            self.default = default

    # Attributes are ordered by priority alone.
    def __eq__(self, other):
        return other.priority == self.priority

    def __ne__(self, other):
        return other.priority != self.priority

    # NB: higher priority numbers sort first

    def __lt__(self, other):
        return other.priority < self.priority

    def __gt__(self, other):
        return other.priority > self.priority

    def __le__(self, other):
        return other.priority <= self.priority

    def __ge__(self, other):
        return other.priority >= self.priority
class FieldAttribute(Attribute):
    """Subclass of Attribute with no additional behavior; serves as a
    distinct type marker for playbook field attributes."""
    pass
| gpl-3.0 |
chacoroot/planetary | openerp/addons/base/ir/ir_values.py | 228 | 26238 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import pickle
from openerp import tools
from openerp.osv import osv, fields
from openerp.osv.orm import except_orm
EXCLUDED_FIELDS = set((
'report_sxw_content', 'report_rml_content', 'report_sxw', 'report_rml',
'report_sxw_content_data', 'report_rml_content_data', 'search_view', ))
#: Possible slots to bind an action to with :meth:`~.set_action`
ACTION_SLOTS = [
"client_action_multi", # sidebar wizard action
"client_print_multi", # sidebar report printing button
"client_action_relate", # sidebar related link
"tree_but_open", # double-click on item in tree view
"tree_but_action", # deprecated: same as tree_but_open
]
class ir_values(osv.osv):
"""Holds internal model-specific action bindings and user-defined default
field values. definitions. This is a legacy internal model, mixing
two different concepts, and will likely be updated or replaced in a
future version by cleaner, separate models. You should not depend
explicitly on it.
The purpose of each ``ir.values`` entry depends on its type, defined
by the ``key`` column:
* 'default': user-defined default values, used when creating new
records of this model:
* 'action': binding of an action to a particular *action slot* of
this model, making the action easily available in the user
interface for this model.
The ``key2`` column acts as a qualifier, further refining the type
of the entry. The possible values are:
* for 'default' entries: an optional condition restricting the
cases where this particular default value will be applicable,
or ``False`` for no condition
* for 'action' entries: the ``key2`` qualifier is one of the available
action slots, defining how this action can be invoked:
* ``'client_print_multi'`` for report printing actions that will
be available on views displaying items from this model
* ``'client_action_multi'`` for assistants (wizards) actions
that will be available in views displaying objects of this model
* ``'client_action_relate'`` for links towards related documents
that should be available in views displaying objects of this model
* ``'tree_but_open'`` for actions that will be triggered when
double-clicking an item from this model in a hierarchical tree view
Each entry is specific to a model (``model`` column), and for ``'actions'``
type, may even be made specific to a given record of that model when the
``res_id`` column contains a record ID (``False`` means it's global for
all records).
The content of the entry is defined by the ``value`` column, which may either
contain an arbitrary value, or a reference string defining the action that
should be executed.
.. rubric:: Usage: default values
The ``'default'`` entries are usually defined manually by the
users, and set by their UI clients calling :meth:`~.set_default`.
These default values are then automatically used by the
ORM every time a new record is about to be created, i.e. when
:meth:`~openerp.osv.osv.osv.default_get`
or :meth:`~openerp.osv.osv.osv.create` are called.
.. rubric:: Usage: action bindings
Business applications will usually bind their actions during
installation, and OpenERP UI clients will apply them as defined,
based on the list of actions included in the result of
:meth:`~openerp.osv.osv.osv.fields_view_get`,
or directly returned by explicit calls to :meth:`~.get_actions`.
"""
_name = 'ir.values'
    def _value_unpickle(self, cursor, user, ids, name, arg, context=None):
        # Function-field getter: expose the raw 'value' column as readable text.
        # 'default' entries store a pickled payload, so unpickle on the fly;
        # 'action' entries are plain reference strings and pass through as-is.
        res = {}
        for record in self.browse(cursor, user, ids, context=context):
            value = record[name[:-9]]  # 'value_unpickle'[:-9] -> 'value'
            if record.key == 'default' and value:
                # default values are pickled on the fly
                try:
                    value = str(pickle.loads(value))
                except Exception:
                    # Keep the raw stored value if it cannot be unpickled.
                    pass
            res[record.id] = value
        return res
    def _value_pickle(self, cursor, user, id, name, value, arg, context=None):
        # Function-field setter: pickle 'default' payloads before writing.
        if context is None:
            context = {}
        ctx = context.copy()
        # Drop the concurrency-check marker so the write below is not rejected.
        if self.CONCURRENCY_CHECK_FIELD in ctx:
            del ctx[self.CONCURRENCY_CHECK_FIELD]
        record = self.browse(cursor, user, id, context=context)
        if record.key == 'default':
            # default values are pickled on the fly
            value = pickle.dumps(value)
        self.write(cursor, user, id, {name[:-9]: value}, context=ctx)
    def onchange_object_id(self, cr, uid, ids, object_id, context=None):
        # UI onchange handler: keep the technical model name ('model' field)
        # in sync with the selected model_id helper field.
        if not object_id: return {}
        act = self.pool.get('ir.model').browse(cr, uid, object_id, context=context)
        return {
            'value': {'model': act.model}
        }
    def onchange_action_id(self, cr, uid, ids, action_id, context=None):
        # UI onchange handler: build the "type,id" action reference string
        # from the selected action_id helper field.
        if not action_id: return {}
        act = self.pool.get('ir.actions.actions').browse(cr, uid, action_id, context=context)
        return {
            'value': {'value_unpickle': act.type+','+str(act.id)}
        }
_columns = {
'name': fields.char('Name', required=True),
'model': fields.char('Model Name', select=True, required=True,
help="Model to which this entry applies"),
# TODO: model_id and action_id should be read-write function fields
'model_id': fields.many2one('ir.model', 'Model (change only)', size=128,
help="Model to which this entry applies - "
"helper field for setting a model, will "
"automatically set the correct model name"),
'action_id': fields.many2one('ir.actions.actions', 'Action (change only)',
help="Action bound to this entry - "
"helper field for binding an action, will "
"automatically set the correct reference"),
'value': fields.text('Value', help="Default value (pickled) or reference to an action"),
'value_unpickle': fields.function(_value_unpickle, fnct_inv=_value_pickle,
type='text',
string='Default value or action reference'),
'key': fields.selection([('action','Action'),('default','Default')],
'Type', select=True, required=True,
help="- Action: an action attached to one slot of the given model\n"
"- Default: a default value for a model field"),
'key2' : fields.char('Qualifier', select=True,
help="For actions, one of the possible action slots: \n"
" - client_action_multi\n"
" - client_print_multi\n"
" - client_action_relate\n"
" - tree_but_open\n"
"For defaults, an optional condition"
,),
'res_id': fields.integer('Record ID', select=True,
help="Database identifier of the record to which this applies. "
"0 = for all records"),
'user_id': fields.many2one('res.users', 'User', ondelete='cascade', select=True,
help="If set, action binding only applies for this user."),
'company_id': fields.many2one('res.company', 'Company', ondelete='cascade', select=True,
help="If set, action binding only applies for this company")
}
_defaults = {
'key': 'action',
'key2': 'tree_but_open',
}
    def _auto_init(self, cr, context=None):
        # Ensure the composite index used by the frequent default/action
        # lookups exists; created once, checked on every registry init.
        super(ir_values, self)._auto_init(cr, context)
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'ir_values_key_model_key2_res_id_user_id_idx\'')
        if not cr.fetchone():
            cr.execute('CREATE INDEX ir_values_key_model_key2_res_id_user_id_idx ON ir_values (key, model, key2, res_id, user_id)')
    def create(self, cr, uid, vals, context=None):
        # Any change to ir.values invalidates the cached defaults dictionary.
        res = super(ir_values, self).create(cr, uid, vals, context=context)
        self.get_defaults_dict.clear_cache(self)
        return res
    def write(self, cr, uid, ids, vals, context=None):
        # Any change to ir.values invalidates the cached defaults dictionary.
        res = super(ir_values, self).write(cr, uid, ids, vals, context=context)
        self.get_defaults_dict.clear_cache(self)
        return res
    def unlink(self, cr, uid, ids, context=None):
        # Any change to ir.values invalidates the cached defaults dictionary.
        res = super(ir_values, self).unlink(cr, uid, ids, context=context)
        self.get_defaults_dict.clear_cache(self)
        return res
    def set_default(self, cr, uid, model, field_name, value, for_all_users=True, company_id=False, condition=False):
        """Defines a default value for the given model and field_name. Any previous
           default for the same scope (model, field_name, value, for_all_users, company_id, condition)
           will be replaced and lost in the process.

           Defaults can be later retrieved via :meth:`~.get_defaults`, which will return
           the highest priority default for any given field. Defaults that are more specific
           have a higher priority, in the following order (highest to lowest):

               * specific to user and company
               * specific to user only
               * specific to company only
               * global to everyone

           :param string model: model name
           :param string field_name: field name to which the default applies
           :param value: the default field value to set
           :type value: any serializable Python value
           :param bool for_all_users: whether the default should apply to everybody or only
                                      the user calling the method
           :param int company_id: optional ID of the company to which the default should
                                  apply. If omitted, the default will be global. If True
                                  is passed, the current user's company will be used.
           :param string condition: optional condition specification that can be used to
                                    restrict the applicability of the default values
                                    (e.g. based on another field's value). This is an
                                    opaque string as far as the API is concerned, but client
                                    stacks typically use single-field conditions in the
                                    form ``'key=stringified_value'``.
                                    (Currently, the condition is trimmed to 200 characters,
                                    so values that share the same first 200 characters always
                                    match)
           :return: id of the newly created ir.values entry
        """
        # Python 2: pickle payloads are bytes, so normalize unicode first.
        if isinstance(value, unicode):
            value = value.encode('utf8')

        if company_id is True:
            # should be company-specific, need to get company id
            user = self.pool.get('res.users').browse(cr, uid, uid)
            company_id = user.company_id.id

        # remove existing defaults for the same scope
        search_criteria = [
            ('key', '=', 'default'),
            ('key2', '=', condition and condition[:200]),
            ('model', '=', model),
            ('name', '=', field_name),
            ('user_id', '=', False if for_all_users else uid),
            ('company_id','=', company_id)
            ]
        self.unlink(cr, uid, self.search(cr, uid, search_criteria))

        # The value is stored pickled; get_default()/get_defaults() unpickle
        # it on read.
        return self.create(cr, uid, {
            'name': field_name,
            'value': pickle.dumps(value),
            'model': model,
            'key': 'default',
            'key2': condition and condition[:200],
            'user_id': False if for_all_users else uid,
            'company_id': company_id,
        })
    def get_default(self, cr, uid, model, field_name, for_all_users=True, company_id=False, condition=False):
        """ Return the default value defined for model, field_name, users, company and condition.
            Return ``None`` if no such default exists.
        """
        # Mirror of set_default's scope: an entry only matches when every
        # scope component (user, company, condition) is identical.
        search_criteria = [
            ('key', '=', 'default'),
            ('key2', '=', condition and condition[:200]),
            ('model', '=', model),
            ('name', '=', field_name),
            ('user_id', '=', False if for_all_users else uid),
            ('company_id','=', company_id)
            ]
        defaults = self.browse(cr, uid, self.search(cr, uid, search_criteria))
        # Stored values are pickled (see set_default); take the first match.
        return pickle.loads(defaults[0].value.encode('utf-8')) if defaults else None
    def get_defaults(self, cr, uid, model, condition=False):
        """Returns any default values that are defined for the current model and user,
           (and match ``condition``, if specified), previously registered via
           :meth:`~.set_default`.
           Defaults are global to a model, not field-specific, but an optional
           ``condition`` can be provided to restrict matching default values
           to those that were defined for the same condition (usually based
           on another field's value).
           Default values also have priorities depending on whom they apply
           to: only the highest priority value will be returned for any
           field. See :meth:`~.set_default` for more details.
           :param string model: model name
           :param string condition: optional condition specification that can be used to
                                    restrict the applicability of the default values
                                    (e.g. based on another field's value). This is an
                                    opaque string as far as the API is concerned, but client
                                    stacks typically use single-field conditions in the
                                    form ``'key=stringified_value'``.
                                    (Currently, the condition is trimmed to 200 characters,
                                    so values that share the same first 200 characters always
                                    match)
           :return: list of default values tuples of the form ``(id, field_name, value)``
                    (``id`` is the ID of the default entry, usually irrelevant)
        """
        # use a direct SQL query for performance reasons,
        # this is called very often
        # NOTE: the doubled %%s placeholders survive the string interpolation
        # below as %s psycopg2 parameters; only the single bare %s is replaced
        # by the key2 condition clause.
        query = """SELECT v.id, v.name, v.value FROM ir_values v
                      LEFT JOIN res_users u ON (v.user_id = u.id)
                   WHERE v.key = %%s AND v.model = %%s
                      AND (v.user_id = %%s OR v.user_id IS NULL)
                      AND (v.company_id IS NULL OR
                           v.company_id =
                             (SELECT company_id from res_users where id = %%s)
                          )
                   %s
                   ORDER BY v.user_id, u.company_id"""
        params = ('default', model, uid, uid)
        if condition:
            query %= 'AND v.key2 = %s'
            params += (condition[:200],)
        else:
            query %= 'AND v.key2 is NULL'
        cr.execute(query, params)
        # keep only the highest priority default for each field:
        # rows are ordered so higher-priority entries come first, and
        # setdefault() ignores subsequent lower-priority rows for a field
        defaults = {}
        for row in cr.dictfetchall():
            defaults.setdefault(row['name'],
                (row['id'], row['name'], pickle.loads(row['value'].encode('utf-8'))))
        return defaults.values()
# use ormcache: this is called a lot by BaseModel.default_get()!
@tools.ormcache(skiparg=2)
def get_defaults_dict(self, cr, uid, model, condition=False):
""" Returns a dictionary mapping field names with their corresponding
default value. This method simply improves the returned value of
:meth:`~.get_defaults`.
"""
return dict((f, v) for i, f, v in self.get_defaults(cr, uid, model, condition))
def set_action(self, cr, uid, name, action_slot, model, action, res_id=False):
"""Binds an the given action to the given model's action slot - for later
retrieval via :meth:`~.get_actions`. Any existing binding of the same action
to the same slot is first removed, allowing an update of the action's name.
See the class description for more details about the various action
slots: :class:`~ir_values`.
:param string name: action label, usually displayed by UI client
:param string action_slot: the action slot to which the action should be
bound to - one of ``client_action_multi``,
``client_print_multi``, ``client_action_relate``,
``tree_but_open``.
:param string model: model name
:param string action: action reference, in the form ``'model,id'``
:param int res_id: optional record id - will bind the action only to a
specific record of the model, not all records.
:return: id of the newly created ir.values entry
"""
assert isinstance(action, basestring) and ',' in action, \
'Action definition must be an action reference, e.g. "ir.actions.act_window,42"'
assert action_slot in ACTION_SLOTS, \
'Action slot (%s) must be one of: %r' % (action_slot, ACTION_SLOTS)
# remove existing action definition of same slot and value
search_criteria = [
('key', '=', 'action'),
('key2', '=', action_slot),
('model', '=', model),
('res_id', '=', res_id or 0), # int field -> NULL == 0
('value', '=', action),
]
self.unlink(cr, uid, self.search(cr, uid, search_criteria))
return self.create(cr, uid, {
'key': 'action',
'key2': action_slot,
'model': model,
'res_id': res_id,
'name': name,
'value': action,
})
    def get_actions(self, cr, uid, action_slot, model, res_id=False, context=None):
        """Retrieves the list of actions bound to the given model's action slot.
           See the class description for more details about the various action
           slots: :class:`~.ir_values`.
           :param string action_slot: the action slot to which the actions should be
                                      bound to - one of ``client_action_multi``,
                                      ``client_print_multi``, ``client_action_relate``,
                                      ``tree_but_open``.
           :param string model: model name
           :param int res_id: optional record id - will bind the action only to a
                              specific record of the model, not all records.
           :return: list of action tuples of the form ``(id, name, action_def)``,
                    where ``id`` is the ID of the default entry, ``name`` is the
                    action label, and ``action_def`` is a dict containing the
                    action definition as obtained by calling
                    :meth:`~openerp.osv.osv.osv.read` on the action record.
        """
        assert action_slot in ACTION_SLOTS, 'Illegal action slot value: %s' % action_slot
        # use a direct SQL query for performance reasons,
        # this is called very often
        # matches both record-specific bindings (v.res_id = res_id) and
        # model-wide ones (res_id NULL or 0)
        query = """SELECT v.id, v.name, v.value FROM ir_values v
                   WHERE v.key = %s AND v.key2 = %s
                        AND v.model = %s
                        AND (v.res_id = %s
                             OR v.res_id IS NULL
                             OR v.res_id = 0)
                   ORDER BY v.id"""
        cr.execute(query, ('action', action_slot, model, res_id or None))
        results = {}
        for action in cr.dictfetchall():
            if not action['value']:
                continue  # skip if undefined
            # stored references have the form 'model,id' (see set_action)
            action_model_name, action_id = action['value'].split(',')
            if action_model_name not in self.pool:
                continue  # unknown model? skip it
            action_model = self.pool[action_model_name]
            fields = [field for field in action_model._fields if field not in EXCLUDED_FIELDS]
            # FIXME: needs cleanup
            try:
                action_def = action_model.read(cr, uid, int(action_id), fields, context)
                if action_def:
                    if action_model_name in ('ir.actions.report.xml', 'ir.actions.act_window'):
                        # enforce group restrictions: the user must belong to
                        # at least one of the action's groups to see it
                        groups = action_def.get('groups_id')
                        if groups:
                            cr.execute('SELECT 1 FROM res_groups_users_rel WHERE gid IN %s AND uid=%s',
                                       (tuple(groups), uid))
                            if not cr.fetchone():
                                # menu entries raise an explicit error instead
                                # of being silently hidden
                                if action['name'] == 'Menuitem':
                                    raise osv.except_osv('Error!',
                                                         'You do not have the permission to perform this operation!!!')
                                continue
                # deduplicate by action name (later rows with the same name
                # overwrite earlier ones, since this is a plain assignment)
                results[action['name']] = (action['id'], action['name'], action_def)
            except except_orm:
                continue
        return sorted(results.values())
def _map_legacy_model_list(self, model_list, map_fn, merge_results=False):
"""Apply map_fn to the various models passed, according to
legacy way to specify models/records.
"""
assert isinstance(model_list, (list, tuple)), \
"model_list should be in the form [model,..] or [(model,res_id), ..]"
results = []
for model in model_list:
res_id = False
if isinstance(model, (list, tuple)):
model, res_id = model
result = map_fn(model, res_id)
# some of the functions return one result at a time (tuple or id)
# and some return a list of many of them - care for both
if merge_results:
results.extend(result)
else:
results.append(result)
return results
    # Backwards-compatibility adapter layer to retrofit into split API
    def set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=False, preserve_user=False, company=False):
        """Deprecated legacy method to set default values and bind actions to models' action slots.
           Now dispatches to the newer API methods according to the value of ``key``: :meth:`~.set_default`
           (``key=='default'``) or :meth:`~.set_action` (``key == 'action'``).
           ``replace``, ``isobject`` and ``meta`` are accepted only for
           compatibility with the pre-v6.1 signature and are ignored.
           :deprecated: As of v6.1, ``set_default()`` or ``set_action()`` should be used directly.
        """
        assert key in ['default', 'action'], "ir.values entry keys must be in ['default','action']"
        if key == 'default':
            def do_set(model,res_id):
                # res_id is irrelevant for defaults - they are model-wide
                return self.set_default(cr, uid, model, field_name=name, value=value,
                                        for_all_users=(not preserve_user), company_id=company,
                                        condition=key2)
        elif key == 'action':
            def do_set(model,res_id):
                return self.set_action(cr, uid, name, action_slot=key2, model=model, action=value, res_id=res_id)
        return self._map_legacy_model_list(models, do_set)
def get(self, cr, uid, key, key2, models, meta=False, context=None, res_id_req=False, without_user=True, key2_req=True):
"""Deprecated legacy method to get the list of default values or actions bound to models' action slots.
Now dispatches to the newer API methods according to the value of ``key``: :meth:`~.get_defaults`
(``key=='default'``) or :meth:`~.get_actions` (``key == 'action'``)
:deprecated: As of v6.1, ``get_defaults()`` or ``get_actions()`` should be used directly.
"""
assert key in ['default', 'action'], "ir.values entry keys must be in ['default','action']"
if key == 'default':
def do_get(model,res_id):
return self.get_defaults(cr, uid, model, condition=key2)
elif key == 'action':
def do_get(model,res_id):
return self.get_actions(cr, uid, action_slot=key2, model=model, res_id=res_id, context=context)
return self._map_legacy_model_list(models, do_get, merge_results=True)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jmesteve/openerp | openerp/report/common.py | 457 | 3337 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Page dimensions in millimetres as (width, height), keyed by ISO paper size.
pageSize = {
    'A4': (210,297),
    'A5': (148.5,105)
}
# ElementTree-style namespace map for OpenDocument (ODT) XML: each value is
# the namespace URI wrapped in braces, ready to be prefixed to a tag name.
odt_namespace = {
    "office":"{urn:oasis:names:tc:opendocument:xmlns:office:1.0}",
    "style":"{urn:oasis:names:tc:opendocument:xmlns:style:1.0}",
    "text":"{urn:oasis:names:tc:opendocument:xmlns:text:1.0}",
    "table":"{urn:oasis:names:tc:opendocument:xmlns:table:1.0}",
    "draw":"{urn:oasis:names:tc:opendocument:xmlns:drawing:1.0}",
    "fo":"{urn:oasis:names:tc:opendocument:xmlns:xsl-fo-compatible:1.0}",
    "xlink":"{http://www.w3.org/1999/xlink}",
    "dc":"{http://purl.org/dc/elements/1.1/}",
    "meta":"{urn:oasis:names:tc:opendocument:xmlns:meta:1.0}",
    "number":"{urn:oasis:names:tc:opendocument:xmlns:datastyle:1.0}",
    "svg":"{urn:oasis:names:tc:opendocument:xmlns:svg-compatible:1.0}",
    "chart":"{urn:oasis:names:tc:opendocument:xmlns:chart:1.0}",
    "dr3d":"{urn:oasis:names:tc:opendocument:xmlns:dr3d:1.0}",
    "math":"{http://www.w3.org/1998/Math/MathML}",
    "form":"{urn:oasis:names:tc:opendocument:xmlns:form:1.0}",
    "script":"{urn:oasis:names:tc:opendocument:xmlns:script:1.0}",
    "ooo":"{http://openoffice.org/2004/office}",
    "ooow":"{http://openoffice.org/2004/writer}",
    "oooc":"{http://openoffice.org/2004/calc}",
    "dom":"{http://www.w3.org/2001/xml-events}" }
# Same mapping for the legacy OpenOffice 1.x (SXW) XML format.
sxw_namespace = {
    "office":"{http://openoffice.org/2000/office}",
    "style":"{http://openoffice.org/2000/style}",
    "text":"{http://openoffice.org/2000/text}",
    "table":"{http://openoffice.org/2000/table}",
    "draw":"{http://openoffice.org/2000/drawing}",
    "fo":"{http://www.w3.org/1999/XSL/Format}",
    "xlink":"{http://www.w3.org/1999/xlink}",
    "dc":"{http://purl.org/dc/elements/1.1/}",
    "meta":"{http://openoffice.org/2000/meta}",
    "number":"{http://openoffice.org/2000/datastyle}",
    "svg":"{http://www.w3.org/2000/svg}",
    "chart":"{http://openoffice.org/2000/chart}",
    "dr3d":"{http://openoffice.org/2000/dr3d}",
    "math":"{http://www.w3.org/1998/Math/MathML}",
    "form":"{http://openoffice.org/2000/form}",
    "script":"{http://openoffice.org/2000/script}",
    "ooo":"{http://openoffice.org/2004/office}",
    "ooow":"{http://openoffice.org/2004/writer}",
    "oooc":"{http://openoffice.org/2004/calc}",
    "dom":"{http://www.w3.org/2001/xml-events}"}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
shahankhatch/pyethereum | ethereum/blocks.py | 1 | 56339 | # ####### dev hack flags ###############
dump_block_on_failed_verification = False
# ######################################
import time
from itertools import count
import sys
from collections import Iterable
import rlp
from rlp.sedes import big_endian_int, Binary, binary, CountableList
from rlp.utils import decode_hex, encode_hex
from ethereum import pruning_trie as trie
from ethereum.pruning_trie import Trie
from ethereum.securetrie import SecureTrie
from ethereum import utils
from ethereum.utils import address, int256, trie_root, hash32, to_string, big_endian_to_int
from ethereum import processblock
from ethereum.transactions import Transaction
from ethereum import bloom
from ethereum.exceptions import UnknownParentException, VerificationFailed
from ethereum.slogging import get_logger
from ethereum.ethpow import check_pow
from ethereum.db import BaseDB
from ethereum.config import Env, default_config
if sys.version_info.major == 2:
from repoze.lru import lru_cache
else:
from functools import lru_cache
log = get_logger('eth.block')
log_state = get_logger('eth.msg.state')
Log = processblock.Log
class lazy_encode(object):
    """Wrap a list of journal/trace entries so that the (potentially
    expensive) hex encoding only happens when the object is actually
    rendered via ``repr()``/``str()`` (e.g. by a lazy logger)."""
    def __init__(self, data):
        self.data = data
    def encode_entry(self, entry):
        """Encode a single 3- or 4-element entry into a printable list."""
        def to_printable(value):
            # handle binary data (assuming all str are binary)
            if isinstance(value, (str, unicode)):
                return encode_hex(value)
            # handle non binary data, eg.: floats, ints ...
            return repr(value)
        if not isinstance(entry, Iterable):
            raise ValueError('entry must be iterable')
        entry = list(entry)  # consume the iterable
        length = len(entry)
        if length == 3:
            name, addr, v = entry
            # 'code' entries carry a binary value; other 3-tuples keep it raw
            third = to_printable(v) if name == 'code' else v
            return [name, to_printable(addr), third]
        if length == 4:
            name, addr, k, v = entry
            return [name, to_printable(addr), to_printable(k), to_printable(v)]
        raise ValueError('Unexpected entry format {}'.format(entry))
    def __repr__(self):
        return repr([self.encode_entry(e) for e in self.data])
    def __str__(self):
        return str(repr(self))
# Difficulty adjustment algo
def calc_difficulty(parent, timestamp):
    """Compute the difficulty of the block following ``parent``.

    The adjustment rule depends on which fork the chain is on (read from
    ``parent.config``): Metropolis factors in the uncle count, Homestead
    scales the downward pressure with the block time, and the original
    frontier rule is a simple +/-1 step around DIFF_ADJUSTMENT_CUTOFF.
    On top of that the exponential "difficulty bomb" kicks in after
    EXPDIFF_FREE_PERIODS periods of EXPDIFF_PERIOD blocks.

    :param parent: the parent block (provides config, number, difficulty,
                   timestamp and uncles)
    :param timestamp: the timestamp of the child block being created
    :return: the child block's difficulty as an int
    """
    config = parent.config
    delta = timestamp - parent.timestamp
    adjustment = parent.difficulty // config['BLOCK_DIFF_FACTOR']
    if parent.number >= (config['METROPOLIS_FORK_BLKNUM'] - 1):
        # uncles push difficulty up, slow blocks pull it down (capped at -99)
        sign = max(len(parent.uncles) - (delta // config['METROPOLIS_DIFF_ADJUSTMENT_CUTOFF']), -99)
    elif parent.number >= (config['HOMESTEAD_FORK_BLKNUM'] - 1):
        sign = max(1 - (delta // config['HOMESTEAD_DIFF_ADJUSTMENT_CUTOFF']), -99)
    else:
        sign = 1 if delta < config['DIFF_ADJUSTMENT_CUTOFF'] else -1
    # If we enter a special mode where the genesis difficulty starts off below
    # the minimal difficulty, we allow low-difficulty blocks (this will never
    # happen in the official protocol)
    floor = min(parent.difficulty, config['MIN_DIFF'])
    new_diff = int(max(parent.difficulty + adjustment * sign, floor))
    # exponential difficulty bomb, active after the free periods have elapsed
    period_count = (parent.number + 1) // config['EXPDIFF_PERIOD']
    if period_count >= config['EXPDIFF_FREE_PERIODS']:
        new_diff = max(new_diff + 2 ** (period_count - config['EXPDIFF_FREE_PERIODS']), config['MIN_DIFF'])
    return new_diff
class Account(rlp.Serializable):
    """An Ethereum account.
    :ivar nonce: the account's nonce (the number of transactions sent by the
                 account)
    :ivar balance: the account's balance in wei
    :ivar storage: the root of the account's storage trie
    :ivar code_hash: the SHA3 hash of the code associated with the account
    :ivar db: the database in which the account's code is stored
    """
    # RLP serialization layout (db is intentionally not serialized)
    fields = [
        ('nonce', big_endian_int),
        ('balance', big_endian_int),
        ('storage', trie_root),
        ('code_hash', hash32)
    ]
    def __init__(self, nonce, balance, storage, code_hash, db):
        assert isinstance(db, BaseDB)
        self.db = db
        super(Account, self).__init__(nonce, balance, storage, code_hash)
    @property
    def code(self):
        """The EVM code of the account.
        This property will be read from or written to the db at each access,
        with :ivar:`code_hash` used as key.
        """
        return self.db.get(self.code_hash)
    @code.setter
    def code(self, value):
        # store code content-addressed: key is the hash of the code itself
        self.code_hash = utils.sha3(value)
        # Technically a db storage leak, but doesn't really matter; the only
        # thing that fails to get garbage collected is when code disappears due
        # to a suicide
        self.db.inc_refcount(self.code_hash, value)
    @classmethod
    def blank_account(cls, db, initial_nonce=0):
        """Create a blank account
        The returned account will have zero nonce and balance, a blank storage
        trie and empty code.
        :param db: the db in which the account will store its code.
        :param initial_nonce: starting nonce (some configs use a non-zero one)
        """
        # register the empty-code entry so later `code` reads succeed
        code_hash = utils.sha3(b'')
        db.put(code_hash, b'')
        return cls(initial_nonce, 0, trie.BLANK_ROOT, code_hash, db)
class Receipt(rlp.Serializable):
    """A transaction receipt: the intermediate state root, cumulative gas
    used, and the logs produced by a single transaction.
    The bloom filter is not stored as an attribute; it is recomputed from
    the logs on every access (see the ``bloom`` property below).
    """
    fields = [
        ('state_root', trie_root),
        ('gas_used', big_endian_int),
        ('bloom', int256),
        ('logs', CountableList(processblock.Log))
    ]
    def __init__(self, state_root, gas_used, logs, bloom=None):
        # does not call super.__init__ as bloom should not be an attribute but a property
        self.state_root = state_root
        self.gas_used = gas_used
        self.logs = logs
        # when a bloom value is supplied (e.g. during deserialization) it must
        # agree with the filter recomputed from the logs
        if bloom is not None and bloom != self.bloom:
            raise ValueError("Invalid bloom filter")
        # replicate the bookkeeping rlp.Serializable.__init__ would have done
        self._cached_rlp = None
        self._mutable = True
    @property
    def bloom(self):
        # recompute the log bloom filter from each log's bloomable items
        bloomables = [x.bloomables() for x in self.logs]
        return bloom.bloom_from_list(utils.flatten(bloomables))
class BlockHeader(rlp.Serializable):
    """A block header.
    If the block with this header exists as an instance of :class:`Block`, the
    connection can be made explicit by setting :attr:`BlockHeader.block`. Then,
    :attr:`BlockHeader.state_root`, :attr:`BlockHeader.tx_list_root` and
    :attr:`BlockHeader.receipts_root` always refer to the up-to-date value in
    the block instance.
    :ivar block: an instance of :class:`Block` or `None`
    :ivar prevhash: the 32 byte hash of the previous block
    :ivar uncles_hash: the 32 byte hash of the RLP encoded list of uncle
                       headers
    :ivar coinbase: the 20 byte coinbase address
    :ivar state_root: the root of the block's state trie
    :ivar tx_list_root: the root of the block's transaction trie
    :ivar receipts_root: the root of the block's receipts trie
    :ivar bloom: the aggregated log bloom filter of the block
    :ivar difficulty: the block's difficulty
    :ivar number: the number of ancestors of this block (0 for the genesis
                  block)
    :ivar gas_limit: the block's gas limit
    :ivar gas_used: the total amount of gas used by all transactions in this
                    block
    :ivar timestamp: a UNIX timestamp
    :ivar extra_data: up to 1024 bytes of additional data
    :ivar nonce: a 32 byte nonce constituting a proof-of-work, or the empty
                 string as a placeholder
    """
    fields = [
        ('prevhash', hash32),
        ('uncles_hash', hash32),
        ('coinbase', address),
        ('state_root', trie_root),
        ('tx_list_root', trie_root),
        ('receipts_root', trie_root),
        ('bloom', int256),
        ('difficulty', big_endian_int),
        ('number', big_endian_int),
        ('gas_limit', big_endian_int),
        ('gas_used', big_endian_int),
        ('timestamp', big_endian_int),
        ('extra_data', binary),
        ('mixhash', binary),
        ('nonce', Binary(8, allow_empty=True))
    ]
    def __init__(self,
                 prevhash=default_config['GENESIS_PREVHASH'],
                 uncles_hash=utils.sha3rlp([]),
                 coinbase=default_config['GENESIS_COINBASE'],
                 state_root=trie.BLANK_ROOT,
                 tx_list_root=trie.BLANK_ROOT,
                 receipts_root=trie.BLANK_ROOT,
                 bloom=0,
                 difficulty=default_config['GENESIS_DIFFICULTY'],
                 number=0,
                 gas_limit=default_config['GENESIS_GAS_LIMIT'],
                 gas_used=0,
                 timestamp=0,
                 extra_data='',
                 mixhash=default_config['GENESIS_MIXHASH'],
                 nonce=''):
        # at the beginning of a method, locals() is a dict of all arguments
        fields = {k: v for k, v in locals().items() if k != 'self'}
        # accept a 40-char hex-encoded coinbase and normalize to 20 raw bytes
        if len(fields['coinbase']) == 40:
            fields['coinbase'] = decode_hex(fields['coinbase'])
        assert len(fields['coinbase']) == 20
        self.block = None
        super(BlockHeader, self).__init__(**fields)
    @classmethod
    def from_block_rlp(self, rlp_data):
        """Deserialize only the header out of an RLP-encoded *full block*.
        NOTE(review): the first argument is the class (this is a
        classmethod); it is conventionally named ``cls``.
        """
        block_data = rlp.decode_lazy(rlp_data)
        # a block serializes as [header, transactions, uncles]; element 0 only
        r = super(BlockHeader, self).deserialize(block_data[0])
        assert isinstance(r, BlockHeader)
        return r
    # The three trie-root properties below delegate to the attached block
    # (when self.block is set) so header and block can never disagree.
    @property
    def state_root(self):
        if self.block:
            return self.block.state_root
        else:
            return self._state_root
    @state_root.setter
    def state_root(self, value):
        if self.block:
            self.block.state_root = value
        else:
            self._state_root = value
    @property
    def tx_list_root(self):
        if self.block:
            return self.block.tx_list_root
        else:
            return self._tx_list_root
    @tx_list_root.setter
    def tx_list_root(self, value):
        if self.block:
            self.block.tx_list_root = value
        else:
            self._tx_list_root = value
    @property
    def receipts_root(self):
        if self.block:
            return self.block.receipts_root
        else:
            return self._receipts_root
    @receipts_root.setter
    def receipts_root(self, value):
        if self.block:
            self.block.receipts_root = value
        else:
            self._receipts_root = value
    @property
    def hash(self):
        """The binary block hash"""
        return utils.sha3(rlp.encode(self))
    def hex_hash(self):
        """The hex encoded block hash"""
        return encode_hex(self.hash)
    @property
    def mining_hash(self):
        """Hash of the header *without* mixhash/nonce, used as PoW input."""
        return utils.sha3(rlp.encode(self, BlockHeader.exclude(['mixhash', 'nonce'])))
    def check_pow(self, nonce=None):
        """Check if the proof-of-work of the block is valid.
        :param nonce: if given the proof of work function will be evaluated
                      with this nonce instead of the one already present in
                      the header
        :returns: `True` or `False`
        """
        log.debug('checking pow', block=self.hex_hash()[:8])
        return check_pow(self.number, self.mining_hash, self.mixhash, nonce or self.nonce,
                         self.difficulty)
    def to_dict(self):
        """Serialize the header to a readable dictionary."""
        d = {}
        # raw binary fields are rendered with an explicit 0x prefix
        for field in ('prevhash', 'uncles_hash', 'extra_data', 'nonce',
                      'mixhash'):
            d[field] = b'0x' + encode_hex(getattr(self, field))
        for field in ('state_root', 'tx_list_root', 'receipts_root',
                      'coinbase'):
            d[field] = encode_hex(getattr(self, field))
        for field in ('number', 'difficulty', 'gas_limit', 'gas_used',
                      'timestamp'):
            d[field] = to_string(getattr(self, field))
        d['bloom'] = encode_hex(int256.serialize(self.bloom))
        assert len(d) == len(BlockHeader.fields)
        return d
    def __repr__(self):
        return '<%s(#%d %s)>' % (self.__class__.__name__, self.number,
                                 encode_hex(self.hash)[:8])
    def __eq__(self, other):
        """Two blockheader are equal iff they have the same hash."""
        return isinstance(other, BlockHeader) and self.hash == other.hash
    def __hash__(self):
        # hash consistently with __eq__: derived from the block hash
        return big_endian_to_int(self.hash)
    def __ne__(self, other):
        # needed on Python 2, where != is not derived from __eq__
        return not self.__eq__(other)
def mirror_from(source, attributes, only_getters=True):
    """Class decorator factory: proxy the listed attributes to an
    instance variable.

    :param source: the name of the instance attribute to delegate to
    :param attributes: iterable of attribute names to mirror
    :param only_getters: if true only getters but not setters are created
    """
    def decorator(cls):
        for name in attributes:
            def build_accessors(src, attr):
                # inner factory so each property closes over its own
                # (src, attr) pair instead of the shared loop variable
                def read(self):
                    return getattr(getattr(self, src), attr)
                def write(self, value):
                    setattr(getattr(self, src), attr, value)
                return read, write
            read, write = build_accessors(source, name)
            prop = property(read) if only_getters else property(read, write)
            setattr(cls, name, prop)
        return cls
    return decorator
@mirror_from('header', set(field for field, _ in BlockHeader.fields) -
set(['state_root', 'receipts_root', 'tx_list_root']),
only_getters=False)
class Block(rlp.Serializable):
"""A block.
All attributes from the block header are accessible via properties
(i.e. ``block.prevhash`` is equivalent to ``block.header.prevhash``). It
is ensured that no discrepancies between header and block occur.
:param header: the block header
:param transaction_list: a list of transactions which are replayed if the
state given by the header is not known. If the
state is known, `None` can be used instead of the
empty list.
:param uncles: a list of the headers of the uncles of this block
:param db: the database in which the block's state, transactions and
receipts are stored (required)
:param parent: optional parent which if not given may have to be loaded from
the database for replay
"""
fields = [
('header', BlockHeader),
('transaction_list', CountableList(Transaction)),
('uncles', CountableList(BlockHeader))
]
def __init__(self, header, transaction_list=[], uncles=[], env=None,
parent=None, making=False):
assert isinstance(env, Env), "No Env object given"
assert isinstance(env.db, BaseDB), "No database object given"
self.env = env # don't re-set after init
self.db = env.db
self.config = env.config
self.header = header
self.uncles = uncles
self.suicides = []
self.logs = []
self.log_listeners = []
self.refunds = 0
self.ether_delta = 0
self._get_transactions_cache = []
# Journaling cache for state tree updates
self.caches = {
'balance': {},
'nonce': {},
'code': {},
'storage': {},
'all': {}
}
self.journal = []
if self.number > 0:
self.ancestor_hashes = [self.prevhash]
else:
self.ancestor_hashes = [None] * 256
# do some consistency checks on parent if given
if parent:
if hasattr(parent, 'db') and self.db != parent.db and self.db.db != parent.db:
raise ValueError("Parent lives in different database")
if self.prevhash != parent.header.hash:
raise ValueError("Block's prevhash and parent's hash do not match")
if self.number != parent.header.number + 1:
raise ValueError("Block's number is not the successor of its parent number")
if not check_gaslimit(parent, self.gas_limit):
raise ValueError("Block's gaslimit is inconsistent with its parent's gaslimit")
if self.difficulty != calc_difficulty(parent, self.timestamp):
raise ValueError("Block's difficulty is inconsistent with its parent's difficulty")
if self.gas_used > self.gas_limit:
raise ValueError("Gas used exceeds gas limit")
if self.timestamp <= parent.header.timestamp:
raise ValueError("Timestamp equal to or before parent")
if self.timestamp >= 2 ** 256:
raise ValueError("Timestamp waaaaaaaaaaayy too large")
if self.gas_limit > self.config['MAX_GAS_LIMIT']:
raise ValueError("Block's gaslimit went too high!")
for uncle in uncles:
assert isinstance(uncle, BlockHeader)
original_values = {
'gas_used': header.gas_used,
'timestamp': header.timestamp,
'difficulty': header.difficulty,
'uncles_hash': header.uncles_hash,
'bloom': header.bloom,
'header_mutable': self.header._mutable,
}
assert self._mutable
self._cached_rlp = None
self.header._mutable = True
self.transactions = Trie(self.db, trie.BLANK_ROOT)
self.receipts = Trie(self.db, trie.BLANK_ROOT)
# replay transactions if state is unknown
state_unknown = (header.prevhash != self.config['GENESIS_PREVHASH'] and
header.number != 0 and
header.state_root != trie.BLANK_ROOT and
(len(header.state_root) != 32 or
b'validated:' + self.hash not in self.db) and
not making)
if state_unknown:
assert transaction_list is not None
if not parent:
parent = self.get_parent_header()
self.state = SecureTrie(Trie(self.db, parent.state_root))
self.transaction_count = 0
self.gas_used = 0
# replay
self.initialize(parent)
for tx in transaction_list:
success, output = processblock.apply_transaction(self, tx)
self.finalize()
else:
# trust the state root in the header
self.state = SecureTrie(Trie(self.db, header._state_root))
self.transaction_count = 0
if transaction_list:
for tx in transaction_list:
self.add_transaction_to_list(tx)
if self.transactions.root_hash != header.tx_list_root:
raise ValueError("Transaction list root hash does not match")
# receipts trie populated by add_transaction_to_list is incorrect
# (it doesn't know intermediate states), so reset it
self.receipts = Trie(self.db, header.receipts_root)
# checks ##############################
def must(what, f, symb, a, b):
if not f(a, b):
if dump_block_on_failed_verification:
sys.stderr.write('%r' % self.to_dict())
raise VerificationFailed(what, a, symb, b)
def must_equal(what, a, b):
return must(what, lambda x, y: x == y, "==", a, b)
def must_ge(what, a, b):
return must(what, lambda x, y: x >= y, ">=", a, b)
def must_le(what, a, b):
return must(what, lambda x, y: x <= y, "<=", a, b)
if parent:
must_equal('prev_hash', self.prevhash, parent.hash)
must_equal('gas_used', original_values['gas_used'], self.gas_used)
must_equal('timestamp', self.timestamp, original_values['timestamp'])
must_equal('difficulty', self.difficulty, original_values['difficulty'])
must_equal('uncles_hash', utils.sha3(rlp.encode(uncles)), original_values['uncles_hash'])
assert header.block is None
must_equal('state_root', self.state.root_hash, header.state_root)
must_equal('tx_list_root', self.transactions.root_hash, header.tx_list_root)
must_equal('receipts_root', self.receipts.root_hash, header.receipts_root)
must_equal('bloom', self.bloom, original_values['bloom'])
# from now on, trie roots refer to block instead of header
header.block = self
self.header._mutable = original_values['header_mutable']
# Basic consistency verifications
if not self.check_fields():
raise ValueError("Block is invalid")
if len(to_string(self.header.extra_data)) > self.config['MAX_EXTRADATA_LENGTH']:
raise ValueError("Extra data cannot exceed %d bytes"
% default_config['MAX_EXTRADATA_LENGTH'])
if self.header.coinbase == '':
raise ValueError("Coinbase cannot be empty address")
if not self.state.root_hash_valid():
raise ValueError("State Merkle root of block %r not found in "
"database" % self)
if (not self.is_genesis() and self.nonce and not self.header.check_pow()):
raise ValueError("PoW check failed")
if b'validated:' + self.hash not in self.db:
if self.number == 0:
self.db.put(b'validated:' + self.hash, '1')
else:
self.db.put_temporarily(b'validated:' + self.hash, '1')
@classmethod
def init_from_header(cls, header_rlp, env):
"""Create a block without specifying transactions or uncles.
:param header_rlp: the RLP encoded block header
:param env: the database for the block
"""
header = rlp.decode(header_rlp, BlockHeader, env=env)
return cls(header, None, [], env=env)
@classmethod
def init_from_parent(cls, parent, coinbase, nonce=b'', extra_data=b'',
timestamp=int(time.time()), uncles=[], env=None):
"""Create a new block based on a parent block.
The block will not include any transactions and will not be finalized.
"""
header = BlockHeader(prevhash=parent.hash,
uncles_hash=utils.sha3(rlp.encode(uncles)),
coinbase=coinbase,
state_root=parent.state_root,
tx_list_root=trie.BLANK_ROOT,
receipts_root=trie.BLANK_ROOT,
bloom=0,
difficulty=calc_difficulty(parent, timestamp),
mixhash='',
number=parent.number + 1,
gas_limit=calc_gaslimit(parent),
gas_used=0,
timestamp=timestamp,
extra_data=extra_data,
nonce=nonce)
block = Block(header, [], uncles, env=env or parent.env,
parent=parent, making=True)
block.ancestor_hashes = [parent.hash] + parent.ancestor_hashes
block.log_listeners = parent.log_listeners
return block
def check_fields(self):
"""Check that the values of all fields are well formed."""
# serialize and deserialize and check that the values didn't change
l = Block.serialize(self)
return rlp.decode(rlp.encode(l)) == l
@property
def hash(self):
"""The binary block hash
This is equivalent to ``header.hash``.
"""
return utils.sha3(rlp.encode(self.header))
def hex_hash(self):
"""The hex encoded block hash.
This is equivalent to ``header.hex_hash().
"""
return encode_hex(self.hash)
@property
def tx_list_root(self):
return self.transactions.root_hash
@tx_list_root.setter
def tx_list_root(self, value):
self.transactions = Trie(self.db, value)
@property
def receipts_root(self):
return self.receipts.root_hash
@receipts_root.setter
def receipts_root(self, value):
self.receipts = Trie(self.db, value)
@property
def state_root(self):
self.commit_state()
return self.state.root_hash
@state_root.setter
def state_root(self, value):
self.state = SecureTrie(Trie(self.db, value))
self.reset_cache()
@property
def uncles_hash(self):
return utils.sha3(rlp.encode(self.uncles))
@property
def transaction_list(self):
txs = []
for i in range(self.transaction_count):
txs.append(self.get_transaction(i))
return txs
def validate_uncles(self):
    """Validate the uncles of this block.

    Checks, in order: the uncle-list hash commitment in the header, the
    MAX_UNCLES cap, and for each uncle its height, difficulty, parent
    linkage, timestamp, proof of work, ancestor eligibility and
    non-duplication.

    :returns: `True` if all uncles are valid, otherwise `False`
    """
    # The header must commit to exactly this uncle list.
    if utils.sha3(rlp.encode(self.uncles)) != self.uncles_hash:
        return False
    if len(self.uncles) > self.config['MAX_UNCLES']:
        return False
    for uncle in self.uncles:
        assert uncle.prevhash in self.db
        if uncle.number == self.number:
            log.error("uncle at same block height", block=self)
            return False
    # Check uncle validity
    MAX_UNCLE_DEPTH = self.config['MAX_UNCLE_DEPTH']
    ancestor_chain = [self] + [a for a in self.get_ancestor_list(MAX_UNCLE_DEPTH + 1) if a]
    assert len(ancestor_chain) == min(self.header.number + 1, MAX_UNCLE_DEPTH + 2)
    ineligible = []
    # Uncles of this block cannot be direct ancestors and cannot also
    # be uncles included 1-6 blocks ago
    for ancestor in ancestor_chain[1:]:
        ineligible.extend(ancestor.uncles)
    ineligible.extend([b.header for b in ancestor_chain])
    # An uncle's parent must be one of the 2nd..(MAX_UNCLE_DEPTH+1)th
    # ancestors: self and self's own parent are excluded.
    eligible_ancestor_hashes = [x.hash for x in ancestor_chain[2:]]
    for uncle in self.uncles:
        parent = get_block(self.env, uncle.prevhash)
        if uncle.difficulty != calc_difficulty(parent, uncle.timestamp):
            return False
        if uncle.number != parent.number + 1:
            return False
        if uncle.timestamp < parent.timestamp:
            return False
        if not uncle.check_pow():
            return False
        if uncle.prevhash not in eligible_ancestor_hashes:
            log.error("Uncle does not have a valid ancestor", block=self,
                      eligible=[encode_hex(x) for x in eligible_ancestor_hashes],
                      uncle_prevhash=encode_hex(uncle.prevhash))
            return False
        if uncle in ineligible:
            log.error("Duplicate uncle", block=self,
                      uncle=encode_hex(utils.sha3(rlp.encode(uncle))))
            return False
        # Track accepted uncles so the same one cannot appear twice.
        ineligible.append(uncle)
    return True
def get_ancestor_list(self, n):
    """Return up to `n` ancestors of this block.

    :returns: a list [p(self), p(p(self)), ..., p^n(self)], stopping
              early at the genesis block
    """
    ancestors = []
    block = self
    while len(ancestors) < n and block.header.number != 0:
        block = block.get_parent()
        ancestors.append(block)
    return ancestors
def get_ancestor_hash(self, n):
    """Return the hash of the `n`th ancestor (1 = parent).

    The `ancestor_hashes` list is extended lazily by walking parents;
    entries past the genesis block are recorded as None.
    """
    assert n > 0
    while len(self.ancestor_hashes) < n:
        # If we have already walked back to genesis, there is no
        # further ancestor: pad with None.
        if self.number == len(self.ancestor_hashes) - 1:
            self.ancestor_hashes.append(None)
        else:
            self.ancestor_hashes.append(
                get_block(self.env,
                          self.ancestor_hashes[-1]).get_parent().hash)
    return self.ancestor_hashes[n - 1]
# def get_ancestor(self, n):
# return self.get_block(self.get_ancestor_hash(n))
def is_genesis(self):
    """`True` if this block is the genesis block, otherwise `False`."""
    return not self.header.number
def _get_acct(self, address):
    """Get the account with the given address.

    Note that this method ignores cached account items.

    :param address: binary (20 byte) or hex (40 char) address; the
                    empty address is also accepted
    :returns: the decoded :class:`Account`, or a blank account if the
              address is not in the state trie
    """
    if len(address) == 40:
        address = decode_hex(address)
    assert len(address) == 20 or len(address) == 0
    rlpdata = self.state.get(address)
    if rlpdata != trie.BLANK_NODE:
        acct = rlp.decode(rlpdata, Account, db=self.db)
        # Unfreeze the decoded object and drop its cached encoding so
        # callers can mutate fields and get a fresh re-serialization.
        acct._mutable = True
        acct._cached_rlp = None
    else:
        acct = Account.blank_account(self.db, self.config['ACCOUNT_INITIAL_NONCE'])
    return acct
def _get_acct_item(self, address, param):
"""Get a specific parameter of a specific account.
:param address: the address of the account (binary or hex string)
:param param: the requested parameter (`'nonce'`, `'balance'`,
`'storage'` or `'code'`)
"""
if len(address) == 40:
address = decode_hex(address)
assert len(address) == 20 or len(address) == 0
if address in self.caches[param]:
return self.caches[param][address]
else:
account = self._get_acct(address)
o = getattr(account, param)
self.caches[param][address] = o
return o
def _set_acct_item(self, address, param, value):
    """Set a specific parameter of a specific account.

    The write goes to the journaled caches only; it is persisted to
    the state trie by :meth:`commit_state`.

    :param address: the address of the account (binary or hex string)
    :param param: the requested parameter (`'nonce'`, `'balance'`,
                  `'storage'` or `'code'`)
    :param value: the new value
    """
    if len(address) == 40:
        address = decode_hex(address)
    assert len(address) == 20
    self.set_and_journal(param, address, value)
    # Mark the account as touched so commit_state picks it up.
    self.set_and_journal('all', address, True)
def set_and_journal(self, cache, index, value):
    """Write `value` into the named cache, journaling the old value.

    The journal entry records enough information to undo the write in
    :meth:`revert`.  A write of the already-present value is a no-op.
    """
    old = self.caches[cache].get(index, None)
    if old == value:
        return
    self.journal.append([cache, index, old, value])
    self.caches[cache][index] = value
def _delta_item(self, address, param, value):
    """Add a value to an account item.

    If the resulting value would be negative, it is left unchanged and
    `False` is returned.

    :param address: the address of the account (binary or hex string)
    :param param: the parameter to increase or decrease (`'nonce'`,
                  `'balance'`, `'storage'` or `'code'`)
    :param value: can be positive or negative
    :returns: `True` if the operation was successful, `False` if not
    """
    new_value = self._get_acct_item(address, param) + value
    if new_value < 0:
        return False
    # Values wrap around at 2**256 (EVM word size).
    self._set_acct_item(address, param, new_value % 2 ** 256)
    return True
def mk_transaction_receipt(self, tx):
    """Create a receipt for a transaction.

    Before the Metropolis fork the receipt records the intermediate
    state root; from the fork onwards that field is zeroed out.
    """
    metropolis = self.number >= self.config["METROPOLIS_FORK_BLKNUM"]
    root = b'\x00' * 32 if metropolis else self.state_root
    return Receipt(root, self.gas_used, self.logs)
def add_transaction_to_list(self, tx):
    """Add a transaction to the transaction trie.

    Note that this does not execute anything, i.e. the state is not
    updated.  The matching receipt is created and stored at the same
    trie index, and its bloom filter is folded into the block bloom.
    """
    k = rlp.encode(self.transaction_count)
    self.transactions.update(k, rlp.encode(tx))
    r = self.mk_transaction_receipt(tx)
    self.receipts.update(k, rlp.encode(r))
    self.bloom |= r.bloom  # int
    self.transaction_count += 1
def get_transaction(self, num):
    """Get the `num`th transaction in this block.

    :raises: :exc:`IndexError` if the transaction does not exist
    """
    encoded = self.transactions.get(rlp.encode(num))
    if encoded == trie.BLANK_NODE:
        raise IndexError('Transaction does not exist')
    return rlp.decode(encoded, Transaction)
def num_transactions(self):
    """Number of transactions included in this block."""
    return self.transaction_count
def get_transactions(self):
    """Build (and memoize) the list of all transactions in this block."""
    total = self.transaction_count
    # Rebuild only when the cache is stale (count changed).
    if len(self._get_transactions_cache) != total:
        self._get_transactions_cache = [self.get_transaction(i)
                                        for i in range(total)]
    return self._get_transactions_cache
def get_transaction_hashes(self):
    """Helper to check if the block contains a given transaction.

    Hashes the raw RLP stored in the transaction trie, one entry per
    transaction index.
    """
    return [utils.sha3(self.transactions.get(rlp.encode(i)))
            for i in range(self.transaction_count)]

def includes_transaction(self, tx_hash):
    """Whether a transaction with hash `tx_hash` is in this block."""
    assert isinstance(tx_hash, bytes)
    # assert self.get_transaction_hashes() == [tx.hash for tx in self.get_transactions()]
    return tx_hash in self.get_transaction_hashes()
def get_receipt(self, num):
    """Get the receipt of the `num`th transaction.

    :returns: an instance of :class:`Receipt`
    :raises: :exc:`IndexError` if no such transaction exists
    """
    encoded = self.receipts.get(rlp.encode(num))
    if encoded == trie.BLANK_NODE:
        raise IndexError('Receipt does not exist')
    return rlp.decode(encoded, Receipt)
def get_receipts(self):
    """Build a list of all receipts in this block.

    Reads receipts by increasing index until :meth:`get_receipt`
    signals the end with an :exc:`IndexError`.
    """
    receipts = []
    i = 0
    while True:
        try:
            receipts.append(self.get_receipt(i))
        except IndexError:
            return receipts
        i += 1
def get_nonce(self, address):
    """Get the nonce of an account.

    :param address: the address of the account (binary or hex string)
    """
    return self._get_acct_item(address, 'nonce')

def set_nonce(self, address, value):
    """Set the nonce of an account.

    :param address: the address of the account (binary or hex string)
    :param value: the new nonce
    """
    # NOTE(review): _set_acct_item returns None, so despite older docs
    # this does not return a success flag.
    return self._set_acct_item(address, 'nonce', value)

def increment_nonce(self, address):
    """Increment the nonce of an account.

    :param address: the address of the account (binary or hex string)
    :returns: `True` if successful, otherwise `False`
    """
    # An account whose nonce reads 0 has never been touched; start it
    # at the configured initial nonce instead of at 1.
    if self.get_nonce(address) == 0:
        return self._delta_item(address, 'nonce', self.config['ACCOUNT_INITIAL_NONCE'] + 1)
    else:
        return self._delta_item(address, 'nonce', 1)
def get_balance(self, address):
    """Get the balance of an account.

    :param address: the address of the account (binary or hex string)
    """
    return self._get_acct_item(address, 'balance')

def set_balance(self, address, value):
    """Set the balance of an account.

    :param address: the address of the account (binary or hex string)
    :param value: the new balance
    """
    # NOTE(review): returns None (not a success flag) -- _set_acct_item
    # has no return value.
    self._set_acct_item(address, 'balance', value)

def delta_balance(self, address, value):
    """Increase or decrease the balance of an account.

    :param address: the address of the account (binary or hex string)
    :param value: can be positive or negative
    :returns: `True` if successful, `False` if it would go negative
    """
    return self._delta_item(address, 'balance', value)
def transfer_value(self, from_addr, to_addr, value):
    """Transfer a value between two account balances.

    :param from_addr: the address of the sending account (binary or hex
                      string)
    :param to_addr: the address of the receiving account (binary or hex
                    string)
    :param value: the (positive) value to send
    :returns: `True` if successful, otherwise `False`
    """
    assert value >= 0
    # Debit first; only credit the receiver if the sender could pay.
    if self.delta_balance(from_addr, -value):
        return self.delta_balance(to_addr, value)
    return False
def get_code(self, address):
    """Get the code of an account.

    :param address: the address of the account (binary or hex string)
    """
    return self._get_acct_item(address, 'code')

def set_code(self, address, value):
    """Set the code of an account.

    :param address: the address of the account (binary or hex string)
    :param value: the new code
    """
    # NOTE(review): returns None (not a success flag) -- _set_acct_item
    # has no return value.
    self._set_acct_item(address, 'code', value)
def get_storage(self, address):
    """Get the trie holding an account's storage.

    :param address: the address of the account (binary or hex string)
    :returns: a :class:`SecureTrie` rooted at the account's storage root
    """
    storage_root = self._get_acct_item(address, 'storage')
    return SecureTrie(Trie(self.db, storage_root))
def reset_storage(self, address):
    """Reset the storage of an account to empty.

    Clears the account's storage root and zeroes out any uncommitted
    cached storage writes for the account.

    :param address: the address of the account (binary or hex string)
    """
    # Normalize hex addresses like every sibling accessor does;
    # previously a 40-char hex string produced a cache key that never
    # matched the one written by set_storage_data (and on Python 3
    # b'storage:' + str raises TypeError).
    if len(address) == 40:
        address = decode_hex(address)
    assert len(address) == 20
    self._set_acct_item(address, 'storage', b'')
    CACHE_KEY = b'storage:' + address
    if CACHE_KEY in self.caches:
        for k in self.caches[CACHE_KEY]:
            self.set_and_journal(CACHE_KEY, k, 0)
def get_storage_data(self, address, index):
    """Get a specific item in the storage of an account.

    :param address: the address of the account (binary or hex string)
    :param index: the index of the requested item in the storage
    :returns: the stored integer value, or 0 if the slot is unset
    """
    if len(address) == 40:
        address = decode_hex(address)
    assert len(address) == 20
    CACHE_KEY = b'storage:' + address
    # Serve uncommitted writes from the per-account storage cache first.
    if CACHE_KEY in self.caches:
        if index in self.caches[CACHE_KEY]:
            return self.caches[CACHE_KEY][index]
    # Fall back to the committed storage trie (keys are 32-byte padded).
    key = utils.zpad(utils.coerce_to_bytes(index), 32)
    storage = self.get_storage(address).get(key)
    if storage:
        return rlp.decode(storage, big_endian_int)
    else:
        return 0

def set_storage_data(self, address, index, value):
    """Set a specific item in the storage of an account.

    The write is only cached (and journaled); it reaches the storage
    trie on :meth:`commit_state`.

    :param address: the address of the account (binary or hex string)
    :param index: the index of the item in the storage
    :param value: the new value of the item
    """
    if len(address) == 40:
        address = decode_hex(address)
    assert len(address) == 20
    CACHE_KEY = b'storage:' + address
    if CACHE_KEY not in self.caches:
        self.caches[CACHE_KEY] = {}
        # First storage write for this account: mark it as touched.
        self.set_and_journal('all', address, True)
    self.set_and_journal(CACHE_KEY, index, value)
def account_exists(self, address):
    """Whether the account exists in the state trie or the dirty cache."""
    if len(address) == 40:
        address = decode_hex(address)
    assert len(address) == 20
    in_state = len(self.state.get(address)) > 0
    return in_state or address in self.caches['all']
def add_log(self, log):
    """Append a log entry and notify every registered listener."""
    self.logs.append(log)
    for listener in self.log_listeners:
        listener(log)
def commit_state(self):
    """Write the account caches to the corresponding tries.

    A no-op when the journal is empty (nothing changed since the last
    commit).  Afterwards the caches and journal are reset and the block
    is marked as validated in the database.
    """
    changes = []
    if len(self.journal) == 0:
        # log_state.trace('delta', changes=[])
        return
    # Deterministic ordering of touched accounts.
    addresses = sorted(list(self.caches['all'].keys()))
    for addr in addresses:
        acct = self._get_acct(addr)
        # Apply cached scalar fields to the account object.
        for field in ('balance', 'nonce', 'code', 'storage'):
            if addr in self.caches[field]:
                v = self.caches[field][addr]
                changes.append([field, addr, v])
                setattr(acct, field, v)
        # Apply cached storage slot writes to the storage trie.
        t = SecureTrie(Trie(self.db, acct.storage))
        for k, v in self.caches.get(b'storage:' + addr, {}).items():
            enckey = utils.zpad(utils.coerce_to_bytes(k), 32)
            val = rlp.encode(v)
            changes.append(['storage', addr, k, v])
            # A zero value clears the slot: delete it from the trie
            # rather than storing an explicit zero.
            if v:
                t.update(enckey, val)
            else:
                t.delete(enckey)
        acct.storage = t.root_hash
        self.state.update(addr, rlp.encode(acct))
    log_state.trace('delta', changes=lazy_encode(changes))
    self.reset_cache()
    self.db.put_temporarily(b'validated:' + self.hash, '1')
def del_account(self, address):
    """Delete an account.

    :param address: the address of the account (binary or hex string)
    """
    if len(address) == 40:
        address = decode_hex(address)
    assert len(address) == 20
    # Flush pending cache writes first so no stale cached entry
    # survives the deletion.
    self.commit_state()
    self.state.delete(address)
def account_to_dict(self, address, with_storage_root=False,
                    with_storage=True):
    """Serialize an account to a dictionary with human readable entries.

    :param address: the 20 bytes account address
    :param with_storage_root: include the account's storage root
    :param with_storage: include the whole account's storage
    :returns: dict with 'balance', 'nonce', 'code' and optionally
              'storage' / 'storage_root' entries
    """
    if len(address) == 40:
        address = decode_hex(address)
    assert len(address) == 20
    if with_storage_root:
        # if there are uncommited account changes the current storage root
        # is meaningless
        assert len(self.journal) == 0
    med_dict = {}
    account = self._get_acct(address)
    # Cached (uncommitted) values take precedence over committed data.
    for field in ('balance', 'nonce'):
        value = self.caches[field].get(address, getattr(account, field))
        med_dict[field] = to_string(value)
    code = self.caches['code'].get(address, account.code)
    med_dict['code'] = b'0x' + encode_hex(code)
    storage_trie = SecureTrie(Trie(self.db, account.storage))
    if with_storage_root:
        med_dict['storage_root'] = encode_hex(storage_trie.get_root_hash())
    if with_storage:
        med_dict['storage'] = {}
        d = storage_trie.to_dict()
        subcache = self.caches.get(b'storage:' + address, {})
        subkeys = [utils.zpad(utils.coerce_to_bytes(kk), 32)
                   for kk in list(subcache.keys())]
        # Merge committed trie slots with uncommitted cached writes;
        # cached values win, and a cached zero suppresses the slot.
        for k in list(d.keys()) + subkeys:
            v = d.get(k, None)
            v2 = subcache.get(big_endian_to_int(k), None)
            hexkey = b'0x' + encode_hex(utils.zunpad(k))
            if v2 is not None:
                if v2 != 0:
                    med_dict['storage'][hexkey] = \
                        b'0x' + encode_hex(utils.int_to_big_endian(v2))
            elif v is not None:
                med_dict['storage'][hexkey] = b'0x' + encode_hex(rlp.decode(v))
    return med_dict
def reset_cache(self):
    """Reset cache and journal without committing any changes."""
    self.caches = {name: {}
                   for name in ('all', 'balance', 'nonce', 'code', 'storage')}
    self.journal = []
def snapshot(self):
    """Make a snapshot of the current state to enable later reverting.

    Mutable members (txs, suicides, logs, journal) are captured by
    reference together with their current lengths, so :meth:`revert`
    can trim them back to the recorded size.
    """
    return {
        'state': self.state.root_hash,
        'gas': self.gas_used,
        'txs': self.transactions,
        'txcount': self.transaction_count,
        'suicides': self.suicides,
        'logs': self.logs,
        'refunds': self.refunds,
        'suicides_size': len(self.suicides),
        'logs_size': len(self.logs),
        'journal': self.journal,  # pointer to reference, so is not static
        'journal_size': len(self.journal),
        'ether_delta': self.ether_delta
    }
def revert(self, mysnapshot):
    """Revert to a previously made snapshot.

    Reverting is for example necessary when a contract runs out of gas
    during execution.
    """
    # Undo journaled cache writes made after the snapshot, newest first.
    self.journal = mysnapshot['journal']
    log_state.trace('reverting')
    while len(self.journal) > mysnapshot['journal_size']:
        cache, index, prev, post = self.journal.pop()
        log_state.trace('%r %r %r %r' % (cache, index, prev, post))
        if prev is not None:
            self.caches[cache][index] = prev
        else:
            # No previous value means the entry did not exist before.
            del self.caches[cache][index]
    # Lists were snapshotted by reference; trim back to recorded sizes.
    self.suicides = mysnapshot['suicides']
    while len(self.suicides) > mysnapshot['suicides_size']:
        self.suicides.pop()
    self.logs = mysnapshot['logs']
    while len(self.logs) > mysnapshot['logs_size']:
        self.logs.pop()
    self.refunds = mysnapshot['refunds']
    self.state.root_hash = mysnapshot['state']
    self.gas_used = mysnapshot['gas']
    self.transactions = mysnapshot['txs']
    self.transaction_count = mysnapshot['txcount']
    # Invalidate the memoized transaction list.
    self._get_transactions_cache = []
    self.ether_delta = mysnapshot['ether_delta']
def initialize(self, parent):
    """Apply fork-specific state changes at the start of block processing.

    :param parent: the parent block
    """
    # Metropolis: install the getter contracts once at the fork block...
    if self.number == self.config["METROPOLIS_FORK_BLKNUM"]:
        self.set_code(utils.normalize_address(self.config["METROPOLIS_STATEROOT_STORE"]), self.config["METROPOLIS_GETTER_CODE"])
        self.set_code(utils.normalize_address(self.config["METROPOLIS_BLOCKHASH_STORE"]), self.config["METROPOLIS_GETTER_CODE"])
    # ...then record the parent's state root and this block's prevhash
    # in the store contracts, indexed modulo METROPOLIS_WRAPAROUND.
    if self.number >= self.config["METROPOLIS_FORK_BLKNUM"]:
        self.set_storage_data(utils.normalize_address(self.config["METROPOLIS_STATEROOT_STORE"]),
                              self.number % self.config["METROPOLIS_WRAPAROUND"],
                              parent.state_root)
        self.set_storage_data(utils.normalize_address(self.config["METROPOLIS_BLOCKHASH_STORE"]),
                              self.number % self.config["METROPOLIS_WRAPAROUND"],
                              self.prevhash)
    # DAO fork: drain the child-DAO balances to the withdrawal address.
    if self.number == self.config['DAO_FORK_BLKNUM']:
        for acct in self.config['CHILD_DAO_LIST']:
            self.transfer_value(acct, self.config['DAO_WITHDRAWER'], self.get_balance(acct))
def finalize(self):
    """Apply rewards and commit."""
    # Miner gets the block reward plus a fixed bounty per included uncle.
    delta = int(self.config['BLOCK_REWARD'] + self.config['NEPHEW_REWARD'] * len(self.uncles))
    self.delta_balance(self.coinbase, delta)
    self.ether_delta += delta
    br = self.config['BLOCK_REWARD']
    udpf = self.config['UNCLE_DEPTH_PENALTY_FACTOR']
    # Each uncle miner gets a reward shrinking with the uncle's depth
    # below this block.
    for uncle in self.uncles:
        r = int(br * (udpf + uncle.number - self.number) // udpf)
        self.delta_balance(uncle.coinbase, r)
        self.ether_delta += r
    self.commit_state()
def to_dict(self, with_state=False, full_transactions=False,
            with_storage_roots=False, with_uncles=False):
    """Serialize the block to a readable dictionary.

    :param with_state: include state for all accounts
    :param full_transactions: include serialized transactions (hashes
                              otherwise)
    :param with_storage_roots: if account states are included also include
                               their storage roots
    :param with_uncles: include uncle hashes
    """
    b = {"header": self.header.to_dict()}
    txlist = []
    for i, tx in enumerate(self.get_transactions()):
        # Each transaction shares its trie index with its receipt.
        receipt_rlp = self.receipts.get(rlp.encode(i))
        receipt = rlp.decode(receipt_rlp, Receipt)
        if full_transactions:
            txjson = tx.to_dict()
        else:
            txjson = tx.hash
        txlist.append({
            "tx": txjson,
            "medstate": encode_hex(receipt.state_root),
            "gas": to_string(receipt.gas_used),
            "logs": [Log.serialize(log) for log in receipt.logs],
            "bloom": utils.int256.serialize(receipt.bloom)
        })
    b["transactions"] = txlist
    if with_state:
        state_dump = {}
        for address, v in self.state.to_dict().items():
            state_dump[encode_hex(address)] = self.account_to_dict(address, with_storage_roots)
        b['state'] = state_dump
    if with_uncles:
        # NOTE(review): this emits deserialized uncle headers, not
        # hashes, despite the parameter's docstring -- verify intent.
        b['uncles'] = [self.__class__.deserialize_header(u)
                       for u in self.uncles]
    return b
@property
def mining_hash(self):
    """Hash of the header excluding nonce and mixhash (the PoW input)."""
    return utils.sha3(rlp.encode(self.header,
                                 BlockHeader.exclude(['nonce', 'mixhash'])))
def get_parent(self):
    """Get the parent of this block.

    :raises UnknownParentException: for the genesis block or if the
                                    parent is not in the database
    """
    if self.number == 0:
        raise UnknownParentException('Genesis block has no parent')
    try:
        parent = get_block(self.env, self.prevhash)
    except KeyError:
        raise UnknownParentException(encode_hex(self.prevhash))
    # assert parent.state.db.db == self.state.db.db
    return parent

def get_parent_header(self):
    """Get the header of this block's parent.

    :raises UnknownParentException: for the genesis block or if the
                                    parent is not in the database
    """
    if self.number == 0:
        raise UnknownParentException('Genesis block has no parent')
    try:
        parent_header = get_block_header(self.db, self.prevhash)
    except KeyError:
        raise UnknownParentException(encode_hex(self.prevhash))
    # assert parent.state.db.db == self.state.db.db
    return parent_header

def has_parent(self):
    """`True` if this block has a known parent, otherwise `False`."""
    try:
        self.get_parent()
        return True
    except UnknownParentException:
        return False
def chain_difficulty(self):
    """Get the summarized difficulty.

    If the summarized difficulty is not stored in the database, it will be
    calculated recursively and put in the database.
    """
    if self.is_genesis():
        return self.difficulty
    elif b'difficulty:' + encode_hex(self.hash) in self.db:
        encoded = self.db.get(b'difficulty:' + encode_hex(self.hash))
        return utils.decode_int(encoded)
    else:
        o = self.difficulty + self.get_parent().chain_difficulty()
        # o += sum([uncle.difficulty for uncle in self.uncles])
        # NOTE(review): written via self.state.db but read back via
        # self.db above -- presumably the same underlying DB; verify.
        self.state.db.put_temporarily(
            b'difficulty:' + encode_hex(self.hash), utils.encode_int(o))
        return o
def __eq__(self, other):
    """Two blocks are equal iff they have the same hash."""
    return isinstance(other, (Block, CachedBlock)) and self.hash == other.hash

def __hash__(self):
    # Consistent with __eq__: derived from the block hash.
    return big_endian_to_int(self.hash)

def __ne__(self, other):
    return not self.__eq__(other)

def __gt__(self, other):
    # Ordering compares block height, not total difficulty.
    return self.number > other.number

def __lt__(self, other):
    return self.number < other.number

def __repr__(self):
    return '<%s(#%d %s)>' % (self.__class__.__name__, self.number, encode_hex(self.hash)[:8])

def __structlog__(self):
    # Representation used by the structured logger.
    return encode_hex(self.hash)
# Gas limit adjustment algo
def calc_gaslimit(parent):
    """Compute the gas limit for the child of `parent`.

    An exponential moving average decays the parent's limit while
    recent gas usage contributes upward pressure; the result is clamped
    to MIN_GAS_LIMIT and nudged toward GENESIS_GAS_LIMIT from below.
    """
    config = parent.config
    ema = config['GASLIMIT_EMA_FACTOR']
    decay = parent.gas_limit // ema
    contribution = (parent.gas_used * config['BLKLIM_FACTOR_NOM']
                    // config['BLKLIM_FACTOR_DEN'] // ema)
    limit = max(parent.gas_limit - decay + contribution,
                config['MIN_GAS_LIMIT'])
    if limit < config['GENESIS_GAS_LIMIT']:
        limit = min(config['GENESIS_GAS_LIMIT'], parent.gas_limit + decay)
    assert check_gaslimit(parent, limit)
    return limit


def check_gaslimit(parent, gas_limit):
    """Whether `gas_limit` is a legal limit for a child of `parent`.

    block.gasLimit - parent.gasLimit <= parent.gasLimit // GasLimitBoundDivisor
    and the limit must not fall below the configured minimum.
    """
    config = parent.config
    max_delta = parent.gas_limit // config['GASLIMIT_ADJMAX_FACTOR']
    within_bound = abs(gas_limit - parent.gas_limit) <= max_delta
    above_minimum = gas_limit >= config['MIN_GAS_LIMIT']
    return bool(within_bound and above_minimum)
class CachedBlock(Block):
    """An immutable view of a :class:`Block` with a memoized hash.

    Blocks loaded from the database are not manipulated, so the hash
    can be cached and the mutating entry points are stubbed out.
    """

    # note: immutable refers to: do not manipulate!
    _hash_cached = None  # memoized block hash

    def _set_acct_item(self, *args, **kwargs):
        # Accept any call shape so misuse raises NotImplementedError
        # rather than an incidental TypeError: the parent method takes
        # (address, param, value), which the old zero-arg override
        # could never match.
        raise NotImplementedError

    def set_state_root(self, *args, **kwargs):
        raise NotImplementedError

    def revert(self, *args, **kwargs):
        raise NotImplementedError

    def commit_state(self):
        # Nothing to commit: cached blocks carry no dirty caches.
        pass

    def __hash__(self):
        return big_endian_to_int(self.hash)

    @property
    def hash(self):
        # Safe to memoize because the block is never mutated.
        if not self._hash_cached:
            self._hash_cached = super(CachedBlock, self).hash
        return self._hash_cached

    @classmethod
    def create_cached(cls, blk):
        """Reinterpret `blk` in place as a CachedBlock and return it."""
        blk.__class__ = CachedBlock
        log.debug('created cached block', blk=blk)
        return blk
def get_block_header(db, blockhash):
    """Load a block header from `db` by its block hash.

    :raises AssertionError: if the stored header's hash does not match
    """
    assert isinstance(db, BaseDB)
    bh = BlockHeader.from_block_rlp(db.get(blockhash))
    if bh.hash != blockhash:
        log.warn('BlockHeader.hash is broken')
    assert bh.hash == blockhash
    return bh


@lru_cache(1024)
def get_block(env, blockhash):
    """Load (and cache) a block from the environment's database.

    Assumption: blocks loaded from the db are not manipulated
    -> can be cached including hash
    """
    assert isinstance(env, Env)
    blk = rlp.decode(env.db.get(blockhash), Block, env=env)
    return CachedBlock.create_cached(blk)
def genesis(env, **kwargs):
    """Build the genesis block.

    :param env: the :class:`Env` providing db and config
    :param kwargs: overrides for the config's GENESIS_* defaults (see
                   `allowed_args`), plus `start_alloc` mapping addresses
                   to initial balance/code/nonce/storage
    """
    assert isinstance(env, Env)
    allowed_args = set([
        'start_alloc',
        'prevhash',
        'coinbase',
        'difficulty',
        'gas_limit',
        'timestamp',
        'extra_data',
        'mixhash',
        'nonce',
    ])
    assert set(kwargs.keys()).issubset(allowed_args)
    # https://ethereum.etherpad.mozilla.org/11
    start_alloc = kwargs.get('start_alloc', env.config['GENESIS_INITIAL_ALLOC'])
    header = BlockHeader(
        prevhash=kwargs.get('prevhash', env.config['GENESIS_PREVHASH']),
        uncles_hash=utils.sha3(rlp.encode([])),
        coinbase=kwargs.get('coinbase', env.config['GENESIS_COINBASE']),
        state_root=trie.BLANK_ROOT,
        tx_list_root=trie.BLANK_ROOT,
        receipts_root=trie.BLANK_ROOT,
        bloom=0,
        difficulty=kwargs.get('difficulty', env.config['GENESIS_DIFFICULTY']),
        number=0,
        gas_limit=kwargs.get('gas_limit', env.config['GENESIS_GAS_LIMIT']),
        gas_used=0,
        timestamp=kwargs.get('timestamp', 0),
        extra_data=kwargs.get('extra_data', env.config['GENESIS_EXTRA_DATA']),
        mixhash=kwargs.get('mixhash', env.config['GENESIS_MIXHASH']),
        nonce=kwargs.get('nonce', env.config['GENESIS_NONCE']),
    )
    block = Block(header, [], [], env=env)
    # Seed the state with the initial allocation.
    for addr, data in start_alloc.items():
        if len(addr) == 40:
            addr = decode_hex(addr)
        assert len(addr) == 20
        # 'wei' is accepted as an alias for 'balance'.
        if 'wei' in data:
            block.set_balance(addr, utils.parse_int_or_hex(data['wei']))
        if 'balance' in data:
            block.set_balance(addr, utils.parse_int_or_hex(data['balance']))
        if 'code' in data:
            block.set_code(addr, utils.scanners['bin'](data['code']))
        if 'nonce' in data:
            block.set_nonce(addr, utils.parse_int_or_hex(data['nonce']))
        if 'storage' in data:
            # Storage keys/values arrive as 0x-prefixed hex strings.
            for k, v in data['storage'].items():
                block.set_storage_data(addr, big_endian_to_int(decode_hex(k[2:])),
                                       big_endian_to_int(decode_hex(v[2:])))
    block.commit_state()
    block.state.db.commit()
    # genesis block has predefined state root (so no additional finalization
    # necessary)
    return block
def dump_genesis_block_tests_data(env):
    """Print genesis-block fixture data (state root, hash, RLP) as JSON.

    :param env: the :class:`Env` whose config defines the genesis block
    """
    assert isinstance(env, Env)
    import json
    g = genesis(env)
    data = dict(
        genesis_state_root=encode_hex(g.state_root),
        genesis_hash=g.hex_hash(),
        genesis_rlp_hex=encode_hex(g.serialize()),
        initial_alloc=dict()
    )
    for addr, balance in env.config['GENESIS_INITIAL_ALLOC'].items():
        data['initial_alloc'][addr] = to_string(balance)
    print(json.dumps(data, indent=1))
| mit |
henryr/Impala | thirdparty/thrift-0.9.0/test/py/RunClientServer.py | 30 | 7142 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import division
import time
import subprocess
import sys
import os
import signal
from optparse import OptionParser
parser = OptionParser()
parser.add_option('--genpydirs', type='string', dest='genpydirs',
default='default,slots,newstyle,newstyleslots,dynamic,dynamicslots',
help='directory extensions for generated code, used as suffixes for \"gen-py-*\" added sys.path for individual tests')
parser.add_option("--port", type="int", dest="port", default=9090,
help="port number for server to listen on")
parser.add_option('-v', '--verbose', action="store_const",
dest="verbose", const=2,
help="verbose output")
parser.add_option('-q', '--quiet', action="store_const",
dest="verbose", const=0,
help="minimal output")
parser.set_defaults(verbose=1)
options, args = parser.parse_args()
generated_dirs = []
for gp_dir in options.genpydirs.split(','):
generated_dirs.append('gen-py-%s' % (gp_dir))
SCRIPTS = ['SerializationTest.py', 'TestEof.py', 'TestSyntax.py', 'TestSocket.py']
FRAMED = ["TNonblockingServer"]
SKIP_ZLIB = ['TNonblockingServer', 'THttpServer']
SKIP_SSL = ['TNonblockingServer', 'THttpServer']
EXTRA_DELAY = dict(TProcessPoolServer=3.5)
PROTOS= [
'accel',
'binary',
'compact' ]
SERVERS = [
"TSimpleServer",
"TThreadedServer",
"TThreadPoolServer",
"TProcessPoolServer", # new!
"TForkingServer",
"TNonblockingServer",
"THttpServer" ]
# Test for presence of multiprocessing module, and if it is not present, then
# remove it from the list of available servers.
try:
import multiprocessing
except:
print 'Warning: the multiprocessing module is unavailable. Skipping tests for TProcessPoolServer'
SERVERS.remove('TProcessPoolServer')
try:
import ssl
except:
print 'Warning, no ssl module available. Skipping all SSL tests.'
SKIP_SSL.extend(SERVERS)
# commandline permits a single class name to be specified to override SERVERS=[...]
if len(args) == 1:
if args[0] in SERVERS:
SERVERS = args
else:
print 'Unavailable server type "%s", please choose one of: %s' % (args[0], SERVERS)
sys.exit(0)
def relfile(fname):
    """Resolve `fname` relative to the directory containing this script."""
    here = os.path.dirname(__file__)
    return os.path.join(here, fname)
def runScriptTest(genpydir, script):
script_args = [sys.executable, relfile(script) ]
script_args.append('--genpydir=%s' % genpydir)
serverproc = subprocess.Popen(script_args)
print '\nTesting script: %s\n----' % (' '.join(script_args))
ret = subprocess.call(script_args)
if ret != 0:
raise Exception("Script subprocess failed, retcode=%d, args: %s" % (ret, ' '.join(script_args)))
def runServiceTest(genpydir, server_class, proto, port, use_zlib, use_ssl):
    """Start a TestServer subprocess and run TestClient against it.

    :param genpydir: gen-py directory suffix under test
    :param server_class: Thrift server class name (e.g. 'TSimpleServer')
    :param proto: protocol name: accel, binary or compact
    :param port: TCP port the server listens on
    :param use_zlib: run through the zlib transport
    :param use_ssl: run over SSL
    :raises Exception: if the client fails or the server dies early
    """
    # Build command line arguments
    server_args = [sys.executable, relfile('TestServer.py')]
    cli_args = [sys.executable, relfile('TestClient.py')]
    # Options shared by both server and client.
    for which in (server_args, cli_args):
        which.append('--genpydir=%s' % genpydir)
        which.append('--proto=%s' % proto)  # accel, binary or compact
        which.append('--port=%d' % port)  # default to 9090
        if use_zlib:
            which.append('--zlib')
        if use_ssl:
            which.append('--ssl')
        if options.verbose == 0:
            which.append('-q')
        if options.verbose == 2:
            which.append('-v')
    # server-specific option to select server class
    server_args.append(server_class)
    # client-specific cmdline options
    if server_class in FRAMED:
        cli_args.append('--framed')
    if server_class == 'THttpServer':
        cli_args.append('--http=/')
    if options.verbose > 0:
        print 'Testing server %s: %s' % (server_class, ' '.join(server_args))
    serverproc = subprocess.Popen(server_args)
    # Give the server a moment to bind its port before the client connects.
    time.sleep(0.15)
    try:
        if options.verbose > 0:
            print 'Testing client: %s' % (' '.join(cli_args))
        ret = subprocess.call(cli_args)
        if ret != 0:
            raise Exception("Client subprocess failed, retcode=%d, args: %s" % (ret, ' '.join(cli_args)))
    finally:
        # check that server didn't die
        serverproc.poll()
        if serverproc.returncode is not None:
            print 'FAIL: Server process (%s) failed with retcode %d' % (' '.join(server_args), serverproc.returncode)
            raise Exception('Server subprocess %s died, args: %s' % (server_class, ' '.join(server_args)))
        else:
            # Some servers need extra time for child processes to exit.
            extra_sleep = EXTRA_DELAY.get(server_class, 0)
            if extra_sleep > 0 and options.verbose > 0:
                print 'Giving %s (proto=%s,zlib=%s,ssl=%s) an extra %d seconds for child processes to terminate via alarm' % (server_class,
                                                                                                                              proto, use_zlib, use_ssl, extra_sleep)
            time.sleep(extra_sleep)
            os.kill(serverproc.pid, signal.SIGKILL)
    # wait for shutdown
    time.sleep(0.05)
test_count = 0
# run tests without a client/server first
print '----------------'
print ' Executing individual test scripts with various generated code directories'
print ' Directories to be tested: ' + ', '.join(generated_dirs)
print ' Scripts to be tested: ' + ', '.join(SCRIPTS)
print '----------------'
for genpydir in generated_dirs:
    for script in SCRIPTS:
        runScriptTest(genpydir, script)

# Then the full client/server matrix:
# server class x genpydir x protocol x zlib on/off x ssl on/off.
print '----------------'
print ' Executing Client/Server tests with various generated code directories'
print ' Servers to be tested: ' + ', '.join(SERVERS)
print ' Directories to be tested: ' + ', '.join(generated_dirs)
print ' Protocols to be tested: ' + ', '.join(PROTOS)
print ' Options to be tested: ZLIB(yes/no), SSL(yes/no)'
print '----------------'
for try_server in SERVERS:
    for genpydir in generated_dirs:
        for try_proto in PROTOS:
            for with_zlib in (False, True):
                # skip any servers that don't work with the Zlib transport
                if with_zlib and try_server in SKIP_ZLIB:
                    continue
                for with_ssl in (False, True):
                    # skip any servers that don't work with SSL
                    if with_ssl and try_server in SKIP_SSL:
                        continue
                    test_count += 1
                    if options.verbose > 0:
                        print '\nTest run #%d: (includes %s) Server=%s, Proto=%s, zlib=%s, SSL=%s' % (test_count, genpydir, try_server, try_proto, with_zlib, with_ssl)
                    runServiceTest(genpydir, try_server, try_proto, options.port, with_zlib, with_ssl)
                    if options.verbose > 0:
                        print 'OK: Finished (includes %s) %s / %s proto / zlib=%s / SSL=%s. %d combinations tested.' % (genpydir, try_server, try_proto, with_zlib, with_ssl, test_count)
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.