repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
wireload/screenly-ose | lib/utils.py | 1 | 6862 | import certifi
import db
import json
import os
import pytz
import random
import re
import requests
import string
from datetime import datetime, timedelta
from distutils.util import strtobool
from netifaces import ifaddresses, gateways
from os import getenv, path, utime
from platform import machine
from settings import settings, ZmqPublisher
from sh import grep, netstat, ErrorReturnCode_1
from subprocess import check_output, call
from threading import Thread
from urlparse import urlparse
from assets_helper import update
# Machine architecture string, e.g. 'armv6l'/'armv7l' on Raspberry Pi,
# 'x86_64' on PCs; used to pick the right media player below.
arch = machine()

# All HTTP 2xx success status codes.  The upper bound of range() is
# exclusive, so 300 is needed to include 299; the previous
# xrange(200, 299) wrongly excluded status 299.  `range` works on both
# Python 2 (list) and Python 3 (range object) for the `in` checks below.
HTTP_OK = range(200, 300)
# This will only work on the Raspberry Pi,
# so let's wrap it in a try/except so that
# Travis can run.
try:
from sh import omxplayer
except ImportError:
pass
# This will work on x86-based machines
if machine() in ['x86', 'x86_64']:
try:
from sh import ffprobe, mplayer
except ImportError:
pass
def string_to_bool(string):
    """Convert a truthy/falsy string (or value) to a real bool.

    Implements the same truth table as the deprecated
    `distutils.util.strtobool` (removed in Python 3.12) so this module
    no longer depends on it:

    :param string: any value; coerced with str() first
    :return: True for 'y', 'yes', 't', 'true', 'on', '1' (case-insensitive);
             False for 'n', 'no', 'f', 'false', 'off', '0'
    :raises ValueError: for any other value
    """
    value = str(string).lower()
    if value in ('y', 'yes', 't', 'true', 'on', '1'):
        return True
    if value in ('n', 'no', 'f', 'false', 'off', '0'):
        return False
    raise ValueError('invalid truth value %r' % (value,))
def touch(path):
    """Create *path* if it does not exist and refresh its timestamps."""
    open(path, 'a').close()
    utime(path, None)
def is_ci():
    """Report whether we are running under a CI runner (Travis)."""
    ci_flag = os.getenv('CI', False)
    return string_to_bool(ci_flag)
def validate_url(string):
    """Simple URL verification.

    >>> validate_url("hello")
    False
    >>> validate_url("ftp://example.com")
    False
    >>> validate_url("http://")
    False
    >>> validate_url("http://wireload.net/logo.png")
    True
    >>> validate_url("https://wireload.net/logo.png")
    True
    """
    parts = urlparse(string)
    if parts.scheme not in ('http', 'https', 'rtsp', 'rtmp'):
        return False
    return bool(parts.netloc)
def get_node_ip():
    """Return this node's IP address on the default-gateway interface.

    This should work on both MacOS X and Linux.
    """
    try:
        default_gateways = gateways()['default']
        address_family_id = max(list(default_gateways))
        default_interface = default_gateways[address_family_id][1]
        return ifaddresses(default_interface)[address_family_id][0]['addr']
    except ValueError:
        raise Exception("Unable to resolve local IP address.")
def get_video_duration(file):
    """
    Returns the duration of a video file in timedelta.

    Probes the file with `omxplayer` on Raspberry Pi (ARM) or `ffprobe`
    elsewhere, then scrapes the first HH:MM:SS.ss value out of the tool's
    'Duration' output line.

    :param file: path to the video file
    :return: `timedelta` duration, or None if no duration line was found
    :raises Exception: if the probe exits with status 1 (bad video format)
    """
    time = None
    try:
        if arch in ('armv6l', 'armv7l'):
            # stderr is merged into stdout so the 'Duration' line can be
            # scraped below.  NOTE(review): exit code 1 is accepted via
            # _ok_code — presumably omxplayer exits 1 when only printing
            # info; confirm before tightening.
            run_player = omxplayer(file, info=True, _err_to_out=True, _ok_code=[0, 1], _decode_errors='ignore')
        else:
            run_player = ffprobe('-i', file, _err_to_out=True)
    except ErrorReturnCode_1:
        raise Exception('Bad video format')
    for line in run_player.split('\n'):
        if 'Duration' in line:
            # Match e.g. "00:01:23.45" from the Duration line.
            match = re.search(r'[0-9]+:[0-9]+:[0-9]+\.[0-9]+', line)
            if match:
                time_input = match.group()
                time_split = time_input.split(':')
                hours = int(time_split[0])
                minutes = int(time_split[1])
                seconds = float(time_split[2])
                time = timedelta(hours=hours, minutes=minutes, seconds=seconds)
                break
    return time
def handler(obj):
    """JSON `default` hook: render datetimes as UTC ISO-8601 strings."""
    if not isinstance(obj, datetime):
        raise TypeError('Object of type %s with value of %s is not JSON serializable' % (type(obj), repr(obj)))
    return obj.replace(tzinfo=pytz.utc).isoformat()
def json_dump(obj):
    """Serialize *obj* to JSON, using `handler` to encode datetimes."""
    return json.dumps(obj, default=handler)
def url_fails(url):
    """
    Check whether *url* is unreachable.

    Returns False when the URL responds (the asset is usable) and True when
    it cannot be reached.  rtsp/rtmp streams are probed with omxplayer (ARM)
    or mplayer; all other URLs are checked over HTTP with HEAD, then GET.
    """
    if urlparse(url).scheme in ('rtsp', 'rtmp'):
        if arch in ('armv6l', 'armv7l'):
            run_omxplayer = omxplayer(url, info=True, _err_to_out=True, _ok_code=[0, 1])
            for line in run_omxplayer.split('\n'):
                if 'Input #0' in line:
                    # omxplayer found an input stream, so the URL works.
                    return False
            return True
        else:
            run_mplayer = mplayer('-identify', '-frames', '0', '-nosound', url)
            for line in run_mplayer.split('\n'):
                if 'Clip info:' in line:
                    # mplayer identified the clip, so the URL works.
                    return False
            return True
    """
    Try HEAD and GET for URL availability check.
    """
    # Use Certifi module
    if settings['verify_ssl']:
        verify = certifi.where()
    else:
        verify = False
    headers = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux armv7l) AppleWebKit/538.15 (KHTML, like Gecko) Version/8.0 Safari/538.15'
    }
    try:
        # NOTE(review): an invalid URL returns False ("does not fail"),
        # which looks inverted — confirm whether callers rely on this.
        if not validate_url(url):
            return False
        # HEAD first (cheap); fall through to GET for servers that reject HEAD.
        if requests.head(
            url,
            allow_redirects=True,
            headers=headers,
            timeout=10,
            verify=verify
        ).status_code in HTTP_OK:
            return False
        if requests.get(
            url,
            allow_redirects=True,
            headers=headers,
            timeout=10,
            verify=verify
        ).status_code in HTTP_OK:
            return False
    except (requests.ConnectionError, requests.exceptions.Timeout):
        # Unreachable or too slow: fall through to "fails".
        pass
    return True
def download_video_from_youtube(uri, asset_id):
    """
    Start a background download of a YouTube video.

    Fetches the title and metadata up front with youtube-dl, then spawns a
    daemon `YoutubeDownloadThread` to download the file itself.

    :param uri: YouTube video URL
    :param asset_id: asset identifier; also used as the target file name
    :return: (download location, unicode title, duration as reported by
             youtube-dl)
    """
    home = getenv('HOME')
    name = check_output(['youtube-dl', '-e', uri])  # '-e': print title only
    info = json.loads(check_output(['youtube-dl', '-j', uri]))  # '-j': JSON metadata
    duration = info['duration']
    location = path.join(home, 'screenly_assets', asset_id)
    thread = YoutubeDownloadThread(location, uri, asset_id)
    # Daemon thread: don't block interpreter shutdown on a pending download.
    thread.daemon = True
    thread.start()
    return location, unicode(name.decode('utf-8')), duration
class YoutubeDownloadThread(Thread):
    """Background thread that downloads a YouTube video to disk.

    When the download finishes it clears the asset's `is_processing` flag
    in the database and notifies the websocket server over ZeroMQ.
    """

    def __init__(self, location, uri, asset_id):
        Thread.__init__(self)
        self.location = location  # target file path for the download
        self.uri = uri  # YouTube video URL
        self.asset_id = asset_id  # asset row to update when done

    def run(self):
        publisher = ZmqPublisher.get_instance()
        # Blocking download of the mp4 rendition to the target location.
        call(['youtube-dl', '-f', 'mp4', '-o', self.location, self.uri])
        with db.conn(settings['database']) as conn:
            update(conn, self.asset_id, {'asset_id': self.asset_id, 'is_processing': 0})
        publisher.send_to_ws_server(self.asset_id)
def template_handle_unicode(value):
    """Coerce *value* to unicode text for template rendering (Python 2)."""
    return value.decode('utf-8') if isinstance(value, str) else unicode(value)
def is_demo_node():
    """
    Check if the environment variable IS_DEMO_NODE is set to 1
    :return: bool
    """
    demo_flag = os.getenv('IS_DEMO_NODE', False)
    return string_to_bool(demo_flag)
def generate_perfect_paper_password(pw_length=10, has_symbols=True):
    """
    Generates a password using 64 characters from
    "Perfect Paper Password" system by Steve Gibson
    :param pw_length: int
    :param has_symbols: bool
    :return: string
    """
    alphabet = '!#%+23456789:=?@ABCDEFGHJKLMNPRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
    if not has_symbols:
        alphabet = ''.join(set(alphabet) - set(string.punctuation))
    rng = random.SystemRandom()
    return ''.join(rng.choice(alphabet) for _ in range(pw_length))
| gpl-2.0 |
sfu-fas/coursys | inventory/migrations/0004_add_asset_attachments_and_change_records.py | 1 | 4404 | # -*- coding: utf-8 -*-
from django.db import models, migrations
import autoslug.fields
import django.core.files.storage
import inventory.models
import courselib.json_fields
class Migration(migrations.Migration):
    """Add asset attachments and change records to the inventory app.

    Creates the `AssetChangeRecord` and `AssetDocumentAttachment` models,
    adds bookkeeping fields (`config`, `last_order_date`, `min_vendor_qty`)
    to `Asset`, and wires up the foreign keys between them.
    """

    dependencies = [
        ('outreach', '0001_initial'),
        ('coredata', '0014_auto_20160623_1509'),
        ('inventory', '0003_auto_20160728_1402'),
    ]

    operations = [
        migrations.CreateModel(
            name='AssetChangeRecord',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('qty', models.IntegerField(help_text="The change in quantity. For removal of item, make it a negative number. For adding items, make it a positive. e.g. '-2' if someone removed two ofthis item for something", verbose_name='Quantity adjustment')),
                ('date', models.DateField(null=True, blank=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('last_modified', models.DateTimeField(editable=False)),
                ('hidden', models.BooleanField(default=False, editable=False)),
                ('saved_by_userid', models.CharField(max_length=8, editable=False)),
                ('config', courselib.json_fields.JSONField(default=dict, editable=False)),
                ('slug', autoslug.fields.AutoSlugField(populate_from='autoslug', unique=True, editable=False)),
            ],
        ),
        migrations.CreateModel(
            name='AssetDocumentAttachment',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('title', models.CharField(max_length=250)),
                ('slug', autoslug.fields.AutoSlugField(populate_from='title', unique_with=('asset',), editable=False)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('contents', models.FileField(storage=django.core.files.storage.FileSystemStorage(base_url=None, location='submitted_files'), max_length=500, upload_to=inventory.models.asset_attachment_upload_to)),
                ('mediatype', models.CharField(max_length=200, null=True, editable=False, blank=True)),
                ('hidden', models.BooleanField(default=False, editable=False)),
            ],
            options={
                'ordering': ('created_at',),
            },
        ),
        migrations.AddField(
            model_name='asset',
            name='config',
            field=courselib.json_fields.JSONField(default=dict, editable=False),
        ),
        migrations.AddField(
            model_name='asset',
            name='last_order_date',
            field=models.DateField(null=True, blank=True),
        ),
        migrations.AddField(
            model_name='asset',
            name='min_vendor_qty',
            field=models.PositiveIntegerField(help_text='The minimum quantity the vendor will let us order', null=True, verbose_name='Minimum vendor order quantity', blank=True),
        ),
        # Foreign keys are added after both models exist.
        migrations.AddField(
            model_name='assetdocumentattachment',
            name='asset',
            field=models.ForeignKey(related_name='attachments', to='inventory.Asset', on_delete=models.CASCADE),
        ),
        migrations.AddField(
            model_name='assetdocumentattachment',
            name='created_by',
            field=models.ForeignKey(help_text='Document attachment created by.', to='coredata.Person', on_delete=models.CASCADE),
        ),
        migrations.AddField(
            model_name='assetchangerecord',
            name='asset',
            field=models.ForeignKey(related_name='records', to='inventory.Asset', on_delete=models.CASCADE),
        ),
        migrations.AddField(
            model_name='assetchangerecord',
            name='event',
            field=models.ForeignKey(blank=True, to='outreach.OutreachEvent', help_text='The event it was for, if any', null=True, on_delete=models.CASCADE),
        ),
        migrations.AddField(
            model_name='assetchangerecord',
            name='person',
            field=models.ForeignKey(to='coredata.Person', on_delete=models.CASCADE),
        ),
        migrations.AlterUniqueTogether(
            name='assetdocumentattachment',
            unique_together=set([('asset', 'slug')]),
        ),
    ]
| gpl-3.0 |
typemytype/Mechanic | Mechanic.roboFontExt/lib/site-packages/requests/packages/urllib3/util/url.py | 149 | 6289 | from __future__ import absolute_import
from collections import namedtuple
from ..exceptions import LocationParseError
# Field names of the Url namedtuple, in positional order.
url_attrs = 'scheme auth host port path query fragment'.split()
class Url(namedtuple('Url', url_attrs)):
    """
    Datastructure for representing an HTTP URL. Used as a return value for
    :func:`parse_url`. Both the scheme and host are normalized as they are
    both case-insensitive according to RFC 3986.
    """
    # No instance dict: a Url is exactly its namedtuple fields.
    __slots__ = ()

    def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None,
                query=None, fragment=None):
        # Normalize: relative paths get a leading '/', scheme and host are
        # lowercased (case-insensitive per RFC 3986).
        if path and not path.startswith('/'):
            path = '/' + path
        if scheme:
            scheme = scheme.lower()
        if host:
            host = host.lower()
        return super(Url, cls).__new__(cls, scheme, auth, host, port, path,
                                       query, fragment)

    @property
    def hostname(self):
        """For backwards-compatibility with urlparse. We're nice like that."""
        return self.host

    @property
    def request_uri(self):
        """Absolute path including the query string."""
        uri = self.path or '/'

        if self.query is not None:
            uri += '?' + self.query

        return uri

    @property
    def netloc(self):
        """Network location including host and port"""
        if self.port:
            return '%s:%d' % (self.host, self.port)
        return self.host

    @property
    def url(self):
        """
        Convert self into a url

        This function should more or less round-trip with :func:`.parse_url`. The
        returned url may not be exactly the same as the url inputted to
        :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls
        with a blank port will have : removed).

        Example: ::

            >>> U = parse_url('http://google.com/mail/')
            >>> U.url
            'http://google.com/mail/'
            >>> Url('http', 'username:password', 'host.com', 80,
            ...     '/path', 'query', 'fragment').url
            'http://username:password@host.com:80/path?query#fragment'
        """
        scheme, auth, host, port, path, query, fragment = self
        url = ''

        # We use "is not None" we want things to happen with empty strings (or 0 port)
        if scheme is not None:
            url += scheme + '://'
        if auth is not None:
            url += auth + '@'
        if host is not None:
            url += host
        if port is not None:
            url += ':' + str(port)
        if path is not None:
            url += path
        if query is not None:
            url += '?' + query
        if fragment is not None:
            url += '#' + fragment

        return url

    def __str__(self):
        return self.url
def split_first(s, delims):
    """
    Given a string and an iterable of delimiters, split on the first found
    delimiter. Return two split parts and the matched delimiter.

    If not found, then the first part is the full input string.

    Example::

        >>> split_first('foo/bar?baz', '?/=')
        ('foo', 'bar?baz', '/')
        >>> split_first('foo/bar?baz', '123')
        ('foo/bar?baz', '', None)

    Scales linearly with number of delims. Not ideal for large number of delims.
    """
    best_idx = None
    best_delim = None
    for delim in delims:
        found = s.find(delim)
        if found < 0:
            continue
        if best_idx is None or found < best_idx:
            best_idx = found
            best_delim = delim

    if best_idx is None:
        return s, '', None

    return s[:best_idx], s[best_idx + 1:], best_delim
def parse_url(url):
    """
    Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
    performed to parse incomplete urls. Fields not provided will be None.

    Partly backwards-compatible with :mod:`urlparse`.

    Raises `LocationParseError` for non-integer ports.

    Example::

        >>> parse_url('http://google.com/mail/')
        Url(scheme='http', host='google.com', port=None, path='/mail/', ...)
        >>> parse_url('google.com:80')
        Url(scheme=None, host='google.com', port=80, path=None, ...)
        >>> parse_url('/foo?bar')
        Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
    """

    # While this code has overlap with stdlib's urlparse, it is much
    # simplified for our needs and less annoying.
    # Additionally, this implementations does silly things to be optimal
    # on CPython.

    if not url:
        # Empty
        return Url()

    scheme = None
    auth = None
    host = None
    port = None
    path = None
    fragment = None
    query = None

    # Scheme
    if '://' in url:
        scheme, url = url.split('://', 1)

    # Find the earliest Authority Terminator
    # (http://tools.ietf.org/html/rfc3986#section-3.2)
    url, path_, delim = split_first(url, ['/', '?', '#'])

    if delim:
        # Reassemble the path
        path = delim + path_

    # Auth
    if '@' in url:
        # Last '@' denotes end of auth part
        auth, url = url.rsplit('@', 1)

    # IPv6
    if url and url[0] == '[':
        # Keep the brackets as part of the host.
        host, url = url.split(']', 1)
        host += ']'

    # Port
    if ':' in url:
        _host, port = url.split(':', 1)

        if not host:
            # Not already set by the IPv6 branch above.
            host = _host

        if port:
            # If given, ports must be integers. No whitespace, no plus or
            # minus prefixes, no non-integer digits such as ^2 (superscript).
            if not port.isdigit():
                raise LocationParseError(url)
            try:
                port = int(port)
            except ValueError:
                raise LocationParseError(url)
        else:
            # Blank ports are cool, too. (rfc3986#section-3.2.3)
            port = None

    elif not host and url:
        host = url

    if not path:
        # No path component: query/fragment cannot exist either.
        return Url(scheme, auth, host, port, path, query, fragment)

    # Fragment
    if '#' in path:
        path, fragment = path.split('#', 1)

    # Query
    if '?' in path:
        path, query = path.split('?', 1)

    return Url(scheme, auth, host, port, path, query, fragment)
def get_host(url):
    """
    Deprecated. Use :func:`parse_url` instead.
    """
    parsed = parse_url(url)
    scheme = parsed.scheme or 'http'
    return scheme, parsed.hostname, parsed.port
| mit |
SickGear/SickGear | lib/tornado_py2/__init__.py | 4 | 1086 | #
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The Tornado web server and tools."""
from __future__ import absolute_import, division, print_function
# Human-readable version string.
version = "5.1.1"

# Programmatic four-tuple derived from `version`: the first three numbers are
# the components of the version number; the fourth is zero for an official
# release, positive for a development branch, or negative for a release
# candidate or beta (after the base version number has been incremented).
version_info = tuple(int(part) for part in version.split(".")) + (0,)
| gpl-3.0 |
facelessuser/SubNotify | lib/notify/notify_windows.py | 1 | 10186 | """
Notify windows.
Copyright (c) 2013 - 2016 Isaac Muse <isaacmuse@gmail.com>
License: MIT
"""
import traceback
import winsound
import ctypes
import ctypes.wintypes as wintypes
import os
__all__ = ("get_notify", "alert", "setup", "windows_icons", "destroy")
# Pick Win32 pointer-sized integer typedefs to match this platform's pointer
# width (32- vs 64-bit builds).
if ctypes.sizeof(ctypes.c_long) == ctypes.sizeof(ctypes.c_void_p):
    WPARAM = ctypes.c_ulong
    LPARAM = ctypes.c_long
    LRESULT = ctypes.c_long
elif ctypes.sizeof(ctypes.c_longlong) == ctypes.sizeof(ctypes.c_void_p):
    WPARAM = ctypes.c_ulonglong
    LPARAM = ctypes.c_longlong
    LRESULT = ctypes.c_longlong
HANDLE = ctypes.c_void_p

# Window-procedure callback signature: LRESULT WndProc(HWND, UINT, WPARAM, LPARAM).
WNDPROCTYPE = WNDPROC = ctypes.CFUNCTYPE(LRESULT, HANDLE, ctypes.c_uint, WPARAM, LPARAM)

# Win32 window / image-loading constants.
WM_DESTROY = 2
IMAGE_ICON = 1
LR_LOADFROMFILE = 16
LR_DEFAULTSIZE = 64
IDI_APPLICATION = 1
WS_OVERLAPPED = 0
WS_SYSMENU = 524288
CW_USEDEFAULT = -2147483648
WM_USER = 1024

# Shell_NotifyIcon actions (NIM_*).
NIM_ADD = 0x00
NIM_MODIFY = 0x01
NIM_DELETE = 0x02
NIM_SETVERSION = 0x04
# NOTIFYICONDATA field flags (NIF_*) and balloon icon levels (NIIF_*).
NIF_MESSAGE = 0x01
NIF_ICON = 0x02
NIF_TIP = 0x04
NIF_STATE = 0x08
NIF_INFO = 0x10
NIF_REALTIME = 0x40
NIF_SHOWTIP = 0x80
NIIF_INFO = 0x1
NIIF_WARNING = 0x2
NIIF_ERROR = 0x3
NIIF_NOSOUND = 0x10
NIFF_USER = 0x00000004
NIS_HIDDEN = 0x01
# Parent handle value for message-only windows.
HWND_MESSAGE = -3
# Balloon notification callback codes delivered to the window procedure.
NIN_BALLOONSHOW = WM_USER + 2
NIN_BALLOONHIDE = WM_USER + 3
NIN_BALLOONTIMEOUT = WM_USER + 4
NIN_BALLOONUSERCLICK = WM_USER + 5
class WndClassEx(ctypes.Structure):
    """The `WNDCLASSEX` structure.

    ctypes mirror of the Win32 `WNDCLASSEXW` structure passed to
    `RegisterClassExW`; field order and types must match the API exactly.
    """

    _fields_ = [
        ("cbSize", ctypes.c_uint),        # size of this structure, in bytes
        ("style", ctypes.c_uint),
        ("lpfnWndProc", WNDPROCTYPE),     # window-procedure callback
        ("cbClsExtra", ctypes.c_int),
        ("cbWndExtra", ctypes.c_int),
        ("hInstance", HANDLE),
        ("hIcon", HANDLE),
        ("hCursor", HANDLE),
        ("hbrBackground", HANDLE),
        ("lpszMenuName", wintypes.LPCWSTR),
        ("lpszClassName", wintypes.LPCWSTR),
        ("hIconSm", HANDLE)
    ]
class NotifyIconData(ctypes.Structure):
    """The `NOTIFYICONDATA` structure.

    ctypes mirror of the Win32 `NOTIFYICONDATAW` structure passed to
    `Shell_NotifyIconW`; field order and sizes must match the API exactly.
    """

    _fields_ = [
        ("cbSize", ctypes.c_uint),            # size of this structure, in bytes
        ("hWnd", HANDLE),                     # window receiving callbacks
        ("uID", ctypes.c_uint),
        ("uFlags", ctypes.c_uint),            # which fields below are valid (NIF_*)
        ("uCallbackMessage", ctypes.c_uint),
        ("hIcon", HANDLE),
        ("szTip", ctypes.c_wchar * 128),      # tray tooltip text
        ("dwState", ctypes.c_uint),
        ("dwStateMask", ctypes.c_uint),
        ("szInfo", ctypes.c_wchar * 256),     # balloon body text
        ("uVersion", ctypes.c_uint),
        ("szInfoTitle", ctypes.c_wchar * 64), # balloon title text
        ("dwInfoFlags", ctypes.c_uint),       # balloon icon level (NIIF_*)
        ("guidItem", ctypes.c_char * 16),
        ("hBalloonIcon", HANDLE),
    ]
class Options:
    """Module-level notification state shared across this backend."""

    # Active notify callable, live notifier instance, and alert sound path.
    notify = instance = sound = None

    @classmethod
    def clear(cls):
        """Reset every option back to its unset (None) state."""
        for attr in ('notify', 'instance', 'sound'):
            setattr(cls, attr, None)
def _alert(sound=None):
    """Play an alert sound, falling back to the configured default."""
    if sound is None and Options.sound is not None:
        sound = Options.sound
    if not sound:
        return
    try:
        winsound.PlaySound(sound, winsound.SND_FILENAME)
    except Exception:
        # Best effort: a missing/bad sound file should never crash the caller.
        pass
def alert():
    """Play the default (configured) alert sound."""
    _alert(None)
class WinNotifyLevel:
    """Bit flags selecting the Windows notification icon."""

    ICON_INFORMATION = 0x01
    ICON_WARNING = 0x02
    ICON_ERROR = 0x04


# Map public level names to their flag values.
windows_icons = dict(
    Info=WinNotifyLevel.ICON_INFORMATION,
    Warning=WinNotifyLevel.ICON_WARNING,
    Error=WinNotifyLevel.ICON_ERROR
)
def notify_win_fallback(title, message, sound, icon, fallback):
    """Ignore the icon level and delegate the notification to *fallback*."""
    fallback(title, message, sound)
class WindowsNotify:
    """Windows notification class.

    Shows balloon/toast notifications through the Win32 tray API
    (`Shell_NotifyIconW`).  The window class, window handle and tray icon
    are kept as class attributes so repeated `setup()` calls reuse or
    replace a single process-wide tray entry.
    """

    # Process-wide Win32 handles shared by all instances.
    window_handle = None
    taskbar_icon = None
    wc = None

    def __init__(self, app_name, icon, tooltip=None):
        """
        Create the taskbar for the application and register it.

        Show nothing by default until called.

        :param app_name: application name; used for the window class and tooltip
        :param icon: path to an .ico file, or None for the stock OS icon
        :param tooltip: optional tray tooltip text
        """

        def winproc(hwnd, msg, wparam, lparam):
            """Handle `winproc` events."""
            # Balloon timed out or was clicked: nothing to do.
            if msg == WM_USER + 20 and lparam in (NIN_BALLOONTIMEOUT, NIN_BALLOONUSERCLICK):
                pass
            return hwnd

        self.tooltip = tooltip
        self.visible = False
        self.app_name = app_name

        # Register window class
        wc = WndClassEx()
        self.hinst = wc.hInstance = ctypes.windll.kernel32.GetModuleHandleW(None)
        wc.cbSize = ctypes.sizeof(wc)
        wc.lpszClassName = ctypes.c_wchar_p(app_name + "Taskbar")
        wc.lpfnWndProc = WNDPROCTYPE(winproc)
        wc.style = 0
        wc.cbClsExtra = 0
        wc.cbWndExtra = 0
        wc.hIcon = 0
        wc.hCursor = 0
        wc.hbrBackground = 0
        # Re-registering: tear down the previous window and class first.
        if WindowsNotify.wc is not None:
            self._destroy_window()
            ctypes.windll.user32.UnregisterClassW(wc.lpszClassName, None)
            WindowsNotify.wc = wc
        ctypes.windll.user32.RegisterClassExW(ctypes.byref(wc))
        WindowsNotify.wc = wc

        self.hicon = self.get_icon(icon)
        # Create the hidden window / tray entry without showing a balloon.
        self._show_notification('', '', False, self.hicon)

    def get_icon(self, icon):
        """
        Get icon.

        Try to load the given icon from the path given,
        else default to generic application icon from the OS.
        """
        # Free the previously loaded tray icon handle, if any.
        if WindowsNotify.taskbar_icon is not None:
            ctypes.windll.user32.DestroyIcon(wintypes.HICON(WindowsNotify.taskbar_icon))
            WindowsNotify.taskbar_icon = None

        icon_flags = LR_LOADFROMFILE
        try:
            if icon is None:
                raise ValueError("Icon is not available")
            hicon = ctypes.windll.user32.LoadImageW(
                self.hinst, icon,
                IMAGE_ICON,
                0, 0, icon_flags
            )
        except Exception:
            # Fall back to the stock application icon.
            hicon = ctypes.windll.user32.LoadIconA(0, IDI_APPLICATION)
        WindowsNotify.taskbar_icon = hicon

        return hicon

    def show_notification(self, title, msg, sound, icon, fallback):
        """
        Attempt to show notifications.

        Provide fallback for consistency with other notification methods.
        """
        try:
            self._show_notification(title, msg, sound, icon)
        except Exception:
            print(traceback.format_exc())
            fallback(title, msg, sound)

    def _get_window(self):
        """Create the Window."""
        if WindowsNotify.window_handle:
            hwnd = WindowsNotify.window_handle
        else:
            # Reuse an existing message-only window of our class, if present.
            hwnd = ctypes.windll.user32.FindWindowExW(
                HWND_MESSAGE, None, WindowsNotify.wc.lpszClassName, None
            )
            if not hwnd:
                style = WS_OVERLAPPED | WS_SYSMENU
                hwnd = ctypes.windll.user32.CreateWindowExW(
                    0, WindowsNotify.wc.lpszClassName, WindowsNotify.wc.lpszClassName, style,
                    0, 0, CW_USEDEFAULT, CW_USEDEFAULT,
                    HWND_MESSAGE, 0, self.hinst, None
                )
            if hwnd:
                WindowsNotify.window_handle = hwnd
                ctypes.windll.user32.UpdateWindow(hwnd)
        return hwnd

    def _destroy_window(self):
        """Destroy the window."""
        if WindowsNotify.window_handle:
            if self.visible:
                # Remove the tray icon before destroying its owner window.
                res = NotifyIconData()
                res.cbSize = ctypes.sizeof(res)
                res.hWnd = WindowsNotify.window_handle
                res.uID = 0
                res.uFlags = 0
                res.uVersion = 4
                ctypes.windll.shell32.Shell_NotifyIconW(NIM_DELETE, ctypes.byref(res))
                ctypes.windll.user32.UpdateWindow(WindowsNotify.window_handle)
                self.visible = False
            ctypes.windll.user32.DestroyWindow(WindowsNotify.window_handle)
            WindowsNotify.window_handle = None

    def _show_notification(self, title, msg, sound, icon):
        """Call windows API to show notification."""
        # Translate our icon flags into the NIIF_* balloon icon level.
        icon_level = 0
        if icon & WinNotifyLevel.ICON_INFORMATION:
            icon_level |= NIIF_INFO
        elif icon & WinNotifyLevel.ICON_WARNING:
            icon_level |= NIIF_WARNING
        elif icon & WinNotifyLevel.ICON_ERROR:
            icon_level |= NIIF_ERROR
        hwnd = self._get_window()
        if hwnd:
            res = NotifyIconData()
            res.cbSize = ctypes.sizeof(res)
            res.hWnd = hwnd
            res.uID = 0
            # `NIF_SHOWTIP` and `NIF_TIP` is probably not needed for Windows 8+, but maybe for 7?
            res.uFlags = NIF_INFO | NIF_ICON | NIF_STATE | NIF_SHOWTIP | NIF_TIP | NIF_MESSAGE
            res.uCallbackMessage = WM_USER + 20
            res.hIcon = self.hicon
            res.szTip = self.app_name[:128]
            res.uVersion = 4
            res.szInfo = msg[:256]
            res.szInfoTitle = title[:64]
            res.dwInfoFlags = icon_level | NIIF_NOSOUND | NIFF_USER
            # Try to modify the existing tray icon; if that fails (not yet
            # added), add it and opt into version-4 callback behavior.
            if not ctypes.windll.shell32.Shell_NotifyIconW(NIM_MODIFY, ctypes.byref(res)):
                if not self.visible and WindowsNotify.window_handle:
                    ctypes.windll.shell32.Shell_NotifyIconW(NIM_ADD, ctypes.byref(res))
                    ctypes.windll.shell32.Shell_NotifyIconW(NIM_SETVERSION, ctypes.byref(res))
                    self.visible = WindowsNotify.window_handle is not None
        # Sound is played by us (NIIF_NOSOUND suppresses the system one).
        if sound:
            alert()

    def destroy(self):
        """Destroy."""
        self._destroy_window()

    @staticmethod
    def NotifyWin(title, msg, sound, icon, fallback):
        """Notify for windows."""
        Options.instance.show_notification(title, msg, sound, icon, fallback)
def setup(app_name, icon, **kwargs):
    """Set up the Windows notifier.

    :param app_name: application name shown on notifications
    :param icon: path to an .ico file, or None for the stock OS icon
    :param kwargs: optional 'sound' — path to an alert sound file
    """
    sound = kwargs.get('sound')
    if sound is not None and os.path.exists(sound):
        Options.sound = sound

    try:
        if icon is None or not os.path.exists(icon):
            raise ValueError("Icon does not appear to be valid")
    except Exception:
        icon = None

    Options.instance = WindowsNotify(app_name, icon, app_name)
    # Fix: `NotifyWin` is a static method of `WindowsNotify`, not a
    # module-level name — the bare reference raised NameError here.
    Options.notify = WindowsNotify.NotifyWin
def destroy():
    """Tear down the notifier and restore the fallback handler."""
    instance = Options.instance
    if instance is not None:
        instance.destroy()
    Options.clear()
    Options.notify = notify_win_fallback
def get_notify():
    """Return the active notification callable (set by `setup`/`destroy`)."""
    return Options.notify
# Default to the fallback handler until `setup()` installs the real notifier.
Options.notify = notify_win_fallback
| mit |
GaussDing/django | django/contrib/admin/sites.py | 18 | 22090 | from functools import update_wrapper
from django.apps import apps
from django.conf import settings
from django.contrib.admin import ModelAdmin, actions
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.core.urlresolvers import NoReverseMatch, reverse
from django.db.models.base import ModelBase
from django.http import Http404, HttpResponseRedirect
from django.template.engine import Engine
from django.template.response import TemplateResponse
from django.utils import six
from django.utils.text import capfirst
from django.utils.translation import ugettext as _, ugettext_lazy
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect
# Admin system-check errors collected during `AdminSite.register()` (DEBUG only).
system_check_errors = []
class AlreadyRegistered(Exception):
    """Raised when registering a model that is already registered."""
class NotRegistered(Exception):
    """Raised when unregistering or looking up a model that is not registered."""
class AdminSite(object):
"""
An AdminSite object encapsulates an instance of the Django admin application, ready
to be hooked in to your URLconf. Models are registered with the AdminSite using the
register() method, and the get_urls() method can then be used to access Django view
functions that present a full admin interface for the collection of registered
models.
"""
# Text to put at the end of each page's <title>.
site_title = ugettext_lazy('Django site admin')
# Text to put in each page's <h1>.
site_header = ugettext_lazy('Django administration')
# Text to put at the top of the admin index page.
index_title = ugettext_lazy('Site administration')
# URL for the "View site" link at the top of each admin page.
site_url = '/'
login_form = None
index_template = None
app_index_template = None
login_template = None
logout_template = None
password_change_template = None
password_change_done_template = None
    def __init__(self, name='admin'):
        """
        :param name: URL-reversing instance namespace for this site
        """
        self._registry = {}  # model_class class -> admin_class instance
        self.name = name
        # Site-wide admin actions; 'delete_selected' is available by default.
        self._actions = {'delete_selected': actions.delete_selected}
        # Snapshot of all actions ever registered, even if later disabled.
        self._global_actions = self._actions.copy()
    def register(self, model_or_iterable, admin_class=None, **options):
        """
        Registers the given model(s) with the given admin class.

        The model(s) should be Model classes, not instances.

        If an admin class isn't given, it will use ModelAdmin (the default
        admin options). If keyword arguments are given -- e.g., list_display --
        they'll be applied as options to the admin class.

        If a model is already registered, this will raise AlreadyRegistered.

        If a model is abstract, this will raise ImproperlyConfigured.
        """
        if not admin_class:
            admin_class = ModelAdmin

        # Accept a single model class or an iterable of model classes.
        if isinstance(model_or_iterable, ModelBase):
            model_or_iterable = [model_or_iterable]
        for model in model_or_iterable:
            if model._meta.abstract:
                raise ImproperlyConfigured('The model %s is abstract, so it '
                      'cannot be registered with admin.' % model.__name__)

            if model in self._registry:
                raise AlreadyRegistered('The model %s is already registered' % model.__name__)

            # Ignore the registration if the model has been
            # swapped out.
            if not model._meta.swapped:
                # If we got **options then dynamically construct a subclass of
                # admin_class with those **options.
                if options:
                    # For reasons I don't quite understand, without a __module__
                    # the created class appears to "live" in the wrong place,
                    # which causes issues later on.
                    options['__module__'] = __name__
                    admin_class = type("%sAdmin" % model.__name__, (admin_class,), options)

                # Run admin checks only in DEBUG; collect errors instead of raising.
                if admin_class is not ModelAdmin and settings.DEBUG:
                    system_check_errors.extend(admin_class.check(model))

                # Instantiate the admin class to save in the registry
                self._registry[model] = admin_class(model, self)
def unregister(self, model_or_iterable):
"""
Unregisters the given model(s).
If a model isn't already registered, this will raise NotRegistered.
"""
if isinstance(model_or_iterable, ModelBase):
model_or_iterable = [model_or_iterable]
for model in model_or_iterable:
if model not in self._registry:
raise NotRegistered('The model %s is not registered' % model.__name__)
del self._registry[model]
def is_registered(self, model):
"""
Check if a model class is registered with this `AdminSite`.
"""
return model in self._registry
def add_action(self, action, name=None):
"""
Register an action to be available globally.
"""
name = name or action.__name__
self._actions[name] = action
self._global_actions[name] = action
def disable_action(self, name):
"""
Disable a globally-registered action. Raises KeyError for invalid names.
"""
del self._actions[name]
    def get_action(self, name):
        """
        Explicitly get a registered global action whether it's enabled or
        not. Raises KeyError for invalid names.
        """
        return self._global_actions[name]
    @property
    def actions(self):
        """
        Get all the enabled actions as an iterable of (name, func).
        """
        # six.iteritems: py2/py3-compatible dict item iterator.
        return six.iteritems(self._actions)
def has_permission(self, request):
"""
Returns True if the given HttpRequest has permission to view
*at least one* page in the admin site.
"""
return request.user.is_active and request.user.is_staff
    def check_dependencies(self):
        """
        Check that all things needed to run the admin have been correctly installed.

        The default implementation checks that admin and contenttypes apps are
        installed, as well as the auth context processor.

        Raises ImproperlyConfigured on a missing dependency.
        """
        if not apps.is_installed('django.contrib.admin'):
            raise ImproperlyConfigured(
                "Put 'django.contrib.admin' in your INSTALLED_APPS "
                "setting in order to use the admin application.")
        if not apps.is_installed('django.contrib.contenttypes'):
            raise ImproperlyConfigured(
                "Put 'django.contrib.contenttypes' in your INSTALLED_APPS "
                "setting in order to use the admin application.")
        try:
            default_template_engine = Engine.get_default()
        except Exception:
            # Skip this non-critical check:
            # 1. if the user has a non-trivial TEMPLATES setting and Django
            #    can't find a default template engine
            # 2. if anything goes wrong while loading template engines, in
            #    order to avoid raising an exception from a confusing location
            # Catching ImproperlyConfigured suffices for 1. but 2. requires
            # catching all exceptions.
            pass
        else:
            if ('django.contrib.auth.context_processors.auth'
                    not in default_template_engine.context_processors):
                raise ImproperlyConfigured(
                    "Enable 'django.contrib.auth.context_processors.auth' "
                    "in your TEMPLATES setting in order to use the admin "
                    "application.")
    def admin_view(self, view, cacheable=False):
        """
        Decorator to create an admin view attached to this ``AdminSite``. This
        wraps the view and provides permission checking by calling
        ``self.has_permission``.

        You'll want to use this from within ``AdminSite.get_urls()``:

            class MyAdminSite(AdminSite):

                def get_urls(self):
                    from django.conf.urls import url

                    urls = super(MyAdminSite, self).get_urls()
                    urls += [
                        url(r'^my_view/$', self.admin_view(some_view))
                    ]
                    return urls

        By default, admin_views are marked non-cacheable using the
        ``never_cache`` decorator. If the view can be safely cached, set
        cacheable=True.
        """
        def inner(request, *args, **kwargs):
            if not self.has_permission(request):
                # Special case: let an unauthenticated logout request bounce
                # to the index instead of the login page.
                if request.path == reverse('admin:logout', current_app=self.name):
                    index_path = reverse('admin:index', current_app=self.name)
                    return HttpResponseRedirect(index_path)
                # Inner import to prevent django.contrib.admin (app) from
                # importing django.contrib.auth.models.User (unrelated model).
                from django.contrib.auth.views import redirect_to_login
                return redirect_to_login(
                    request.get_full_path(),
                    reverse('admin:login', current_app=self.name)
                )
            return view(request, *args, **kwargs)
        if not cacheable:
            inner = never_cache(inner)
        # We add csrf_protect here so this function can be used as a utility
        # function for any view, without having to repeat 'csrf_protect'.
        if not getattr(view, 'csrf_exempt', False):
            inner = csrf_protect(inner)
        return update_wrapper(inner, view)
    def get_urls(self):
        """Build the full urlpatterns list for this admin site.

        Includes the site-wide views, one URL include per registered
        ModelAdmin, and a per-app index view.
        """
        from django.conf.urls import url, include
        # Since this module gets imported in the application's root package,
        # it cannot import models from other applications at the module level,
        # and django.contrib.contenttypes.views imports ContentType.
        from django.contrib.contenttypes import views as contenttype_views

        if settings.DEBUG:
            self.check_dependencies()

        def wrap(view, cacheable=False):
            # Bind `view` through admin_view (permission + csrf handling).
            def wrapper(*args, **kwargs):
                return self.admin_view(view, cacheable)(*args, **kwargs)
            wrapper.admin_site = self
            return update_wrapper(wrapper, view)

        # Admin-site-wide views.
        urlpatterns = [
            url(r'^$', wrap(self.index), name='index'),
            url(r'^login/$', self.login, name='login'),
            url(r'^logout/$', wrap(self.logout), name='logout'),
            url(r'^password_change/$', wrap(self.password_change, cacheable=True), name='password_change'),
            url(r'^password_change/done/$', wrap(self.password_change_done, cacheable=True),
                name='password_change_done'),
            url(r'^jsi18n/$', wrap(self.i18n_javascript, cacheable=True), name='jsi18n'),
            url(r'^r/(?P<content_type_id>\d+)/(?P<object_id>.+)/$', wrap(contenttype_views.shortcut),
                name='view_on_site'),
        ]

        # Add in each model's views, and create a list of valid URLS for the
        # app_index
        valid_app_labels = []
        for model, model_admin in six.iteritems(self._registry):
            urlpatterns += [
                url(r'^%s/%s/' % (model._meta.app_label, model._meta.model_name), include(model_admin.urls)),
            ]
            if model._meta.app_label not in valid_app_labels:
                valid_app_labels.append(model._meta.app_label)

        # If there were ModelAdmins registered, we should have a list of app
        # labels for which we need to allow access to the app_index view,
        if valid_app_labels:
            regex = r'^(?P<app_label>' + '|'.join(valid_app_labels) + ')/$'
            urlpatterns += [
                url(regex, wrap(self.app_index), name='app_list'),
            ]
        return urlpatterns
@property
def urls(self):
return self.get_urls(), 'admin', self.name
def each_context(self, request):
"""
Returns a dictionary of variables to put in the template context for
*every* page in the admin site.
"""
return {
'site_title': self.site_title,
'site_header': self.site_header,
'site_url': self.site_url,
'has_permission': self.has_permission(request),
}
def password_change(self, request, extra_context=None):
"""
Handles the "change password" task -- both form display and validation.
"""
from django.contrib.admin.forms import AdminPasswordChangeForm
from django.contrib.auth.views import password_change
url = reverse('admin:password_change_done', current_app=self.name)
defaults = {
'current_app': self.name,
'password_change_form': AdminPasswordChangeForm,
'post_change_redirect': url,
'extra_context': dict(self.each_context(request), **(extra_context or {})),
}
if self.password_change_template is not None:
defaults['template_name'] = self.password_change_template
return password_change(request, **defaults)
def password_change_done(self, request, extra_context=None):
"""
Displays the "success" page after a password change.
"""
from django.contrib.auth.views import password_change_done
defaults = {
'current_app': self.name,
'extra_context': dict(self.each_context(request), **(extra_context or {})),
}
if self.password_change_done_template is not None:
defaults['template_name'] = self.password_change_done_template
return password_change_done(request, **defaults)
def i18n_javascript(self, request):
"""
Displays the i18n JavaScript that the Django admin requires.
This takes into account the USE_I18N setting. If it's set to False, the
generated JavaScript will be leaner and faster.
"""
if settings.USE_I18N:
from django.views.i18n import javascript_catalog
else:
from django.views.i18n import null_javascript_catalog as javascript_catalog
return javascript_catalog(request, packages=['django.conf', 'django.contrib.admin'])
@never_cache
def logout(self, request, extra_context=None):
"""
Logs out the user for the given HttpRequest.
This should *not* assume the user is already logged in.
"""
from django.contrib.auth.views import logout
defaults = {
'current_app': self.name,
'extra_context': dict(self.each_context(request), **(extra_context or {})),
}
if self.logout_template is not None:
defaults['template_name'] = self.logout_template
return logout(request, **defaults)
    @never_cache
    def login(self, request, extra_context=None):
        """
        Displays the login form for the given HttpRequest.

        GET requests from a user who already passes ``has_permission`` are
        redirected straight to the admin index instead of re-showing the
        form.
        """
        if request.method == 'GET' and self.has_permission(request):
            # Already logged-in, redirect to admin index
            index_path = reverse('admin:index', current_app=self.name)
            return HttpResponseRedirect(index_path)
        from django.contrib.auth.views import login
        # Since this module gets imported in the application's root package,
        # it cannot import models from other applications at the module level,
        # and django.contrib.admin.forms eventually imports User.
        from django.contrib.admin.forms import AdminAuthenticationForm
        context = dict(self.each_context(request),
                       title=_('Log in'),
                       app_path=request.get_full_path(),
                       )
        # Default the post-login redirect to the current URL unless the
        # request already carries an explicit redirect field.
        if (REDIRECT_FIELD_NAME not in request.GET and
                REDIRECT_FIELD_NAME not in request.POST):
            context[REDIRECT_FIELD_NAME] = request.get_full_path()
        context.update(extra_context or {})
        defaults = {
            'extra_context': context,
            'current_app': self.name,
            # Site-level overrides win; otherwise fall back to the stock
            # admin authentication form and login template.
            'authentication_form': self.login_form or AdminAuthenticationForm,
            'template_name': self.login_template or 'admin/login.html',
        }
        return login(request, **defaults)
    @never_cache
    def index(self, request, extra_context=None):
        """
        Displays the main admin index page, which lists all of the installed
        apps that have been registered in this site.
        """
        # app_dict maps app_label -> {name, app_label, app_url,
        # has_module_perms, models}; only populated for apps where the user
        # can see at least one model.
        app_dict = {}
        for model, model_admin in self._registry.items():
            app_label = model._meta.app_label
            has_module_perms = model_admin.has_module_permission(request)
            if has_module_perms:
                perms = model_admin.get_model_perms(request)
                # Check whether user has any perm for this module.
                # If so, add the module to the model_list.
                if True in perms.values():
                    info = (app_label, model._meta.model_name)
                    model_dict = {
                        'name': capfirst(model._meta.verbose_name_plural),
                        'object_name': model._meta.object_name,
                        'perms': perms,
                    }
                    # Only link to changelist/add views that actually
                    # reverse; a missing URL simply omits the link.
                    if perms.get('change', False):
                        try:
                            model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)
                        except NoReverseMatch:
                            pass
                    if perms.get('add', False):
                        try:
                            model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)
                        except NoReverseMatch:
                            pass
                    if app_label in app_dict:
                        app_dict[app_label]['models'].append(model_dict)
                    else:
                        # First visible model of this app: create the app
                        # entry with its metadata.
                        app_dict[app_label] = {
                            'name': apps.get_app_config(app_label).verbose_name,
                            'app_label': app_label,
                            'app_url': reverse(
                                'admin:app_list',
                                kwargs={'app_label': app_label},
                                current_app=self.name,
                            ),
                            'has_module_perms': has_module_perms,
                            'models': [model_dict],
                        }
        # Sort the apps alphabetically.
        app_list = list(six.itervalues(app_dict))
        app_list.sort(key=lambda x: x['name'].lower())
        # Sort the models alphabetically within each app.
        for app in app_list:
            app['models'].sort(key=lambda x: x['name'])
        context = dict(
            self.each_context(request),
            title=self.index_title,
            app_list=app_list,
        )
        context.update(extra_context or {})
        request.current_app = self.name
        return TemplateResponse(request, self.index_template or
                                'admin/index.html', context)
def app_index(self, request, app_label, extra_context=None):
app_name = apps.get_app_config(app_label).verbose_name
app_dict = {}
for model, model_admin in self._registry.items():
if app_label == model._meta.app_label:
has_module_perms = model_admin.has_module_permission(request)
if not has_module_perms:
raise PermissionDenied
perms = model_admin.get_model_perms(request)
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
if True in perms.values():
info = (app_label, model._meta.model_name)
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'object_name': model._meta.object_name,
'perms': perms,
}
if perms.get('change'):
try:
model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)
except NoReverseMatch:
pass
if perms.get('add'):
try:
model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)
except NoReverseMatch:
pass
if app_dict:
app_dict['models'].append(model_dict),
else:
# First time around, now that we know there's
# something to display, add in the necessary meta
# information.
app_dict = {
'name': app_name,
'app_label': app_label,
'app_url': '',
'has_module_perms': has_module_perms,
'models': [model_dict],
}
if not app_dict:
raise Http404('The requested admin page does not exist.')
# Sort the models alphabetically within each app.
app_dict['models'].sort(key=lambda x: x['name'])
context = dict(self.each_context(request),
title=_('%(app)s administration') % {'app': app_name},
app_list=[app_dict],
app_label=app_label,
)
context.update(extra_context or {})
request.current_app = self.name
return TemplateResponse(request, self.app_index_template or [
'admin/%s/app_index.html' % app_label,
'admin/app_index.html'
], context)
# This global object represents the default admin site, for the common case.
# You can instantiate AdminSite in your own code to create a custom admin site.
# NOTE: created at import time; its `urls` property exposes it under the
# 'admin' application namespace.
site = AdminSite()
| bsd-3-clause |
hastexo/edx-platform | cms/djangoapps/contentstore/management/commands/reindex_course.py | 18 | 4477 | """ Management command to update courses' search index """
import logging
from textwrap import dedent
from django.core.management import BaseCommand, CommandError
from elasticsearch import exceptions
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import CourseLocator
from search.search_engine_base import SearchEngine
from contentstore.courseware_index import CoursewareSearchIndexer
from xmodule.modulestore.django import modulestore
from .prompt import query_yes_no
class Command(BaseCommand):
    """
    Command to re-index courses
    Examples:
    ./manage.py reindex_course <course_id_1> <course_id_2> ... - reindexes courses with provided keys
    ./manage.py reindex_course --all - reindexes all available courses
    ./manage.py reindex_course --setup - reindexes all courses for devstack setup
    """
    help = dedent(__doc__)

    CONFIRMATION_PROMPT = u"Re-indexing all courses might be a time consuming operation. Do you want to continue?"

    def add_arguments(self, parser):
        """Declare positional course ids and the mutually exclusive --all/--setup flags."""
        parser.add_argument('course_ids',
                            nargs='*',
                            metavar='course_id')
        parser.add_argument('--all',
                            action='store_true',
                            help='Reindex all courses')
        parser.add_argument('--setup',
                            action='store_true',
                            help='Reindex all courses on developers stack setup')

    def _parse_course_key(self, raw_value):
        """ Parses course key from string """
        try:
            result = CourseKey.from_string(raw_value)
        except InvalidKeyError:
            raise CommandError("Invalid course_key: '%s'." % raw_value)
        if not isinstance(result, CourseLocator):
            raise CommandError(u"Argument {0} is not a course key".format(raw_value))
        return result

    def handle(self, *args, **options):
        """
        By convention set by Django developers, this method actually executes command's actions.
        So, there could be no better docstring than emphasize this once again.
        """
        course_ids = options['course_ids']
        all_option = options['all']
        setup_option = options['setup']
        index_all_courses_option = all_option or setup_option
        # Exactly one of "explicit course ids" / "--all or --setup" must be
        # given: error out when neither or both are present.
        # (Truthiness instead of len() -- course_ids is a list.)
        if (not course_ids and not index_all_courses_option) or \
                (course_ids and index_all_courses_option):
            raise CommandError("reindex_course requires one or more <course_id>s OR the --all or --setup flags.")
        store = modulestore()
        if index_all_courses_option:
            index_name = CoursewareSearchIndexer.INDEX_NAME
            doc_type = CoursewareSearchIndexer.DOCUMENT_TYPE
            if setup_option:
                try:
                    # try getting the ElasticSearch engine
                    searcher = SearchEngine.get_search_engine(index_name)
                except exceptions.ElasticsearchException as exc:
                    logging.exception('Search Engine error - %s', exc)
                    return
                index_exists = searcher._es.indices.exists(index=index_name)  # pylint: disable=protected-access
                doc_type_exists = searcher._es.indices.exists_type(  # pylint: disable=protected-access
                    index=index_name,
                    doc_type=doc_type
                )
                index_mapping = searcher._es.indices.get_mapping(  # pylint: disable=protected-access
                    index=index_name,
                    doc_type=doc_type
                ) if index_exists and doc_type_exists else {}
                # Index already set up with a mapping: nothing to do.
                if index_exists and index_mapping:
                    return
            # if reindexing is done during devstack setup step, don't prompt the user
            if setup_option or query_yes_no(self.CONFIRMATION_PROMPT, default="no"):
                # in case of --setup or --all, get the list of course keys from all courses
                # that are stored in the modulestore.
                # FIX: reuse the `store` bound above instead of calling
                # modulestore() a second time.
                course_keys = [course.id for course in store.get_courses()]
            else:
                return
        else:
            # in case course keys are provided as arguments
            course_keys = [self._parse_course_key(course_id) for course_id in course_ids]
        for course_key in course_keys:
            CoursewareSearchIndexer.do_course_reindex(store, course_key)
| agpl-3.0 |
webmasterraj/FogOrNot | flask/lib/python2.7/site-packages/boto/sqs/connection2.py | 6 | 38520 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
try:
import json
except ImportError:
import simplejson as json
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.sqs import exceptions
class SQSConnection(AWSQueryConnection):
    """
    Welcome to the Amazon Simple Queue Service API Reference . This
    section describes who should read this guide, how the guide is
    organized, and other resources related to the Amazon Simple Queue
    Service (Amazon SQS).
    Amazon SQS offers reliable and scalable hosted queues for storing
    messages as they travel between computers. By using Amazon SQS,
    you can move data between distributed components of your
    applications that perform different tasks without losing messages
    or requiring each component to be always available.
    Helpful Links:
    + `Current WSDL (2012-11-05)`_
    + `Making API Requests`_
    + `Amazon SQS product page`_
    + `Regions and Endpoints`_
    We also provide SDKs that enable you to access Amazon SQS from
    your preferred programming language. The SDKs contain
    functionality that automatically takes care of tasks such as:
    + Cryptographically signing your service requests
    + Retrying requests
    + Handling error responses
    For a list of available SDKs, go to `Tools for Amazon Web
    Services`_.
    """
    # SQS Query API version this connection speaks, and the region/endpoint
    # used when the caller supplies none.
    APIVersion = "2012-11-05"
    DefaultRegionName = "us-east-1"
    DefaultRegionEndpoint = "sqs.us-east-1.amazonaws.com"
    # Exception class used to parse JSON error responses.
    ResponseError = JSONResponseError
    # Maps service fault names (from error responses) to the boto exception
    # raised for each.
    _faults = {
        "QueueDoesNotExist": exceptions.QueueDoesNotExist,
        "BatchEntryIdsNotDistinct": exceptions.BatchEntryIdsNotDistinct,
        "EmptyBatchRequest": exceptions.EmptyBatchRequest,
        "OverLimit": exceptions.OverLimit,
        "QueueNameExists": exceptions.QueueNameExists,
        "InvalidMessageContents": exceptions.InvalidMessageContents,
        "TooManyEntriesInBatchRequest": exceptions.TooManyEntriesInBatchRequest,
        "QueueDeletedRecently": exceptions.QueueDeletedRecently,
        "InvalidBatchEntryId": exceptions.InvalidBatchEntryId,
        "BatchRequestTooLong": exceptions.BatchRequestTooLong,
        "InvalidIdFormat": exceptions.InvalidIdFormat,
        "ReceiptHandleIsInvalid": exceptions.ReceiptHandleIsInvalid,
        "InvalidAttributeName": exceptions.InvalidAttributeName,
        "MessageNotInflight": exceptions.MessageNotInflight,
    }
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs:
kwargs['host'] = region.endpoint
super(SQSConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def add_permission(self, queue_url, label, aws_account_ids, actions):
"""
Adds a permission to a queue for a specific `principal`_. This
allows for sharing access to the queue.
When you create a queue, you have full control access rights
for the queue. Only you (as owner of the queue) can grant or
deny permissions to the queue. For more information about
these permissions, see `Shared Queues`_ in the Amazon SQS
Developer Guide .
`AddPermission` writes an Amazon SQS-generated policy. If you
want to write your own policy, use SetQueueAttributes to
upload your policy. For more information about writing your
own policy, see `Using The Access Policy Language`_ in the
Amazon SQS Developer Guide .
Some API actions take lists of parameters. These lists are
specified using the `param.n` notation. Values of `n` are
integers starting from 1. For example, a parameter list with
two elements looks like this:
`&Attribute.1=this`
`&Attribute.2=that`
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type label: string
:param label: The unique identification of the permission you're
setting (e.g., `AliceSendMessage`). Constraints: Maximum 80
characters; alphanumeric characters, hyphens (-), and underscores
(_) are allowed.
:type aws_account_ids: list
:param aws_account_ids: The AWS account number of the `principal`_ who
will be given permission. The principal must have an AWS account,
but does not need to be signed up for Amazon SQS. For information
about locating the AWS account identification, see `Your AWS
Identifiers`_ in the Amazon SQS Developer Guide .
:type actions: list
:param actions: The action the client wants to allow for the specified
principal. The following are valid values: `* | SendMessage |
ReceiveMessage | DeleteMessage | ChangeMessageVisibility |
GetQueueAttributes | GetQueueUrl`. For more information about these
actions, see `Understanding Permissions`_ in the Amazon SQS
Developer Guide .
Specifying `SendMessage`, `DeleteMessage`, or `ChangeMessageVisibility`
for the `ActionName.n` also grants permissions for the
corresponding batch versions of those actions: `SendMessageBatch`,
`DeleteMessageBatch`, and `ChangeMessageVisibilityBatch`.
"""
params = {'QueueUrl': queue_url, 'Label': label, }
self.build_list_params(params,
aws_account_ids,
'AWSAccountIds.member')
self.build_list_params(params,
actions,
'Actions.member')
return self._make_request(
action='AddPermission',
verb='POST',
path='/', params=params)
def change_message_visibility(self, queue_url, receipt_handle,
visibility_timeout):
"""
Changes the visibility timeout of a specified message in a
queue to a new value. The maximum allowed timeout value you
can set the value to is 12 hours. This means you can't extend
the timeout of a message in an existing queue to more than a
total visibility timeout of 12 hours. (For more information
visibility timeout, see `Visibility Timeout`_ in the Amazon
SQS Developer Guide .)
For example, let's say you have a message and its default
message visibility timeout is 30 minutes. You could call
`ChangeMessageVisiblity` with a value of two hours and the
effective timeout would be two hours and 30 minutes. When that
time comes near you could again extend the time out by calling
ChangeMessageVisiblity, but this time the maximum allowed
timeout would be 9 hours and 30 minutes.
If you attempt to set the `VisibilityTimeout` to an amount
more than the maximum time left, Amazon SQS returns an error.
It will not automatically recalculate and increase the timeout
to the maximum time remaining. Unlike with a queue, when you
change the visibility timeout for a specific message, that
timeout value is applied immediately but is not saved in
memory for that message. If you don't delete a message after
it is received, the visibility timeout for the message the
next time it is received reverts to the original timeout
value, not the value you set with the
`ChangeMessageVisibility` action.
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type receipt_handle: string
:param receipt_handle: The receipt handle associated with the message
whose visibility timeout should be changed. This parameter is
returned by the ReceiveMessage action.
:type visibility_timeout: integer
:param visibility_timeout: The new value (in seconds - from 0 to 43200
- maximum 12 hours) for the message's visibility timeout.
"""
params = {
'QueueUrl': queue_url,
'ReceiptHandle': receipt_handle,
'VisibilityTimeout': visibility_timeout,
}
return self._make_request(
action='ChangeMessageVisibility',
verb='POST',
path='/', params=params)
def change_message_visibility_batch(self, queue_url, entries):
"""
Changes the visibility timeout of multiple messages. This is a
batch version of ChangeMessageVisibility. The result of the
action on each message is reported individually in the
response. You can send up to 10 ChangeMessageVisibility
requests with each `ChangeMessageVisibilityBatch` action.
Because the batch request can result in a combination of
successful and unsuccessful actions, you should check for
batch errors even when the call returns an HTTP status code of
200. Some API actions take lists of parameters. These lists
are specified using the `param.n` notation. Values of `n` are
integers starting from 1. For example, a parameter list with
two elements looks like this:
`&Attribute.1=this`
`&Attribute.2=that`
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type entries: list
:param entries: A list of receipt handles of the messages for which the
visibility timeout must be changed.
"""
params = {'QueueUrl': queue_url, }
self.build_complex_list_params(
params, entries,
'Entries.member',
('Id', 'ReceiptHandle', 'VisibilityTimeout'))
return self._make_request(
action='ChangeMessageVisibilityBatch',
verb='POST',
path='/', params=params)
def create_queue(self, queue_name, attributes=None):
"""
Creates a new queue, or returns the URL of an existing one.
When you request `CreateQueue`, you provide a name for the
queue. To successfully create a new queue, you must provide a
name that is unique within the scope of your own queues.
If you delete a queue, you must wait at least 60 seconds
before creating a queue with the same name.
You may pass one or more attributes in the request. If you do
not provide a value for any attribute, the queue will have the
default value for that attribute. Permitted attributes are the
same that can be set using SetQueueAttributes.
Use GetQueueUrl to get a queue's URL. GetQueueUrl requires
only the `QueueName` parameter.
If you provide the name of an existing queue, along with the
exact names and values of all the queue's attributes,
`CreateQueue` returns the queue URL for the existing queue. If
the queue name, attribute names, or attribute values do not
match an existing queue, `CreateQueue` returns an error.
Some API actions take lists of parameters. These lists are
specified using the `param.n` notation. Values of `n` are
integers starting from 1. For example, a parameter list with
two elements looks like this:
`&Attribute.1=this`
`&Attribute.2=that`
:type queue_name: string
:param queue_name: The name for the queue to be created.
:type attributes: map
:param attributes: A map of attributes with their corresponding values.
The following lists the names, descriptions, and values of the special
request parameters the `CreateQueue` action uses:
+ `DelaySeconds` - The time in seconds that the delivery of all
messages in the queue will be delayed. An integer from 0 to 900 (15
minutes). The default for this attribute is 0 (zero).
+ `MaximumMessageSize` - The limit of how many bytes a message can
contain before Amazon SQS rejects it. An integer from 1024 bytes (1
KiB) up to 262144 bytes (256 KiB). The default for this attribute
is 262144 (256 KiB).
+ `MessageRetentionPeriod` - The number of seconds Amazon SQS retains a
message. Integer representing seconds, from 60 (1 minute) to
1209600 (14 days). The default for this attribute is 345600 (4
days).
+ `Policy` - The queue's policy. A valid form-url-encoded policy. For
more information about policy structure, see `Basic Policy
Structure`_ in the Amazon SQS Developer Guide . For more
information about form-url-encoding, see `http://www.w3.org/MarkUp
/html-spec/html-spec_8.html#SEC8.2.1`_.
+ `ReceiveMessageWaitTimeSeconds` - The time for which a ReceiveMessage
call will wait for a message to arrive. An integer from 0 to 20
(seconds). The default for this attribute is 0.
+ `VisibilityTimeout` - The visibility timeout for the queue. An
integer from 0 to 43200 (12 hours). The default for this attribute
is 30. For more information about visibility timeout, see
`Visibility Timeout`_ in the Amazon SQS Developer Guide .
"""
params = {'QueueName': queue_name, }
if attributes is not None:
params['Attributes'] = attributes
return self._make_request(
action='CreateQueue',
verb='POST',
path='/', params=params)
def delete_message(self, queue_url, receipt_handle):
"""
Deletes the specified message from the specified queue. You
specify the message by using the message's `receipt handle`
and not the `message ID` you received when you sent the
message. Even if the message is locked by another reader due
to the visibility timeout setting, it is still deleted from
the queue. If you leave a message in the queue for longer than
the queue's configured retention period, Amazon SQS
automatically deletes it.
The receipt handle is associated with a specific instance of
receiving the message. If you receive a message more than
once, the receipt handle you get each time you receive the
message is different. When you request `DeleteMessage`, if you
don't provide the most recently received receipt handle for
the message, the request will still succeed, but the message
might not be deleted.
It is possible you will receive a message even after you have
deleted it. This might happen on rare occasions if one of the
servers storing a copy of the message is unavailable when you
request to delete the message. The copy remains on the server
and might be returned to you again on a subsequent receive
request. You should create your system to be idempotent so
that receiving a particular message more than once is not a
problem.
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type receipt_handle: string
:param receipt_handle: The receipt handle associated with the message
to delete.
"""
params = {
'QueueUrl': queue_url,
'ReceiptHandle': receipt_handle,
}
return self._make_request(
action='DeleteMessage',
verb='POST',
path='/', params=params)
def delete_message_batch(self, queue_url, entries):
"""
Deletes multiple messages. This is a batch version of
DeleteMessage. The result of the delete action on each message
is reported individually in the response.
Because the batch request can result in a combination of
successful and unsuccessful actions, you should check for
batch errors even when the call returns an HTTP status code of
200.
Some API actions take lists of parameters. These lists are
specified using the `param.n` notation. Values of `n` are
integers starting from 1. For example, a parameter list with
two elements looks like this:
`&Attribute.1=this`
`&Attribute.2=that`
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type entries: list
:param entries: A list of receipt handles for the messages to be
deleted.
"""
params = {'QueueUrl': queue_url, }
self.build_complex_list_params(
params, entries,
'Entries.member',
('Id', 'ReceiptHandle'))
return self._make_request(
action='DeleteMessageBatch',
verb='POST',
path='/', params=params)
def delete_queue(self, queue_url):
"""
Deletes the queue specified by the **queue URL**, regardless
of whether the queue is empty. If the specified queue does not
exist, Amazon SQS returns a successful response.
Use `DeleteQueue` with care; once you delete your queue, any
messages in the queue are no longer available.
When you delete a queue, the deletion process takes up to 60
seconds. Requests you send involving that queue during the 60
seconds might succeed. For example, a SendMessage request
might succeed, but after the 60 seconds, the queue and that
message you sent no longer exist. Also, when you delete a
queue, you must wait at least 60 seconds before creating a
queue with the same name.
We reserve the right to delete queues that have had no
activity for more than 30 days. For more information, see `How
Amazon SQS Queues Work`_ in the Amazon SQS Developer Guide .
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
"""
params = {'QueueUrl': queue_url, }
return self._make_request(
action='DeleteQueue',
verb='POST',
path='/', params=params)
def get_queue_attributes(self, queue_url, attribute_names=None):
"""
Gets attributes for the specified queue. The following
attributes are supported:
+ `All` - returns all values.
+ `ApproximateNumberOfMessages` - returns the approximate
number of visible messages in a queue. For more information,
see `Resources Required to Process Messages`_ in the Amazon
SQS Developer Guide .
+ `ApproximateNumberOfMessagesNotVisible` - returns the
approximate number of messages that are not timed-out and not
deleted. For more information, see `Resources Required to
Process Messages`_ in the Amazon SQS Developer Guide .
+ `VisibilityTimeout` - returns the visibility timeout for the
queue. For more information about visibility timeout, see
`Visibility Timeout`_ in the Amazon SQS Developer Guide .
+ `CreatedTimestamp` - returns the time when the queue was
created (epoch time in seconds).
+ `LastModifiedTimestamp` - returns the time when the queue
was last changed (epoch time in seconds).
+ `Policy` - returns the queue's policy.
+ `MaximumMessageSize` - returns the limit of how many bytes a
message can contain before Amazon SQS rejects it.
+ `MessageRetentionPeriod` - returns the number of seconds
Amazon SQS retains a message.
+ `QueueArn` - returns the queue's Amazon resource name (ARN).
+ `ApproximateNumberOfMessagesDelayed` - returns the
approximate number of messages that are pending to be added to
the queue.
+ `DelaySeconds` - returns the default delay on the queue in
seconds.
+ `ReceiveMessageWaitTimeSeconds` - returns the time for which
a ReceiveMessage call will wait for a message to arrive.
+ `RedrivePolicy` - returns the parameters for dead letter
queue functionality of the source queue. For more information
about RedrivePolicy and dead letter queues, see `Using Amazon
SQS Dead Letter Queues`_ in the Amazon SQS Developer Guide .
Going forward, new attributes might be added. If you are
writing code that calls this action, we recommend that you
structure your code so that it can handle new attributes
gracefully. Some API actions take lists of parameters. These
lists are specified using the `param.n` notation. Values of
`n` are integers starting from 1. For example, a parameter
list with two elements looks like this:
`&Attribute.1=this`
`&Attribute.2=that`
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type attribute_names: list
:param attribute_names: A list of attributes to retrieve information
for.
"""
params = {'QueueUrl': queue_url, }
if attribute_names is not None:
self.build_list_params(params,
attribute_names,
'AttributeNames.member')
return self._make_request(
action='GetQueueAttributes',
verb='POST',
path='/', params=params)
def get_queue_url(self, queue_name, queue_owner_aws_account_id=None):
"""
Returns the URL of an existing queue. This action provides a
simple way to retrieve the URL of an Amazon SQS queue.
To access a queue that belongs to another AWS account, use the
`QueueOwnerAWSAccountId` parameter to specify the account ID
of the queue's owner. The queue's owner must grant you
permission to access the queue. For more information about
shared queue access, see AddPermission or go to `Shared
Queues`_ in the Amazon SQS Developer Guide .
:type queue_name: string
:param queue_name: The name of the queue whose URL must be fetched.
Maximum 80 characters; alphanumeric characters, hyphens (-), and
underscores (_) are allowed.
:type queue_owner_aws_account_id: string
:param queue_owner_aws_account_id: The AWS account ID of the account
that created the queue.
"""
params = {'QueueName': queue_name, }
if queue_owner_aws_account_id is not None:
params['QueueOwnerAWSAccountId'] = queue_owner_aws_account_id
return self._make_request(
action='GetQueueUrl',
verb='POST',
path='/', params=params)
def list_dead_letter_source_queues(self, queue_url):
"""
Returns a list of your queues that have the RedrivePolicy
queue attribute configured with a dead letter queue.
:type queue_url: string
:param queue_url: The queue URL of a dead letter queue.
"""
params = {'QueueUrl': queue_url, }
return self._make_request(
action='ListDeadLetterSourceQueues',
verb='POST',
path='/', params=params)
def list_queues(self, queue_name_prefix=None):
"""
Returns a list of your queues. The maximum number of queues
that can be returned is 1000. If you specify a value for the
optional `QueueNamePrefix` parameter, only queues with a name
beginning with the specified value are returned.
:type queue_name_prefix: string
:param queue_name_prefix: A string to use for filtering the list
results. Only those queues whose name begins with the specified
string are returned.
"""
params = {}
if queue_name_prefix is not None:
params['QueueNamePrefix'] = queue_name_prefix
return self._make_request(
action='ListQueues',
verb='POST',
path='/', params=params)
def receive_message(self, queue_url, attribute_names=None,
max_number_of_messages=None, visibility_timeout=None,
wait_time_seconds=None):
"""
Retrieves one or more messages from the specified queue. Long
poll support is enabled by using the `WaitTimeSeconds`
parameter. For more information, see `Amazon SQS Long Poll`_
in the Amazon SQS Developer Guide .
Short poll is the default behavior where a weighted random set
of machines is sampled on a `ReceiveMessage` call. This means
only the messages on the sampled machines are returned. If the
number of messages in the queue is small (less than 1000), it
is likely you will get fewer messages than you requested per
`ReceiveMessage` call. If the number of messages in the queue
is extremely small, you might not receive any messages in a
particular `ReceiveMessage` response; in which case you should
repeat the request.
For each message returned, the response includes the
following:
+ Message body
+ MD5 digest of the message body. For information about MD5,
go to `http://www.faqs.org/rfcs/rfc1321.html`_.
+ Message ID you received when you sent the message to the
queue.
+ Receipt handle.
The receipt handle is the identifier you must provide when
deleting the message. For more information, see `Queue and
Message Identifiers`_ in the Amazon SQS Developer Guide .
You can provide the `VisibilityTimeout` parameter in your
request, which will be applied to the messages that Amazon SQS
returns in the response. If you do not include the parameter,
the overall visibility timeout for the queue is used for the
returned messages. For more information, see `Visibility
Timeout`_ in the Amazon SQS Developer Guide .
Going forward, new attributes might be added. If you are
writing code that calls this action, we recommend that you
structure your code so that it can handle new attributes
gracefully.
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type attribute_names: list
:param attribute_names:
A list of attributes that need to be returned along with each message.
The following lists the names and descriptions of the attributes that
can be returned:
+ `All` - returns all values.
+ `ApproximateFirstReceiveTimestamp` - returns the time when the
message was first received (epoch time in milliseconds).
+ `ApproximateReceiveCount` - returns the number of times a message has
been received but not deleted.
+ `SenderId` - returns the AWS account number (or the IP address, if
anonymous access is allowed) of the sender.
+ `SentTimestamp` - returns the time when the message was sent (epoch
time in milliseconds).
:type max_number_of_messages: integer
:param max_number_of_messages: The maximum number of messages to
return. Amazon SQS never returns more messages than this value but
may return fewer.
All of the messages are not necessarily returned.
:type visibility_timeout: integer
:param visibility_timeout: The duration (in seconds) that the received
messages are hidden from subsequent retrieve requests after being
retrieved by a `ReceiveMessage` request.
:type wait_time_seconds: integer
:param wait_time_seconds: The duration (in seconds) for which the call
will wait for a message to arrive in the queue before returning. If
a message is available, the call will return sooner than
WaitTimeSeconds.
"""
params = {'QueueUrl': queue_url, }
if attribute_names is not None:
self.build_list_params(params,
attribute_names,
'AttributeNames.member')
if max_number_of_messages is not None:
params['MaxNumberOfMessages'] = max_number_of_messages
if visibility_timeout is not None:
params['VisibilityTimeout'] = visibility_timeout
if wait_time_seconds is not None:
params['WaitTimeSeconds'] = wait_time_seconds
return self._make_request(
action='ReceiveMessage',
verb='POST',
path='/', params=params)
def remove_permission(self, queue_url, label):
"""
Revokes any permissions in the queue policy that matches the
specified `Label` parameter. Only the owner of the queue can
remove permissions.
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type label: string
:param label: The identification of the permission to remove. This is
the label added with the AddPermission action.
"""
params = {'QueueUrl': queue_url, 'Label': label, }
return self._make_request(
action='RemovePermission',
verb='POST',
path='/', params=params)
def send_message(self, queue_url, message_body, delay_seconds=None):
"""
Delivers a message to the specified queue. With Amazon SQS,
you now have the ability to send large payload messages that
are up to 256KB (262,144 bytes) in size. To send large
payloads, you must use an AWS SDK that supports SigV4 signing.
To verify whether SigV4 is supported for an AWS SDK, check the
SDK release notes.
The following list shows the characters (in Unicode) allowed
in your message, according to the W3C XML specification. For
more information, go to `http://www.w3.org/TR/REC-
xml/#charsets`_ If you send any characters not included in the
list, your request will be rejected.
#x9 | #xA | #xD | [#x20 to #xD7FF] | [#xE000 to #xFFFD] |
[#x10000 to #x10FFFF]
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type message_body: string
:param message_body: The message to send. String maximum 256 KB in
size. For a list of allowed characters, see the preceding important
note.
:type delay_seconds: integer
:param delay_seconds: The number of seconds (0 to 900 - 15 minutes) to
delay a specific message. Messages with a positive `DelaySeconds`
value become available for processing after the delay time is
finished. If you don't specify a value, the default value for the
queue applies.
"""
params = {
'QueueUrl': queue_url,
'MessageBody': message_body,
}
if delay_seconds is not None:
params['DelaySeconds'] = delay_seconds
return self._make_request(
action='SendMessage',
verb='POST',
path='/', params=params)
def send_message_batch(self, queue_url, entries):
"""
Delivers up to ten messages to the specified queue. This is a
batch version of SendMessage. The result of the send action on
each message is reported individually in the response. The
maximum allowed individual message size is 256 KB (262,144
bytes).
The maximum total payload size (i.e., the sum of all a batch's
individual message lengths) is also 256 KB (262,144 bytes).
If the `DelaySeconds` parameter is not specified for an entry,
the default for the queue is used.
The following list shows the characters (in Unicode) that are
allowed in your message, according to the W3C XML
specification. For more information, go to
`http://www.faqs.org/rfcs/rfc1321.html`_. If you send any
characters that are not included in the list, your request
will be rejected.
#x9 | #xA | #xD | [#x20 to #xD7FF] | [#xE000 to #xFFFD] |
[#x10000 to #x10FFFF]
Because the batch request can result in a combination of
successful and unsuccessful actions, you should check for
batch errors even when the call returns an HTTP status code of
200. Some API actions take lists of parameters. These lists
are specified using the `param.n` notation. Values of `n` are
integers starting from 1. For example, a parameter list with
two elements looks like this:
`&Attribute.1=this`
`&Attribute.2=that`
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type entries: list
:param entries: A list of SendMessageBatchRequestEntry items.
"""
params = {'QueueUrl': queue_url, }
self.build_complex_list_params(
params, entries,
'Entries.member',
('Id', 'MessageBody', 'DelaySeconds'))
return self._make_request(
action='SendMessageBatch',
verb='POST',
path='/', params=params)
def set_queue_attributes(self, queue_url, attributes):
"""
Sets the value of one or more queue attributes.
Going forward, new attributes might be added. If you are
writing code that calls this action, we recommend that you
structure your code so that it can handle new attributes
gracefully.
:type queue_url: string
:param queue_url: The URL of the Amazon SQS queue to take action on.
:type attributes: map
:param attributes: A map of attributes to set.
The following lists the names, descriptions, and values of the special
request parameters the `SetQueueAttributes` action uses:
+ `DelaySeconds` - The time in seconds that the delivery of all
messages in the queue will be delayed. An integer from 0 to 900 (15
minutes). The default for this attribute is 0 (zero).
+ `MaximumMessageSize` - The limit of how many bytes a message can
contain before Amazon SQS rejects it. An integer from 1024 bytes (1
KiB) up to 262144 bytes (256 KiB). The default for this attribute
is 262144 (256 KiB).
+ `MessageRetentionPeriod` - The number of seconds Amazon SQS retains a
message. Integer representing seconds, from 60 (1 minute) to
1209600 (14 days). The default for this attribute is 345600 (4
days).
+ `Policy` - The queue's policy. A valid form-url-encoded policy. For
more information about policy structure, see `Basic Policy
Structure`_ in the Amazon SQS Developer Guide . For more
information about form-url-encoding, see `http://www.w3.org/MarkUp
/html-spec/html-spec_8.html#SEC8.2.1`_.
+ `ReceiveMessageWaitTimeSeconds` - The time for which a ReceiveMessage
call will wait for a message to arrive. An integer from 0 to 20
(seconds). The default for this attribute is 0.
+ `VisibilityTimeout` - The visibility timeout for the queue. An
integer from 0 to 43200 (12 hours). The default for this attribute
is 30. For more information about visibility timeout, see
Visibility Timeout in the Amazon SQS Developer Guide .
+ `RedrivePolicy` - The parameters for dead letter queue functionality
of the source queue. For more information about RedrivePolicy and
dead letter queues, see Using Amazon SQS Dead Letter Queues in the
Amazon SQS Developer Guide .
"""
params = {'QueueUrl': queue_url, }
# TODO: NEED TO PROCESS COMPLEX ARG attributes of type map.
return self._make_request(
action='SetQueueAttributes',
verb='POST',
path='/', params=params)
def _make_request(self, action, verb, path, params):
params['ContentType'] = 'JSON'
response = self.make_request(action=action, verb='POST',
path='/', params=params)
body = response.read()
boto.log.debug(body)
if response.status == 200:
return json.loads(body)
else:
json_body = json.loads(body)
fault_name = json_body.get('Error', {}).get('Code', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
| gpl-2.0 |
jensonjose/utilbox | setup.py | 1 | 1499 | """
Setup module for the utilbox package.
"""
import io

import setuptools

from utilbox import __conf__
def read_file(file_path, encoding="utf-8"):
    """
    Return the full contents of a text file as a unicode string.

    :param file_path: Path of the file to read.
    :param encoding: Encoding used to decode the file.  Defaults to UTF-8
        so that reading README.md / requirements.txt does not depend on the
        platform's locale (plain ``open`` without an encoding previously
        could fail on non-ASCII content under e.g. Windows cp1252).
    """
    # io.open accepts an explicit encoding on both Python 2 and 3.
    with io.open(file_path, "r", encoding=encoding) as target_file:
        return target_file.read()
# retrieve information from package files
# Version comes from the package's own config map so it is defined in
# exactly one place.
package_version = __conf__.config_map["version"]
# One requirement per line in requirements.txt.
package_requirements = read_file("requirements.txt").splitlines()
# The README doubles as the long description shown on PyPI.
package_long_description = read_file("README.md")
# Ship every package except the test suite.
package_list = setuptools.find_packages(exclude=["tests"])
# Keyword arguments for setuptools.setup(), gathered in one dict so the
# call below stays a single line.
config = {
    "name": "utilbox",
    "description": "Collection of utility packages for Python.",
    "long_description": package_long_description,
    "author": "Jenson Jose",
    "author_email": "jensonjose@live.in",
    "license": "MIT",
    "platforms": ["Any"],
    "url": "https://github.com/jensonjose/utilbox",
    "version": package_version,
    "install_requires": package_requirements,
    "packages": package_list,
    "classifiers": ["Development Status :: 3 - Alpha",
                    "Environment :: Console",
                    "Intended Audience :: Developers",
                    "License :: OSI Approved :: MIT License",
                    "Natural Language :: English",
                    "Operating System :: OS Independent",
                    "Programming Language :: Python :: 2.7",
                    "Topic :: Software Development",
                    "Topic :: Software Development :: Libraries :: Python Modules",
                    "Topic :: Utilities"]
}
setuptools.setup(**config)
| mit |
bhargav2408/python-for-android | python-modules/twisted/twisted/web/test/test_xmlrpc.py | 49 | 23332 | # -*- test-case-name: twisted.web.test.test_xmlrpc -*-
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for XML-RPC support in L{twisted.web.xmlrpc}.
"""
import datetime
import xmlrpclib
from StringIO import StringIO
from twisted.trial import unittest
from twisted.web import xmlrpc
from twisted.web.xmlrpc import (
XMLRPC, payloadTemplate, addIntrospection, _QueryFactory, Proxy, withRequest)
from twisted.web import server, static, client, error, http
from twisted.internet import reactor, defer
from twisted.internet.error import ConnectionDone
from twisted.python import failure
from twisted.web.test.test_web import DummyRequest
class AsyncXMLRPCTests(unittest.TestCase):
    """
    Tests for L{XMLRPC}'s support of Deferreds.
    """
    def setUp(self):
        # Build a POST request whose body is an XML-RPC call to the
        # 'async' method with no arguments.
        self.request = DummyRequest([''])
        self.request.method = 'POST'
        self.request.content = StringIO(
            payloadTemplate % ('async', xmlrpclib.dumps(())))
        # The resource's method returns this Deferred, so each test
        # controls exactly when the response result becomes available.
        result = self.result = defer.Deferred()
        class AsyncResource(XMLRPC):
            def xmlrpc_async(self):
                return result
        self.resource = AsyncResource()
    def test_deferredResponse(self):
        """
        If an L{XMLRPC} C{xmlrpc_*} method returns a L{defer.Deferred}, the
        response to the request is the result of that L{defer.Deferred}.
        """
        self.resource.render(self.request)
        # Nothing may be written before the Deferred fires.
        self.assertEquals(self.request.written, [])
        self.result.callback("result")
        resp = xmlrpclib.loads("".join(self.request.written))
        self.assertEquals(resp, (('result',), None))
        self.assertEquals(self.request.finished, 1)
    def test_interruptedDeferredResponse(self):
        """
        While waiting for the L{Deferred} returned by an L{XMLRPC} C{xmlrpc_*}
        method to fire, the connection the request was issued over may close.
        If this happens, neither C{write} nor C{finish} is called on the
        request.
        """
        self.resource.render(self.request)
        # Simulate the client connection dropping before the result fires.
        self.request.processingFailed(
            failure.Failure(ConnectionDone("Simulated")))
        self.result.callback("result")
        self.assertEquals(self.request.written, [])
        self.assertEquals(self.request.finished, 0)
class TestRuntimeError(RuntimeError):
    """
    Distinct RuntimeError subclass so tests can flush exactly the errors
    they expect via C{flushLoggedErrors}.
    """
    pass
class TestValueError(ValueError):
    """
    Distinct ValueError subclass so tests can flush exactly the errors
    they expect via C{flushLoggedErrors}.
    """
    pass
class Test(XMLRPC):
    """
    The XML-RPC resource exercised by the test cases below.  Several of
    its methods deliberately have (or lack) docstrings and signatures
    because the introspection tests assert on exactly that metadata.
    """
    # If you add xmlrpc_ methods to this class, go change test_listMethods
    # below.
    FAILURE = 666
    NOT_FOUND = 23
    SESSION_EXPIRED = 42
    def xmlrpc_echo(self, arg):
        return arg
    # the doc string is part of the test
    def xmlrpc_add(self, a, b):
        """
        This function add two numbers.
        """
        return a + b
    xmlrpc_add.signature = [['int', 'int', 'int'],
                            ['double', 'double', 'double']]
    # the doc string is part of the test
    def xmlrpc_pair(self, string, num):
        """
        This function puts the two arguments in an array.
        """
        return [string, num]
    xmlrpc_pair.signature = [['array', 'string', 'int']]
    # the doc string is part of the test
    def xmlrpc_defer(self, x):
        """Help for defer."""
        return defer.succeed(x)
    def xmlrpc_deferFail(self):
        return defer.fail(TestValueError())
    # don't add a doc string, it's part of the test
    def xmlrpc_fail(self):
        raise TestRuntimeError
    def xmlrpc_fault(self):
        return xmlrpc.Fault(12, "hello")
    def xmlrpc_deferFault(self):
        return defer.fail(xmlrpc.Fault(17, "hi"))
    def xmlrpc_complex(self):
        return {"a": ["b", "c", 12, []], "D": "foo"}
    def xmlrpc_dict(self, map, key):
        return map[key]
    xmlrpc_dict.help = 'Help for dict.'
    @withRequest
    def xmlrpc_withRequest(self, request, other):
        """
        A method decorated with L{withRequest} which can be called by
        a test to verify that the request object really is passed as
        an argument.
        """
        return (
            # as a proof that request is a request
            request.method +
            # plus proof other arguments are still passed along
            ' ' + other)
    def _getFunction(self, functionPath):
        # Map unknown SESSION* lookups to a SESSION_EXPIRED fault so the
        # error tests can trigger a custom fault code; everything else
        # falls through to the base implementation's behavior.
        try:
            return XMLRPC._getFunction(self, functionPath)
        except xmlrpc.NoSuchFunction:
            if functionPath.startswith("SESSION"):
                raise xmlrpc.Fault(self.SESSION_EXPIRED,
                                   "Session non-existant/expired.")
            else:
                raise
class TestAuthHeader(Test):
    """
    This is used to get the header info so that we can test
    authentication.
    """
    def __init__(self):
        Test.__init__(self)
        self.request = None
    def render(self, request):
        # Capture the request so xmlrpc_authinfo can report its
        # credentials back to the caller.
        self.request = request
        return Test.render(self, request)
    def xmlrpc_authinfo(self):
        return self.request.getUser(), self.request.getPassword()
class TestQueryProtocol(xmlrpc.QueryProtocol):
    """
    QueryProtocol for tests that saves headers received inside the factory.
    """
    def connectionMade(self):
        # Expose the transport on the factory so tests can inspect its
        # connection state after the response completes.
        self.factory.transport = self.transport
        xmlrpc.QueryProtocol.connectionMade(self)
    def handleHeader(self, key, val):
        # Record each response header (lower-cased key) on the factory.
        self.factory.headers[key.lower()] = val
class TestQueryFactory(xmlrpc._QueryFactory):
    """
    QueryFactory using L{TestQueryProtocol} for saving headers.
    """
    protocol = TestQueryProtocol
    def __init__(self, *args, **kwargs):
        # Headers recorded by TestQueryProtocol.handleHeader end up here.
        self.headers = {}
        xmlrpc._QueryFactory.__init__(self, *args, **kwargs)
class TestQueryFactoryCancel(xmlrpc._QueryFactory):
    """
    QueryFactory that saves a reference to the
    L{twisted.internet.interfaces.IConnector} to test connection lost.
    """
    def startedConnecting(self, connector):
        # Keep the connector so test_cancel can assert on its state.
        self.connector = connector
class XMLRPCTestCase(unittest.TestCase):
    """
    End-to-end tests that run a real L{Test} XML-RPC site on a local TCP
    port and talk to it with L{xmlrpc.Proxy}.
    """
    def setUp(self):
        # Listen on an ephemeral port; each proxy() call targets it.
        self.p = reactor.listenTCP(0, server.Site(Test()),
                                   interface="127.0.0.1")
        self.port = self.p.getHost().port
        self.factories = []
    def tearDown(self):
        self.factories = []
        return self.p.stopListening()
    def queryFactory(self, *args, **kwargs):
        """
        Specific queryFactory for proxy that uses our custom
        L{TestQueryFactory}, and save factories.
        """
        factory = TestQueryFactory(*args, **kwargs)
        self.factories.append(factory)
        return factory
    def proxy(self, factory=None):
        """
        Return a new xmlrpc.Proxy for the test site created in
        setUp(), using the given factory as the queryFactory, or
        self.queryFactory if no factory is provided.
        """
        p = xmlrpc.Proxy("http://127.0.0.1:%d/" % self.port)
        if factory is None:
            p.queryFactory = self.queryFactory
        else:
            p.queryFactory = factory
        return p
    def test_results(self):
        """
        Each (method, args, expected) triple round-trips through the
        server and yields the expected result.
        """
        inputOutput = [
            ("add", (2, 3), 5),
            ("defer", ("a",), "a"),
            ("dict", ({"a": 1}, "a"), 1),
            ("pair", ("a", 1), ["a", 1]),
            ("complex", (), {"a": ["b", "c", 12, []], "D": "foo"})]
        dl = []
        for meth, args, outp in inputOutput:
            d = self.proxy().callRemote(meth, *args)
            d.addCallback(self.assertEquals, outp)
            dl.append(d)
        return defer.DeferredList(dl, fireOnOneErrback=True)
    def test_errors(self):
        """
        Verify that for each way a method exposed via XML-RPC can fail, the
        correct 'Content-type' header is set in the response and that the
        client-side Deferred is errbacked with an appropriate C{Fault}
        instance.
        """
        dl = []
        for code, methodName in [(666, "fail"), (666, "deferFail"),
                                 (12, "fault"), (23, "noSuchMethod"),
                                 (17, "deferFault"), (42, "SESSION_TEST")]:
            d = self.proxy().callRemote(methodName)
            d = self.assertFailure(d, xmlrpc.Fault)
            # Bind code as a default argument so each lambda checks its
            # own expected fault code, not the loop's final value.
            d.addCallback(lambda exc, code=code:
                          self.assertEquals(exc.faultCode, code))
            dl.append(d)
        d = defer.DeferredList(dl, fireOnOneErrback=True)
        def cb(ign):
            for factory in self.factories:
                self.assertEquals(factory.headers['content-type'],
                                  'text/xml')
            self.flushLoggedErrors(TestRuntimeError, TestValueError)
        d.addCallback(cb)
        return d
    def test_cancel(self):
        """
        A deferred from the Proxy can be cancelled, disconnecting
        the L{twisted.internet.interfaces.IConnector}.
        """
        # Stash the created factory on the function object so the test
        # can reach the connector it recorded.
        def factory(*args, **kw):
            factory.f = TestQueryFactoryCancel(*args, **kw)
            return factory.f
        d = self.proxy(factory).callRemote('add', 2, 3)
        self.assertNotEquals(factory.f.connector.state, "disconnected")
        d.cancel()
        self.assertEquals(factory.f.connector.state, "disconnected")
        d = self.assertFailure(d, defer.CancelledError)
        return d
    def test_errorGet(self):
        """
        A classic GET on the xml server should return a NOT_ALLOWED.
        """
        d = client.getPage("http://127.0.0.1:%d/" % (self.port,))
        d = self.assertFailure(d, error.Error)
        d.addCallback(
            lambda exc: self.assertEquals(int(exc.args[0]), http.NOT_ALLOWED))
        return d
    def test_errorXMLContent(self):
        """
        Test that an invalid XML input returns an L{xmlrpc.Fault}.
        """
        d = client.getPage("http://127.0.0.1:%d/" % (self.port,),
                           method="POST", postdata="foo")
        def cb(result):
            self.assertRaises(xmlrpc.Fault, xmlrpclib.loads, result)
        d.addCallback(cb)
        return d
    def test_datetimeRoundtrip(self):
        """
        If an L{xmlrpclib.DateTime} is passed as an argument to an XML-RPC
        call and then returned by the server unmodified, the result should
        be equal to the original object.
        """
        when = xmlrpclib.DateTime()
        d = self.proxy().callRemote("echo", when)
        d.addCallback(self.assertEqual, when)
        return d
    def test_doubleEncodingError(self):
        """
        If it is not possible to encode a response to the request (for example,
        because L{xmlrpclib.dumps} raises an exception when encoding a
        L{Fault}) the exception which prevents the response from being
        generated is logged and the request object is finished anyway.
        """
        d = self.proxy().callRemote("echo", "")
        # *Now* break xmlrpclib.dumps.  Hopefully the client already used it.
        def fakeDumps(*args, **kwargs):
            raise RuntimeError("Cannot encode anything at all!")
        self.patch(xmlrpclib, 'dumps', fakeDumps)
        # It doesn't matter how it fails, so long as it does.  Also, it happens
        # to fail with an implementation detail exception right now, not
        # something suitable as part of a public interface.
        d = self.assertFailure(d, Exception)
        def cbFailed(ignored):
            # The fakeDumps exception should have been logged.
            self.assertEqual(len(self.flushLoggedErrors(RuntimeError)), 1)
        d.addCallback(cbFailed)
        return d
    def test_closeConnectionAfterRequest(self):
        """
        The connection to the web server is closed when the request is done.
        """
        d = self.proxy().callRemote('echo', '')
        def responseDone(ignored):
            [factory] = self.factories
            self.assertFalse(factory.transport.connected)
            self.assertTrue(factory.transport.disconnected)
        return d.addCallback(responseDone)
class XMLRPCTestCase2(XMLRPCTestCase):
    """
    Test with proxy that doesn't add a slash.
    """
    def proxy(self, factory=None):
        # Identical to the base proxy() except the URL has no trailing
        # slash, which is the point of this subclass.
        p = xmlrpc.Proxy("http://127.0.0.1:%d" % self.port)
        if factory is None:
            p.queryFactory = self.queryFactory
        else:
            p.queryFactory = factory
        return p
class SerializationConfigMixin:
    """
    Mixin which defines a couple tests which should pass when a particular flag
    is passed to L{XMLRPC}.

    These are not meant to be exhaustive serialization tests, since L{xmlrpclib}
    does all of the actual serialization work.  They are just meant to exercise
    a few codepaths to make sure we are calling into xmlrpclib correctly.

    @ivar flagName: A C{str} giving the name of the flag which must be passed to
        L{XMLRPC} to allow the tests to pass.  Subclasses should set this.

    @ivar value: A value which the specified flag will allow the serialization
        of.  Subclasses should set this.
    """
    def setUp(self):
        """
        Create a new XML-RPC server with the flag named by C{self.flagName}
        set to C{True}, and a proxy configured the same way.
        """
        kwargs = {self.flagName: True}
        self.p = reactor.listenTCP(
            0, server.Site(Test(**kwargs)), interface="127.0.0.1")
        self.addCleanup(self.p.stopListening)
        self.port = self.p.getHost().port
        self.proxy = xmlrpc.Proxy(
            "http://127.0.0.1:%d/" % (self.port,), **kwargs)
    def test_roundtripValue(self):
        """
        C{self.value} can be round-tripped over an XMLRPC method call/response.
        """
        d = self.proxy.callRemote('defer', self.value)
        d.addCallback(self.assertEquals, self.value)
        return d
    def test_roundtripNestedValue(self):
        """
        A C{dict} which contains C{self.value} can be round-tripped over an
        XMLRPC method call/response.
        """
        d = self.proxy.callRemote('defer', {'a': self.value})
        d.addCallback(self.assertEquals, {'a': self.value})
        return d
class XMLRPCAllowNoneTestCase(SerializationConfigMixin, unittest.TestCase):
    """
    Tests for passing C{None} when the C{allowNone} flag is set.
    """
    flagName = "allowNone"
    value = None
# Probe whether this xmlrpclib supports the use_datetime flag (added in
# Python 2.5); older versions raise TypeError for the keyword.  The result
# gates the datetime test cases below.
try:
    xmlrpclib.loads(xmlrpclib.dumps(({}, {})), use_datetime=True)
except TypeError:
    _datetimeSupported = False
else:
    _datetimeSupported = True
class XMLRPCUseDateTimeTestCase(SerializationConfigMixin, unittest.TestCase):
    """
    Tests for passing a C{datetime.datetime} instance when the C{useDateTime}
    flag is set.
    """
    flagName = "useDateTime"
    value = datetime.datetime(2000, 12, 28, 3, 45, 59)
    # Skipped entirely where xmlrpclib lacks use_datetime support.
    if not _datetimeSupported:
        skip = (
            "Available version of xmlrpclib does not support datetime "
            "objects.")
class XMLRPCDisableUseDateTimeTestCase(unittest.TestCase):
    """
    Tests for the C{useDateTime} flag on Python 2.4.
    """
    # These tests only make sense where use_datetime is NOT supported.
    if _datetimeSupported:
        skip = (
            "Available version of xmlrpclib supports datetime objects.")
    def test_cannotInitializeWithDateTime(self):
        """
        L{XMLRPC} raises L{RuntimeError} if passed C{True} for C{useDateTime}.
        """
        self.assertRaises(RuntimeError, XMLRPC, useDateTime=True)
        self.assertRaises(
            RuntimeError, Proxy, "http://localhost/", useDateTime=True)
    def test_cannotSetDateTime(self):
        """
        Setting L{XMLRPC.useDateTime} to C{True} after initialization raises
        L{RuntimeError}.
        """
        # NOTE: this local deliberately shadows the xmlrpc module within
        # this method; only attribute assignment is exercised here.
        xmlrpc = XMLRPC(useDateTime=False)
        self.assertRaises(RuntimeError, setattr, xmlrpc, "useDateTime", True)
        proxy = Proxy("http://localhost/", useDateTime=False)
        self.assertRaises(RuntimeError, setattr, proxy, "useDateTime", True)
class XMLRPCTestAuthenticated(XMLRPCTestCase):
    """
    Test with authenticated proxy.  We run this with the same input/output
    as above.
    """
    user = "username"
    password = "asecret"
    def setUp(self):
        # Serve TestAuthHeader so the credentials the server saw can be
        # echoed back by the 'authinfo' method.
        self.p = reactor.listenTCP(0, server.Site(TestAuthHeader()),
                                   interface="127.0.0.1")
        self.port = self.p.getHost().port
        self.factories = []
    def test_authInfoInURL(self):
        # Credentials embedded in the URL itself.
        p = xmlrpc.Proxy("http://%s:%s@127.0.0.1:%d/" % (
                self.user, self.password, self.port))
        d = p.callRemote("authinfo")
        d.addCallback(self.assertEquals, [self.user, self.password])
        return d
    def test_explicitAuthInfo(self):
        # Credentials passed as explicit Proxy arguments.
        p = xmlrpc.Proxy("http://127.0.0.1:%d/" % (
                self.port,), self.user, self.password)
        d = p.callRemote("authinfo")
        d.addCallback(self.assertEquals, [self.user, self.password])
        return d
    def test_explicitAuthInfoOverride(self):
        # Explicit arguments take precedence over credentials in the URL.
        p = xmlrpc.Proxy("http://wrong:info@127.0.0.1:%d/" % (
                self.port,), self.user, self.password)
        d = p.callRemote("authinfo")
        d.addCallback(self.assertEquals, [self.user, self.password])
        return d
class XMLRPCTestIntrospection(XMLRPCTestCase):
    """
    Tests for the system.* introspection methods added by
    L{addIntrospection}.
    """
    def setUp(self):
        # NOTE: this local deliberately shadows the xmlrpc module within
        # this method.
        xmlrpc = Test()
        addIntrospection(xmlrpc)
        self.p = reactor.listenTCP(0, server.Site(xmlrpc),interface="127.0.0.1")
        self.port = self.p.getHost().port
        self.factories = []
    def test_listMethods(self):
        # This expected list must be kept in sync with the xmlrpc_ methods
        # defined on the Test class above.
        def cbMethods(meths):
            meths.sort()
            self.assertEqual(
                meths,
                ['add', 'complex', 'defer', 'deferFail',
                 'deferFault', 'dict', 'echo', 'fail', 'fault',
                 'pair', 'system.listMethods',
                 'system.methodHelp',
                 'system.methodSignature', 'withRequest'])
        d = self.proxy().callRemote("system.listMethods")
        d.addCallback(cbMethods)
        return d
    def test_methodHelp(self):
        # Help text comes from docstrings or the .help attribute.
        inputOutputs = [
            ("defer", "Help for defer."),
            ("fail", ""),
            ("dict", "Help for dict.")]
        dl = []
        for meth, expected in inputOutputs:
            d = self.proxy().callRemote("system.methodHelp", meth)
            d.addCallback(self.assertEquals, expected)
            dl.append(d)
        return defer.DeferredList(dl, fireOnOneErrback=True)
    def test_methodSignature(self):
        # Signatures come from the .signature attribute; methods without
        # one report the empty string.
        inputOutputs = [
            ("defer", ""),
            ("add", [['int', 'int', 'int'],
                     ['double', 'double', 'double']]),
            ("pair", [['array', 'string', 'int']])]
        dl = []
        for meth, expected in inputOutputs:
            d = self.proxy().callRemote("system.methodSignature", meth)
            d.addCallback(self.assertEquals, expected)
            dl.append(d)
        return defer.DeferredList(dl, fireOnOneErrback=True)
class XMLRPCClientErrorHandling(unittest.TestCase):
    """
    Test error handling on the xmlrpc client.
    """
    def setUp(self):
        # Serve a plain-text resource that is not valid XML-RPC.
        self.resource = static.Data(
            "This text is not a valid XML-RPC response.",
            "text/plain")
        self.resource.isLeaf = True
        self.port = reactor.listenTCP(0, server.Site(self.resource),
                                                     interface='127.0.0.1')
    def tearDown(self):
        return self.port.stopListening()
    def test_erroneousResponse(self):
        """
        Test that calling the xmlrpc client on a static http server raises
        an exception.
        """
        proxy = xmlrpc.Proxy("http://127.0.0.1:%d/" %
                             (self.port.getHost().port,))
        return self.assertFailure(proxy.callRemote("someMethod"), Exception)
class TestQueryFactoryParseResponse(unittest.TestCase):
"""
Test the behaviour of L{_QueryFactory.parseResponse}.
"""
def setUp(self):
# The _QueryFactory that we are testing. We don't care about any
# of the constructor parameters.
self.queryFactory = _QueryFactory(
path=None, host=None, method='POST', user=None, password=None,
allowNone=False, args=())
# An XML-RPC response that will parse without raising an error.
self.goodContents = xmlrpclib.dumps(('',))
# An 'XML-RPC response' that will raise a parsing error.
self.badContents = 'invalid xml'
# A dummy 'reason' to pass to clientConnectionLost. We don't care
# what it is.
self.reason = failure.Failure(ConnectionDone())
def test_parseResponseCallbackSafety(self):
"""
We can safely call L{_QueryFactory.clientConnectionLost} as a callback
of L{_QueryFactory.parseResponse}.
"""
d = self.queryFactory.deferred
# The failure mode is that this callback raises an AlreadyCalled
# error. We have to add it now so that it gets called synchronously
# and triggers the race condition.
d.addCallback(self.queryFactory.clientConnectionLost, self.reason)
self.queryFactory.parseResponse(self.goodContents)
return d
def test_parseResponseErrbackSafety(self):
"""
We can safely call L{_QueryFactory.clientConnectionLost} as an errback
of L{_QueryFactory.parseResponse}.
"""
d = self.queryFactory.deferred
# The failure mode is that this callback raises an AlreadyCalled
# error. We have to add it now so that it gets called synchronously
# and triggers the race condition.
d.addErrback(self.queryFactory.clientConnectionLost, self.reason)
self.queryFactory.parseResponse(self.badContents)
return d
def test_badStatusErrbackSafety(self):
"""
We can safely call L{_QueryFactory.clientConnectionLost} as an errback
of L{_QueryFactory.badStatus}.
"""
d = self.queryFactory.deferred
# The failure mode is that this callback raises an AlreadyCalled
# error. We have to add it now so that it gets called synchronously
# and triggers the race condition.
d.addErrback(self.queryFactory.clientConnectionLost, self.reason)
self.queryFactory.badStatus('status', 'message')
return d
    def test_parseResponseWithoutData(self):
        """
        Some server can send a response without any data:
        L{_QueryFactory.parseResponse} should catch the error and call the
        result errback.
        """
        # A structurally valid methodResponse whose <param> carries no
        # <value>; xmlrpclib raises IndexError while unmarshalling it.
        content = """
<methodResponse>
 <params>
  <param>
  </param>
 </params>
</methodResponse>"""
        d = self.queryFactory.deferred
        self.queryFactory.parseResponse(content)
        return self.assertFailure(d, IndexError)
class XMLRPCTestWithRequest(unittest.TestCase):
    """
    Tests for XML-RPC methods decorated with L{withRequest}.
    """

    def setUp(self):
        self.resource = Test()

    def test_withRequest(self):
        """
        When an XML-RPC method is called and the implementation is
        decorated with L{withRequest}, the request object is passed as
        the first argument.
        """
        request = DummyRequest('/RPC2')
        request.method = "POST"
        request.content = StringIO(xmlrpclib.dumps(("foo",), 'withRequest'))
        def valid(n, request):
            # The bytes written to the request must decode to the
            # XML-RPC encoding of the method's return value.
            data = xmlrpclib.loads(request.written[0])
            # assertEqual instead of the deprecated assertEquals alias.
            self.assertEqual(data, (('POST foo',), None))
        d = request.notifyFinish().addCallback(valid, request)
        self.resource.render_POST(request)
        return d
| apache-2.0 |
ARCCN/elt | server/ext/debugger/elt/logger/messages.py | 2 | 1442 | from ..message_server import Message, ClosingMessage
from ..interaction import instantiate
from ..network_error import NetworkError
class HelloMessage(Message):
    """
    Let's name ourselves!

    Carries the sender's name so connected peers can identify each other.
    """

    def __init__(self, name=""):
        Message.__init__(self)
        self.name = name

    def __setstate__(self, d):
        # Restore only the keys present in the wire dict; a missing key
        # deliberately leaves the attribute unset.
        if "name" in d:
            self.name = d["name"]
class LogMessage(Message):
    """
    Error to be saved to log.

    Wraps a NetworkError event for delivery to the logger.
    """

    def __init__(self, event=None):
        Message.__init__(self)
        self.event = event

    def __setstate__(self, d):
        if "event" in d:
            self.event = d["event"]
            # The event may arrive as a plain state dict rather than an
            # instance; rebuild the NetworkError from it in that case.
            if isinstance(self.event, dict):
                self.event = NetworkError()
                self.event.__setstate__(d["event"])
class ReportQuery(Message):
    """
    Request log files encoded into specific format.
    """

    def __init__(self, fmt="pure"):
        Message.__init__(self)
        # Requested encoding of the report (default "pure").
        self.fmt = fmt

    def __setstate__(self, d):
        # Restore only the keys present in the wire dict.
        if "fmt" in d:
            self.fmt = d["fmt"]
class ReportReply(Message):
    """
    Return log files encoded into specific format.
    """

    def __init__(self, report=None, fmt="pure"):
        Message.__init__(self)
        # Encoding used for the payload, mirroring ReportQuery.fmt.
        self.fmt = fmt
        # The encoded report payload itself.
        self.report = report

    def __setstate__(self, d):
        # Restore only the keys present in the wire dict.
        if "fmt" in d:
            self.fmt = d["fmt"]
        if "report" in d:
            self.report = d["report"]
| bsd-3-clause |
kumarshivam675/Mobile10X-Hack | sidd/virtualenv-14.0.6/flask/lib/python2.7/site-packages/pip/_vendor/_markerlib/markers.py | 1769 | 3979 | # -*- coding: utf-8 -*-
"""Interpret PEP 345 environment markers.
EXPR [in|==|!=|not in] EXPR [or|and] ...
where EXPR belongs to any of those:
python_version = '%s.%s' % (sys.version_info[0], sys.version_info[1])
python_full_version = sys.version.split()[0]
os.name = os.name
sys.platform = sys.platform
platform.version = platform.version()
platform.machine = platform.machine()
platform.python_implementation = platform.python_implementation()
a free string, like '2.6', or 'win32'
"""
__all__ = ['default_environment', 'compile', 'interpret']

import ast
import os
import platform
import sys
import weakref

# This module defines its own compile(); keep a handle on the builtin.
_builtin_compile = compile

try:
    from platform import python_implementation
except ImportError:
    if os.name == "java":
        # Jython 2.5 has ast module, but not platform.python_implementation() function.
        def python_implementation():
            return "Jython"
    else:
        raise


# restricted set of variables
_VARS = {'sys.platform': sys.platform,
         'python_version': '%s.%s' % sys.version_info[:2],
         # FIXME parsing sys.platform is not reliable, but there is no other
         # way to get e.g. 2.7.2+, and the PEP is defined with sys.version
         'python_full_version': sys.version.split(' ', 1)[0],
         'os.name': os.name,
         'platform.version': platform.version(),
         'platform.machine': platform.machine(),
         'platform.python_implementation': python_implementation(),
         'extra': None # wheel extension
        }

# Mirror every dotted variable under an underscore alias (os.name -> os_name)
# so markers can be evaluated as plain Python names.
for var in list(_VARS.keys()):
    if '.' in var:
        _VARS[var.replace('.', '_')] = _VARS[var]
def default_environment():
    """Return a shallow copy of the default PEP 345 globals dictionary."""
    return dict(_VARS)
class ASTWhitelist(ast.NodeTransformer):
    """Reject any AST node outside the small marker-expression subset.

    Walking the parsed marker through this transformer before compiling it
    is what makes the later eval() safe.
    """

    def __init__(self, statement):
        self.statement = statement # for error messages

    ALLOWED = (ast.Compare, ast.BoolOp, ast.Attribute, ast.Name, ast.Load, ast.Str)
    # Bool operations
    ALLOWED += (ast.And, ast.Or)
    # Comparison operations
    ALLOWED += (ast.Eq, ast.Gt, ast.GtE, ast.In, ast.Is, ast.IsNot, ast.Lt, ast.LtE, ast.NotEq, ast.NotIn)

    def visit(self, node):
        """Ensure statement only contains allowed nodes."""
        if not isinstance(node, self.ALLOWED):
            # Point a caret at the offending column of the original marker.
            raise SyntaxError('Not allowed in environment markers.\n%s\n%s' %
                              (self.statement,
                               (' ' * node.col_offset) + '^'))
        return ast.NodeTransformer.visit(self, node)

    def visit_Attribute(self, node):
        """Flatten one level of attribute access."""
        # e.g. `os.name` becomes the single Name "os.name", matching the
        # dotted keys in _VARS.  Assumes node.value is itself a Name --
        # deeper chains would fail here; TODO confirm markers never nest.
        new_node = ast.Name("%s.%s" % (node.value.id, node.attr), node.ctx)
        return ast.copy_location(new_node, node)
def parse_marker(marker):
    """Parse *marker* and restrict its AST to the whitelisted node types."""
    parsed = ast.parse(marker, mode='eval')
    return ASTWhitelist(marker).generic_visit(parsed)
def compile_marker(parsed_marker):
    """Compile a whitelisted marker AST into an eval-able code object."""
    return _builtin_compile(
        parsed_marker, '<environment marker>', 'eval', dont_inherit=True)
_cache = weakref.WeakValueDictionary()
# NOTE: intentionally shadows the builtin compile(); the builtin is kept
# as _builtin_compile above.
def compile(marker):
    """Return compiled marker as a function accepting an environment dict."""
    try:
        return _cache[marker]
    except KeyError:
        pass
    if not marker.strip():
        # An empty marker is trivially true; no parsing needed.
        def marker_fn(environment=None, override=None):
            """"""
            return True
    else:
        compiled_marker = compile_marker(parse_marker(marker))
        def marker_fn(environment=None, override=None):
            """override updates environment"""
            if override is None:
                override = {}
            if environment is None:
                environment = default_environment()
            environment.update(override)
            # Safe eval: the AST was whitelisted before compilation.
            return eval(compiled_marker, environment)
    # The marker text itself becomes the function's docstring.
    marker_fn.__doc__ = marker
    _cache[marker] = marker_fn
    return _cache[marker]
def interpret(marker, environment=None):
    """Compile (or fetch from cache) *marker* and evaluate it."""
    marker_fn = compile(marker)
    return marker_fn(environment)
| gpl-3.0 |
Br3nda/calcalcal | pylib/sqlalchemy/dialects/oracle/zxjdbc.py | 12 | 7743 | # oracle/zxjdbc.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: oracle+zxjdbc
:name: zxJDBC for Jython
:dbapi: zxjdbc
:connectstring: oracle+zxjdbc://user:pass@host/dbname
:driverurl: http://www.oracle.com/technology/software/tech/java/sqlj_jdbc/index.html.
"""
import decimal
import re
from sqlalchemy import sql, types as sqltypes, util
from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector
from sqlalchemy.dialects.oracle.base import OracleCompiler, OracleDialect, OracleExecutionContext
from sqlalchemy.engine import result as _result
from sqlalchemy.sql import expression
import collections
SQLException = zxJDBC = None
class _ZxJDBCDate(sqltypes.Date):
    """Date type that reduces JDBC timestamps to their date portion."""

    def result_processor(self, dialect, coltype):
        def process(value):
            # zxJDBC hands back a datetime-like object; keep only the date.
            return None if value is None else value.date()
        return process
class _ZxJDBCNumeric(sqltypes.Numeric):
    """Numeric type converting results to Decimal or float per ``asdecimal``."""

    def result_processor(self, dialect, coltype):
        #XXX: does the dialect return Decimal or not???
        # if it does (in all cases), we could use a None processor as well as
        # the to_float generic processor
        if self.asdecimal:
            def process(value):
                if isinstance(value, decimal.Decimal):
                    return value
                return decimal.Decimal(str(value))
        else:
            def process(value):
                if isinstance(value, decimal.Decimal):
                    return float(value)
                return value
        return process
class OracleCompiler_zxjdbc(OracleCompiler):
    """Oracle SQL compiler that renders ``RETURNING ... INTO`` bind params."""

    def returning_clause(self, stmt, returning_cols):
        self.returning_cols = list(expression._select_iterables(returning_cols))

        # within_columns_clause=False so that labels (foo AS bar) don't render
        columns = [self.process(c, within_columns_clause=False, result_map=self.result_map)
                   for c in self.returning_cols]

        if not hasattr(self, 'returning_parameters'):
            self.returning_parameters = []

        binds = []
        for i, col in enumerate(self.returning_cols):
            # Record (1-based position, DBAPI type) so the execution context
            # can read the value back from the JDBC return ResultSet.
            dbtype = col.type.dialect_impl(self.dialect).get_dbapi_type(self.dialect.dbapi)
            self.returning_parameters.append((i + 1, dbtype))

            # Each RETURNING column gets a ReturningParam placeholder bind,
            # recognized later by OracleReturningDataHandler.
            bindparam = sql.bindparam("ret_%d" % i, value=ReturningParam(dbtype))
            self.binds[bindparam.key] = bindparam
            binds.append(self.bindparam_string(self._truncate_bindparam(bindparam)))

        return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds)
class OracleExecutionContext_zxjdbc(OracleExecutionContext):
    """Execution context that fetches RETURNING values through the JDBC
    OraclePreparedStatement's return ResultSet."""

    def pre_exec(self):
        if hasattr(self.compiled, 'returning_parameters'):
            # prepare a zxJDBC statement so we can grab its underlying
            # OraclePreparedStatement's getReturnResultSet later
            self.statement = self.cursor.prepare(self.statement)

    def get_result_proxy(self):
        if hasattr(self.compiled, 'returning_parameters'):
            rrs = None
            try:
                try:
                    rrs = self.statement.__statement__.getReturnResultSet()
                    rrs.next()
                except SQLException, sqle:
                    # Re-raise JDBC failures as DBAPI errors with the
                    # vendor code and SQLState preserved in the message.
                    msg = '%s [SQLCode: %d]' % (sqle.getMessage(), sqle.getErrorCode())
                    if sqle.getSQLState() is not None:
                        msg += ' [SQLState: %s]' % sqle.getSQLState()
                    raise zxJDBC.Error(msg)
                else:
                    row = tuple(self.cursor.datahandler.getPyObject(rrs, index, dbtype)
                                for index, dbtype in self.compiled.returning_parameters)
                    return ReturningResultProxy(self, row)
            finally:
                # Always release the return ResultSet and the statement,
                # even when conversion above raised.
                if rrs is not None:
                    try:
                        rrs.close()
                    except SQLException:
                        pass
                self.statement.close()

        return _result.ResultProxy(self)

    def create_cursor(self):
        cursor = self._dbapi_connection.cursor()
        # Wrap the data handler so ReturningParam binds are registered as
        # JDBC return parameters (see OracleReturningDataHandler).
        cursor.datahandler = self.dialect.DataHandler(cursor.datahandler)
        return cursor
class ReturningResultProxy(_result.FullyBufferedResultProxy):
    """ResultProxy backed by the RETURNING ResultSet results."""

    def __init__(self, context, returning_row):
        self._returning_row = returning_row
        super(ReturningResultProxy, self).__init__(context)

    def _cursor_description(self):
        # Build a cursor description from the compiled RETURNING columns;
        # anonymous expressions fall back to their generated label.
        return [
            (col.name if hasattr(col, 'name') else col.anon_label, col.type)
            for col in self.context.compiled.returning_cols
        ]

    def _buffer_rows(self):
        # Exactly one pre-fetched row backs this proxy.
        return collections.deque([self._returning_row])
class ReturningParam(object):
    """A bindparam value representing a RETURNING parameter.

    Specially handled by OracleReturningDataHandler.
    """

    def __init__(self, type):
        # DBAPI type code of the RETURNING column.
        self.type = type

    def __eq__(self, other):
        if isinstance(other, ReturningParam):
            return self.type == other.type
        return NotImplemented

    def __ne__(self, other):
        if isinstance(other, ReturningParam):
            return self.type != other.type
        return NotImplemented

    def __hash__(self):
        # __eq__ is value-based, so hashing must be too; without this,
        # equal params would hash by identity and land in different
        # dict/set buckets.
        return hash(self.type)

    def __repr__(self):
        kls = self.__class__
        return '<%s.%s object at 0x%x type=%s>' % (kls.__module__, kls.__name__, id(self),
                                                   self.type)
class OracleDialect_zxjdbc(ZxJDBCConnector, OracleDialect):
    """Oracle dialect for the zxJDBC (Jython) DBAPI."""

    jdbc_db_name = 'oracle'
    jdbc_driver_name = 'oracle.jdbc.OracleDriver'

    statement_compiler = OracleCompiler_zxjdbc
    execution_ctx_cls = OracleExecutionContext_zxjdbc

    colspecs = util.update_copy(
        OracleDialect.colspecs,
        {
            sqltypes.Date: _ZxJDBCDate,
            sqltypes.Numeric: _ZxJDBCNumeric
        }
    )

    def __init__(self, *args, **kwargs):
        super(OracleDialect_zxjdbc, self).__init__(*args, **kwargs)
        # Import the Java-side classes lazily so this module stays
        # importable outside Jython (see the module-level None globals).
        global SQLException, zxJDBC
        from java.sql import SQLException
        from com.ziclix.python.sql import zxJDBC
        from com.ziclix.python.sql.handler import OracleDataHandler

        class OracleReturningDataHandler(OracleDataHandler):
            """zxJDBC DataHandler that specially handles ReturningParam."""

            def setJDBCObject(self, statement, index, object, dbtype=None):
                # ReturningParam placeholders are registered as JDBC
                # return parameters instead of being bound as values.
                if type(object) is ReturningParam:
                    statement.registerReturnParameter(index, object.type)
                elif dbtype is None:
                    OracleDataHandler.setJDBCObject(
                        self, statement, index, object)
                else:
                    OracleDataHandler.setJDBCObject(
                        self, statement, index, object, dbtype)
        self.DataHandler = OracleReturningDataHandler

    def initialize(self, connection):
        super(OracleDialect_zxjdbc, self).initialize(connection)
        # RETURNING support requires the 10.2+ JDBC driver.
        self.implicit_returning = connection.connection.driverversion >= '10.2'

    def _create_jdbc_url(self, url):
        return 'jdbc:oracle:thin:@%s:%s:%s' % (url.host, url.port or 1521, url.database)

    def _get_server_version_info(self, connection):
        version = re.search(r'Release ([\d\.]+)', connection.connection.dbversion).group(1)
        return tuple(int(x) for x in version.split('.'))

dialect = OracleDialect_zxjdbc
| mit |
rfancn/myprojects | spload/network/urllibhttprequest.py | 1 | 6012 | """
Copyright (C) 2010-2013, Ryan Fan
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Library General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
"""
import httplib, urllib, urllib2
import logging
import gzip
import urlparse
import os
from httplib import BadStatusLine
from StringIO import StringIO
# socket timeout in seconds
DEFAULT_SOCKET_TIMEOUT = 180

# HTTP status codes treated as fatal.  Note the deliberate gaps: 404 and
# 418 are excluded (see verifyHeader), as are codes above 505.
bad_headers = range(400, 404) + range(405, 418) + range(500, 506)
class BadHeader(Exception):
    """Raised when the server answers with a fatal HTTP status code."""

    def __init__(self, code, content=""):
        # Resolve the numeric code to its standard reason phrase.
        reason = httplib.responses.get(int(code), "Unknown Header")
        Exception.__init__(self, "Bad server response: %s %s" % (code, reason))
        self.code = code
        self.content = content
class UrllibHTTPRequest:
def __init__(self, cookiefile):
self.cj = None
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
self.opener.addheaders = [
('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20100101 Firefox/15.0.1'),
( 'Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),
('Accept-Language', 'zh-cn,zh;q=0.8,en-us;q=0.5,en;q=0.3'),
('Accept-Encoding', 'gzip, deflate'),
('Connection', 'keep-alive')
]
self.lastURL = None
self.lastEffectiveURL = None
self.abort = False
# last http code
self.code = 0
self.httpheader = ""
self.httpbody = ""
def headFetch(self, url, data = None):
if data: data = urllib.urlencode(data)
url = url + '?' + data
parts = urlparse.urlparse(url)
if not parts.path:
path = "/"
if parts.params:
path = path + ";" + parts.params
if parts.query:
path = path + "?" + parts.query
conn = httplib.HTTPConnection(parts.netloc, timeout=10)
conn.request('HEAD',path)
response = conn.getresponse()
self.code = self.verifyHeader(response.status)
self.httpheader = response.getheaders()
# TODO: raise BadHeader may abort the conn.close
conn.close()
return self.httpheader
#parse header
#header = {"code": self.code}
#for line in response.splitlines():
# line = line.strip()
# if not line or ":" not in line: continue
# key, none, value = line.partition(":")
# key = key.lower().strip()
# value = value.strip()
#
# if key in header:
# if type(header[key]) == list:
# header[key].append(value)
# else:
# header[key] = [header[key], value]
# else:
# header[key] = value
# res = header
def fetch(self, url, method="GET", data = None):
# if need fetch both http header and body
if data: data = urllib.urlencode(data)
req = urllib2.Request(url, data)
method = method.upper()
if method not in ['POST', 'GET']:
logging.error("http fetch method is not valid: %s!" % method)
return None
else:
req.get_method = lambda:method
if data and len(data) > 10:
data = "..." + data[-10:]
logging.debug("http method: %s, url: %s, data: %s" % (method, url, data))
try:
response = self.opener.open(req, timeout=DEFAULT_SOCKET_TIMEOUT)
except IOError, e:
logging.error("We failed to open %s." % url)
if hasattr(e, 'code'):
logging.error("Failed with code - %s" % e.code)
elif hasattr(e, 'reason'):
logging.error('''Failed to reach a server,
This usually means the server doesn't exist or down,
or we don't have an internet connection.
The reason is %s.''' % e.reason)
return None
except BadStatusLine:
logging.error("Failed to fetch url: %s. because of: BadStatusLine" % url)
return None
except Exception, e:
logging.error("Failed to fetch url: %s. because of: %s" % (url, e))
return None
# save server side cookie info
self.cj.save()
# handle response
self.lastEffectiveURL = response.geturl()
self.httpheader = response.info().headers
# try to get html content
keys= map(lambda s:s.upper(), response.info().keys())
values = map(lambda s:s.upper(), response.info().values())
if ('CONTENT-ENCODING' in keys) and ('GZIP' in values):
buf = StringIO(response.read())
fp = gzip.GzipFile(fileobj=buf)
self.httpbody = fp.read()
else:
self.httpbody = response.read()
self.code = self.verifyHeader(response.getcode())
return self.httpbody
def verifyHeader(self, httpcode):
""" raise an exceptions on bad headers """
code = int(httpcode)
# TODO: raise anyway to be consistent, also rename exception
if code in bad_headers:
#404 will NOT raise an exception
raise BadHeader(code, self.httpbody)
return code | mit |
xujb/odoo | addons/website_mail/controllers/email_designer.py | 243 | 3151 | # -*- coding: utf-8 -*-
from urllib import urlencode
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.tools.mail import html_sanitize
class WebsiteEmailDesigner(http.Controller):
    """Website controller exposing the inline e-mail designer."""

    @http.route('/website_mail/email_designer', type='http', auth="user", website=True)
    def index(self, model, res_id, template_model=None, **kw):
        """Render the e-mail designer for record *res_id* of *model*.

        Redirects to '/' when the model is unknown, lacks the expected
        body/e-mail/subject fields, or the record does not exist.
        """
        if not model or not model in request.registry or not res_id:
            return request.redirect('/')
        model_fields = request.registry[model]._fields
        # The designer needs a body, a sender and a subject field; either
        # naming convention is accepted for each.
        if 'body' not in model_fields and 'body_html' not in model_fields or \
           'email' not in model_fields and 'email_from' not in model_fields or \
           'name' not in model_fields and 'subject' not in model_fields:
            return request.redirect('/')
        res_id = int(res_id)
        obj_ids = request.registry[model].exists(request.cr, request.uid, [res_id], context=request.context)
        if not obj_ids:
            return request.redirect('/')

        # try to find fields to display / edit -> as t-field is static, we have to limit
        # the available fields to a given subset
        email_from_field = 'email'
        if 'email_from' in model_fields:
            email_from_field = 'email_from'
        subject_field = 'name'
        if 'subject' in model_fields:
            subject_field = 'subject'
        body_field = 'body'
        if 'body_html' in model_fields:
            body_field = 'body_html'

        cr, uid, context = request.cr, request.uid, request.context
        record = request.registry[model].browse(cr, uid, res_id, context=context)
        values = {
            'record': record,
            'templates': None,
            'model': model,
            'res_id': res_id,
            'email_from_field': email_from_field,
            'subject_field': subject_field,
            'body_field': body_field,
            'return_action': kw.get('return_action', ''),
        }

        if getattr(record, body_field):
            # A body already exists: edit it directly.
            values['mode'] = 'email_designer'
        else:
            if kw.get('enable_editor'):
                # Drop the editor flag and reload so template selection
                # happens before the editor is enabled.
                kw.pop('enable_editor')
                fragments = dict(model=model, res_id=res_id, **kw)
                if template_model:
                    fragments['template_model'] = template_model
                return request.redirect('/website_mail/email_designer?%s' % urlencode(fragments))
            # No body yet: offer a template to start from.
            values['mode'] = 'email_template'
            tmpl_obj = request.registry['email.template']
            if template_model:
                tids = tmpl_obj.search(cr, uid, [('model', '=', template_model)], context=context)
            else:
                tids = tmpl_obj.search(cr, uid, [], context=context)
            templates = tmpl_obj.browse(cr, uid, tids, context=context)
            values['templates'] = templates

        values['html_sanitize'] = html_sanitize

        return request.website.render("website_mail.email_designer", values)

    @http.route(['/website_mail/snippets'], type='json', auth="user", website=True)
    def snippets(self):
        """Return the rendered snippet palette for the designer."""
        return request.website._render('website_mail.email_designer_snippets')
| agpl-3.0 |
digisavvy/pureallex | wp-content/themes/pureallax/node_modules/browser-sync/node_modules/socket.io/node_modules/socket.io-client/node_modules/engine.io-client/node_modules/engine.io-parser/node_modules/utf8/tests/generate-test-data.py | 2214 | 1347 | #!/usr/bin/env python
import re
import json
# http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
    # BMP code points map to a single code unit; astral code points are
    # expressed as a high/low surrogate pair.
    if 0x0000 <= codePoint <= 0xFFFF:
        return unichr(codePoint)
    if 0x010000 <= codePoint <= 0x10FFFF:
        offset = codePoint - 0x10000
        highSurrogate = 0xD800 + int(offset / 0x400)
        lowSurrogate = 0xDC00 + int(offset % 0x400)
        return unichr(highSurrogate) + unichr(lowSurrogate)
    # Out-of-range input.
    return 'Error'
def hexify(codePoint):
    # Zero-padded six-digit uppercase U+ notation, e.g. 65 -> 'U+000041'.
    return 'U+%06X' % codePoint
def writeFile(filename, contents):
print filename
with open(filename, 'w') as f:
f.write(contents.strip() + '\n')
# Build one entry per Unicode code point with its decoded symbol and its
# UTF-8 byte sequence (exposed as latin1-decoded text so json can
# serialize the raw bytes).
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
    symbol = unisymbol(codePoint)
    # http://stackoverflow.com/a/17199950/96656
    bytes = symbol.encode('utf8').decode('latin1')
    data.append({
        'codePoint': codePoint,
        'decoded': symbol,
        'encoded': bytes
    });
jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation
jsonData = jsonData.replace('  ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
    r'\\u([a-fA-F0-9]{4})',
    lambda match: r'\u{}'.format(match.group(1).upper()),
    jsonData
)
writeFile('data.json', jsonData)
| gpl-2.0 |
prrvchr/GContactOOo | CloudContactOOo/OAuth2OOo/OAuth2OOo/pythonpath/oauth2/oauth2setting.py | 1 | 13385 | #!
# -*- coding: utf_8 -*-
"""
╔════════════════════════════════════════════════════════════════════════════════════╗
║ ║
║ Copyright (c) 2020 https://prrvchr.github.io ║
║ ║
║ Permission is hereby granted, free of charge, to any person obtaining ║
║ a copy of this software and associated documentation files (the "Software"), ║
║ to deal in the Software without restriction, including without limitation ║
║ the rights to use, copy, modify, merge, publish, distribute, sublicense, ║
║ and/or sell copies of the Software, and to permit persons to whom the Software ║
║ is furnished to do so, subject to the following conditions: ║
║ ║
║ The above copyright notice and this permission notice shall be included in ║
║ all copies or substantial portions of the Software. ║
║ ║
║ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ║
║ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES ║
║ OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ║
║ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY ║
║ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, ║
║ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE ║
║ OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ║
║ ║
╚════════════════════════════════════════════════════════════════════════════════════╝
"""
import uno
import unohelper
from com.sun.star.embed import XTransactedObject
from com.sun.star.util import XUpdatable
from unolib import PropertySet
from unolib import getProperty
from unolib import getConfiguration
from unolib import KeyMap
from .configuration import g_identifier
from .configuration import g_refresh_overlap
import time
class OAuth2Setting(unohelper.Base,
                    XTransactedObject,
                    PropertySet):
    """Root of the OAuth2 configuration tree: timeouts plus the nested
    Url -> Scope -> Provider -> User settings chain."""

    def __init__(self, ctx):
        self.ctx = ctx
        self.configuration = getConfiguration(self.ctx, g_identifier, True)
        self.Url = UrlSetting(self.configuration)
        # Load the persisted timeout values.
        self.revert()

    @property
    def Timeout(self):
        # Returns (connect, read), a single value, or None depending on
        # which timeouts are configured (0 counts as unset).
        if self.ConnectTimeout and self.ReadTimeout:
            return self.ConnectTimeout, self.ReadTimeout
        elif self.ConnectTimeout:
            return self.ConnectTimeout
        elif self.ReadTimeout:
            return self.ReadTimeout
        return None

    @property
    def Initialized(self):
        # Ready when the selected provider is registered and its user
        # holds a usable token.
        return all((self.Url.Scope.Provider.MetaData, self.Url.Scope.Provider.User.IsValid))

    # XTransactedObject
    def commit(self):
        # Persist the timeout values back into the configuration node.
        self.configuration.replaceByName('ConnectTimeout', self.ConnectTimeout)
        self.configuration.replaceByName('ReadTimeout', self.ReadTimeout)
        self.configuration.replaceByName('HandlerTimeout', self.HandlerTimeout)
        if self.configuration.hasPendingChanges():
            self.configuration.commitChanges()

    def revert(self):
        # Reload the timeout values from the configuration node.
        self.HandlerTimeout = self.configuration.getByName('HandlerTimeout')
        self.ConnectTimeout = self.configuration.getByName('ConnectTimeout')
        self.ReadTimeout = self.configuration.getByName('ReadTimeout')

    def _getPropertySetInfo(self):
        # UNO property descriptors for this object.
        properties = {}
        readonly = uno.getConstantByName('com.sun.star.beans.PropertyAttribute.READONLY')
        transient = uno.getConstantByName('com.sun.star.beans.PropertyAttribute.TRANSIENT')
        properties['Url'] = getProperty('Url', 'com.sun.star.uno.XInterface', readonly)
        properties['HandlerTimeout'] = getProperty('HandlerTimeout', 'short', transient)
        properties['ConnectTimeout'] = getProperty('ConnectTimeout', 'short', transient)
        properties['ReadTimeout'] = getProperty('ReadTimeout', 'short', transient)
        properties['Timeout'] = getProperty('Timeout', 'any', readonly)
        properties['Initialized'] = getProperty('Initialized', 'boolean', readonly)
        return properties
class UrlSetting(unohelper.Base,
                 PropertySet):
    """Maps configured URL ids to their OAuth2 scope; selecting an Id
    cascades the matching scope into the nested ScopeSetting."""

    def __init__(self, configuration):
        self.configuration = configuration
        self.Scope = ScopeSetting(self.configuration)
        self._Id = ''
        # {url id: scope name} snapshot of the 'Urls' configuration node.
        self.Urls = self._getUrls()

    @property
    def Id(self):
        return self._Id

    @Id.setter
    def Id(self, id):
        # Refresh the snapshot first: the configuration may have changed.
        self.Urls = self._getUrls()
        if id in self.Urls:
            self._Id = id
            scope = self.Urls[id]
        else:
            # Unknown url: clear the selection and the dependent scope.
            self._Id = ''
            scope = ''
        self.Scope.Id = scope

    @property
    def UrlList(self):
        return tuple(self.Urls.keys())

    @property
    def Initialized(self):
        return self.Id != ''

    def _getUrls(self):
        # Read every url node's 'Scope' value into a plain dict.
        urls = {}
        url = self.configuration.getByName('Urls')
        for id in url.ElementNames:
            urls[id] = url.getByName(id).getByName('Scope')
        return urls

    def _getPropertySetInfo(self):
        # UNO property descriptors for this object.
        properties = {}
        readonly = uno.getConstantByName('com.sun.star.beans.PropertyAttribute.READONLY')
        transient = uno.getConstantByName('com.sun.star.beans.PropertyAttribute.TRANSIENT')
        properties['Scope'] = getProperty('Scope', 'com.sun.star.uno.XInterface', readonly)
        properties['Id'] = getProperty('Id', 'string', transient)
        properties['UrlList'] = getProperty('UrlList', '[]string', readonly)
        properties['Initialized'] = getProperty('Initialized', 'boolean', readonly)
        return properties
class ScopeSetting(unohelper.Base,
                   PropertySet):
    """Holds the OAuth2 scope values for the selected scope id; selecting
    an Id cascades the matching provider into the nested ProviderSetting."""

    def __init__(self, configuration):
        self._Id = ''
        # Scope value strings required for authorization.
        self._Scopes = []
        self.configuration = configuration
        self.Provider = ProviderSetting(self.configuration)

    @property
    def Id(self):
        return self._Id

    @Id.setter
    def Id(self, id):
        scopes = self.configuration.getByName('Scopes')
        if scopes.hasByName(id):
            self._Id = id
            s = scopes.getByName(id)
            self._Scopes = s.getByName('Values')
            provider = s.getByName('Provider')
        else:
            # Unknown scope: clear the selection and the provider.
            self._Id = ''
            self._Scopes = []
            provider = ''
        self.Provider.Id = provider

    @property
    def Authorized(self):
        # Authorized only when every required scope value was granted to
        # the user; an empty scope list is never authorized.
        authorized = len(self._Scopes) > 0
        for scope in self._Scopes:
            if scope not in self.Provider.User.Scopes:
                authorized = False
                break
        return authorized

    def _getPropertySetInfo(self):
        # UNO property descriptors for this object.
        properties = {}
        readonly = uno.getConstantByName('com.sun.star.beans.PropertyAttribute.READONLY')
        transient = uno.getConstantByName('com.sun.star.beans.PropertyAttribute.TRANSIENT')
        properties['Provider'] = getProperty('Provider', 'com.sun.star.uno.XInterface', readonly)
        properties['Id'] = getProperty('Id', 'string', transient)
        properties['Authorized'] = getProperty('Authorized', 'boolean', readonly)
        return properties
class ProviderSetting(unohelper.Base,
                      XTransactedObject,
                      PropertySet):
    """Read-only view of one provider's OAuth2 registration data.

    NOTE(review): declares XTransactedObject but implements neither
    commit() nor revert(); confirm whether that interface is required.
    """

    def __init__(self, configuration):
        self.configuration = configuration
        self.User = UserSetting(self.configuration)
        self.MetaData = None

    @property
    def Id(self):
        # The provider id lives on the nested user setting.
        return self.User._ProviderId

    @Id.setter
    def Id(self, id):
        self.User._ProviderId = id
        self.MetaData = self._getMetaData(id)

    def _getMetaData(self, id):
        # Returns a KeyMap of the provider's client registration, or None
        # when the id is not configured.
        metadata = None
        providers = self.configuration.getByName('Providers')
        if providers.hasByName(id):
            provider = providers.getByName(id)
            metadata = KeyMap()
            metadata.insertValue('ClientSecret', provider.getByName('ClientSecret'))
            metadata.insertValue('ClientId', provider.getByName('ClientId'))
            metadata.insertValue('TokenUrl', provider.getByName('TokenUrl'))
            metadata.insertValue('TokenParameters', provider.getByName('TokenParameters'))
        return metadata

    def _getPropertySetInfo(self):
        # UNO property descriptors for this object.
        properties = {}
        readonly = uno.getConstantByName('com.sun.star.beans.PropertyAttribute.READONLY')
        properties['User'] = getProperty('User', 'com.sun.star.uno.XInterface', readonly)
        properties['MetaData'] = getProperty('MetaData', 'com.sun.star.auth.XRestKeyMap', readonly)
        return properties
class UserSetting(unohelper.Base,
                  XTransactedObject,
                  PropertySet):
    """Per-user OAuth2 token state, persisted under the provider's
    'Users' configuration node."""

    def __init__(self, configuration):
        self.configuration = configuration
        self._Id = ''
        self._ProviderId = ''
        # Expiration timestamp of the access token (epoch seconds).
        self._TimeStamp = 0
        self.Scopes = ()
        self.AccessToken = ''
        self.RefreshToken = ''
        self.NeverExpires = False

    @property
    def Id(self):
        return self._Id

    @Id.setter
    def Id(self, id):
        # Switching user reloads that user's persisted token state.
        self._Id = id
        self.revert()

    @property
    def HasExpired(self):
        if self.NeverExpires:
            return False
        now = int(time.time())
        expire = max(0, self._TimeStamp - now)
        # Consider the token expired g_refresh_overlap seconds early so
        # it can be refreshed before it actually lapses.
        return expire < g_refresh_overlap

    @property
    def MetaData(self):
        metadata = KeyMap()
        metadata.insertValue('AccessToken', self.AccessToken)
        metadata.insertValue('RefreshToken', self.RefreshToken)
        metadata.insertValue('NeverExpires', self.NeverExpires)
        metadata.insertValue('TimeStamp', self._TimeStamp)
        metadata.insertValue('Scopes', self.Scopes)
        return metadata

    @MetaData.setter
    def MetaData(self, data):
        # Only the refreshed token and its timestamp are updated; the
        # change is persisted immediately.
        self.AccessToken = data.getValue('AccessToken')
        self._TimeStamp = data.getValue('TimeStamp')
        self.commit()

    @property
    def IsValid(self):
        # A refresh token is only mandatory when the access token expires.
        if self.NeverExpires:
            return all((self._ProviderId, self.Id, self.AccessToken, self.Scopes))
        return all((self._ProviderId, self.Id, self.AccessToken, self.RefreshToken, self.Scopes))

    # XTransactedObject
    def commit(self):
        # Persist token, timestamp and scopes under the user's node; a
        # no-op when the provider or user node does not exist.
        providers = self.configuration.getByName('Providers')
        if providers.hasByName(self._ProviderId):
            provider = providers.getByName(self._ProviderId)
            users = provider.getByName('Users')
            if users.hasByName(self.Id):
                user = users.getByName(self.Id)
                user.replaceByName('AccessToken', self.AccessToken)
                user.replaceByName('TimeStamp', self._TimeStamp)
                # Scopes must be passed as a typed UNO []string sequence.
                arguments = ('Scopes', uno.Any('[]string', self.Scopes))
                uno.invoke(user, 'replaceByName', arguments)
                if self.configuration.hasPendingChanges():
                    self.configuration.commitChanges()

    def revert(self):
        # Reload the user's persisted state, falling back to empty values
        # when the provider/user node is missing.
        accesstoken = ''
        refreshtoken = ''
        neverexpires = False
        timestamp = 0
        scopes = ()
        providers = self.configuration.getByName('Providers')
        if providers.hasByName(self._ProviderId):
            provider = providers.getByName(self._ProviderId)
            users = provider.getByName('Users')
            if users.hasByName(self.Id):
                user = users.getByName(self.Id)
                accesstoken = user.getByName('AccessToken')
                refreshtoken = user.getByName('RefreshToken')
                neverexpires = user.getByName('NeverExpires')
                timestamp = user.getByName('TimeStamp')
                scopes = user.getByName('Scopes')
        self.AccessToken = accesstoken
        self.RefreshToken = refreshtoken
        self.NeverExpires = neverexpires
        self._TimeStamp = timestamp
        self.Scopes = scopes

    def _getPropertySetInfo(self):
        # UNO property descriptors for this object.
        properties = {}
        readonly = uno.getConstantByName('com.sun.star.beans.PropertyAttribute.READONLY')
        transient = uno.getConstantByName('com.sun.star.beans.PropertyAttribute.TRANSIENT')
        properties['Id'] = getProperty('Id', 'string', transient)
        properties['AccessToken'] = getProperty('AccessToken', 'string', readonly)
        properties['HasExpired'] = getProperty('HasExpired', 'boolean', readonly)
        properties['IsValid'] = getProperty('IsValid', 'boolean', readonly)
        properties['Scopes'] = getProperty('Scopes', '[]string', readonly)
        properties['MetaData'] = getProperty('MetaData', 'com.sun.star.auth.XRestKeyMap', transient)
        return properties
| gpl-3.0 |
jlspyaozhongkai/Uter | third_party_backup/Python-2.7.9/Demo/scripts/find-uname.py | 10 | 1207 | #!/usr/bin/env python
"""
For each argument on the command line, look for it in the set of all Unicode
names. Arguments are treated as case-insensitive regular expressions, e.g.:
% find-uname 'small letter a$' 'horizontal line'
*** small letter a$ matches ***
LATIN SMALL LETTER A (97)
COMBINING LATIN SMALL LETTER A (867)
CYRILLIC SMALL LETTER A (1072)
PARENTHESIZED LATIN SMALL LETTER A (9372)
CIRCLED LATIN SMALL LETTER A (9424)
FULLWIDTH LATIN SMALL LETTER A (65345)
*** horizontal line matches ***
HORIZONTAL LINE EXTENSION (9135)
"""
import unicodedata
import sys
import re
def main(args):
unicode_names = []
for ix in range(sys.maxunicode+1):
try:
unicode_names.append((ix, unicodedata.name(unichr(ix))))
except ValueError: # no name for the character
pass
for arg in args:
pat = re.compile(arg, re.I)
matches = [(y,x) for (x,y) in unicode_names
if pat.search(y) is not None]
if matches:
print "***", arg, "matches", "***"
for match in matches:
print "%s (%d)" % match
if __name__ == "__main__":
    # Each command-line argument is treated as a case-insensitive regex.
    main(sys.argv[1:])
| gpl-3.0 |
dya2/python-for-android | python-build/python-libs/gdata/build/lib/gdata/tlslite/mathtls.py | 273 | 11647 | """Miscellaneous helper functions."""
from utils.compat import *
from utils.cryptomath import *
import hmac
import md5
import sha
#1024, 1536, 2048, 3072, 4096, 6144, and 8192 bit groups]
goodGroupParameters = [(2,0xEEAF0AB9ADB38DD69C33F80AFA8FC5E86072618775FF3C0B9EA2314C9C256576D674DF7496EA81D3383B4813D692C6E0E0D5D8E250B98BE48E495C1D6089DAD15DC7D7B46154D6B6CE8EF4AD69B15D4982559B297BCF1885C529F566660E57EC68EDBC3C05726CC02FD4CBF4976EAA9AFD5138FE8376435B9FC61D2FC0EB06E3),\
(2,0x9DEF3CAFB939277AB1F12A8617A47BBBDBA51DF499AC4C80BEEEA9614B19CC4D5F4F5F556E27CBDE51C6A94BE4607A291558903BA0D0F84380B655BB9A22E8DCDF028A7CEC67F0D08134B1C8B97989149B609E0BE3BAB63D47548381DBC5B1FC764E3F4B53DD9DA1158BFD3E2B9C8CF56EDF019539349627DB2FD53D24B7C48665772E437D6C7F8CE442734AF7CCB7AE837C264AE3A9BEB87F8A2FE9B8B5292E5A021FFF5E91479E8CE7A28C2442C6F315180F93499A234DCF76E3FED135F9BB),\
(2,0xAC6BDB41324A9A9BF166DE5E1389582FAF72B6651987EE07FC3192943DB56050A37329CBB4A099ED8193E0757767A13DD52312AB4B03310DCD7F48A9DA04FD50E8083969EDB767B0CF6095179A163AB3661A05FBD5FAAAE82918A9962F0B93B855F97993EC975EEAA80D740ADBF4FF747359D041D5C33EA71D281E446B14773BCA97B43A23FB801676BD207A436C6481F1D2B9078717461A5B9D32E688F87748544523B524B0D57D5EA77A2775D2ECFA032CFBDBF52FB3786160279004E57AE6AF874E7303CE53299CCC041C7BC308D82A5698F3A8D0C38271AE35F8E9DBFBB694B5C803D89F7AE435DE236D525F54759B65E372FCD68EF20FA7111F9E4AFF73),\
(2,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF),\
(5,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF),\
(5,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DCC4024FFFFFFFFFFFFFFFF),\
(5,0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E438777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F5683423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD922222E04A4037C0713EB57A81A23F0C73473FC646CEA306B4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A364597E899A0255DC164F31CC50846851DF9AB48195DED7EA1B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F924009438B481C6CD7889A002ED5EE382BC9190DA6FC026E479558E4475677E9AA9E3050E2765
694DFC81F56E880B96E7160C980DD98EDD3DFFFFFFFFFFFFFFFFF)]
def P_hash(hashModule, secret, seed, length):
    """TLS P_hash data-expansion function (RFC 2246, section 5).

    Repeatedly computes A(i) = HMAC(secret, A(i-1)) and emits
    HMAC(secret, A(i) + seed) until *length* bytes have been produced.
    """
    out = createByteArrayZeros(length)
    secretStr = bytesToString(secret)
    seedStr = bytesToString(seed)
    A = seedStr
    written = 0
    while 1:
        A = hmac.HMAC(secretStr, A, hashModule).digest()
        chunk = hmac.HMAC(secretStr, A + seedStr, hashModule).digest()
        for ch in chunk:
            if written >= length:
                return out
            out[written] = ord(ch)
            written += 1
def PRF(secret, label, seed, length):
    """TLS 1.0 PRF (RFC 2246, section 5): XOR of P_MD5 over the left half
    of *secret* with P_SHA1 over the right half (halves overlap by one
    byte when len(secret) is odd)."""
    leftEnd = int(math.ceil(len(secret) / 2.0))
    rightStart = int(math.floor(len(secret) / 2.0))
    labelledSeed = concatArrays(stringToBytes(label), seed)
    md5Stream = P_hash(md5, secret[:leftEnd], labelledSeed, length)
    sha1Stream = P_hash(sha, secret[rightStart:], labelledSeed, length)
    # Combine the two streams byte-wise.
    for i in range(length):
        md5Stream[i] ^= sha1Stream[i]
    return md5Stream
def PRF_SSL(secret, seed, length):
    """SSLv3 key-derivation function: concatenated rounds of
    MD5(secret + SHA1('A'*i + secret + seed)), truncated to *length*."""
    secretStr = bytesToString(secret)
    seedStr = bytesToString(seed)
    out = createByteArrayZeros(length)
    pos = 0
    for roundNum in range(26):
        # Round labels are 'A', 'BB', 'CCC', ... per the SSLv3 spec.
        label = chr(ord('A') + roundNum) * (roundNum + 1)
        inner = sha.sha(label + secretStr + seedStr).digest()
        digest = md5.md5(secretStr + inner).digest()
        for ch in digest:
            if pos >= length:
                return out
            out[pos] = ord(ch)
            pos += 1
    return out
def makeX(salt, username, password):
    """SRP private key: x = SHA1(salt | SHA1(username ":" password)).

    Raises ValueError when username or salt is 256 bytes or longer.
    """
    if len(username) >= 256:
        raise ValueError("username too long")
    if len(salt) >= 256:
        raise ValueError("salt too long")
    innerDigest = sha.sha(username + ":" + password).digest()
    return stringToNumber(sha.sha(salt + innerDigest).digest())
#This function is used by VerifierDB.makeVerifier
def makeVerifier(username, password, bits):
    """Create an SRP verifier for (username, password).

    Returns (N, g, salt, verifier) where verifier = g^x mod N for the
    requested group size in bits.
    """
    groupIndex = {1024:0, 1536:1, 2048:2, 3072:3, 4096:4, 6144:5, 8192:6}[bits]
    g, N = goodGroupParameters[groupIndex]
    salt = bytesToString(getRandomBytes(16))
    x = makeX(salt, username, password)
    return N, g, salt, powMod(g, x, N)
def PAD(n, x):
    """Left-pad the byte encoding of *x* with NUL bytes so it is as wide
    as the byte encoding of *n* (RFC 5054 PAD operation)."""
    width = len(numberToString(n))
    # rjust is a no-op when the encoding is already >= width bytes.
    return numberToString(x).rjust(width, "\0")
def makeU(N, A, B):
    """SRP-6 scrambling parameter: u = SHA1(PAD(A) | PAD(B))."""
    material = PAD(N, A) + PAD(N, B)
    return stringToNumber(sha.sha(material).digest())
def makeK(N, g):
    """SRP-6a multiplier: k = SHA1(N | PAD(g))."""
    material = numberToString(N) + PAD(N, g)
    return stringToNumber(sha.sha(material).digest())
"""
MAC_SSL
Modified from Python HMAC by Trevor
"""
class MAC_SSL:
    """MAC_SSL class.

    Implements the SSLv3 MAC, which is HMAC-like but appends fixed
    40-byte pads after the key instead of XORing pads with it.
    This supports the API for Cryptographic Hash Functions (PEP 247).
    """

    def __init__(self, key, msg = None, digestmod = None):
        """Create a new MAC_SSL object.

        key:       key for the keyed hash object.
        msg:       Initial input for the hash, if provided.
        digestmod: A module supporting PEP 247.  Defaults to the md5 module.
        """
        if digestmod is None:
            import md5
            digestmod = md5

        # key=None is an internal fast path used by copy(): return an
        # uninitialized shell that copy() fills in by hand.
        if key is None:  # fixed: was "key == None"; identity test is correct for None
            return

        self.digestmod = digestmod
        self.outer = digestmod.new()
        self.inner = digestmod.new()
        self.digest_size = digestmod.digest_size

        # SSLv3 pads are fixed-length and concatenated after the key,
        # unlike HMAC's XORed, block-sized pads.
        ipad = "\x36" * 40
        opad = "\x5C" * 40

        self.inner.update(key)
        self.inner.update(ipad)
        self.outer.update(key)
        self.outer.update(opad)
        if msg is not None:
            self.update(msg)

    def update(self, msg):
        """Update this hashing object with the string msg.
        """
        self.inner.update(msg)

    def copy(self):
        """Return a separate copy of this hashing object.

        An update to this copy won't affect the original object.
        """
        other = MAC_SSL(None)  # fast path: skip key setup in __init__
        other.digest_size = self.digest_size
        other.digestmod = self.digestmod
        other.inner = self.inner.copy()
        other.outer = self.outer.copy()
        return other

    def digest(self):
        """Return the hash value of this hashing object.

        This returns a string containing 8-bit data.  The object is
        not altered in any way by this function; you can continue
        updating the object after calling this function.
        """
        h = self.outer.copy()
        h.update(self.inner.digest())
        return h.digest()

    def hexdigest(self):
        """Like digest(), but returns a string of hexadecimal digits instead.
        """
        return "".join([hex(ord(x))[2:].zfill(2)
                        for x in tuple(self.digest())])
| apache-2.0 |
artwr/airflow | airflow/sensors/base_sensor_operator.py | 2 | 5619 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from time import sleep
from datetime import timedelta
from airflow.exceptions import AirflowException, AirflowSensorTimeout, \
AirflowSkipException, AirflowRescheduleException
from airflow.models import BaseOperator
from airflow.models.skipmixin import SkipMixin
from airflow.models.taskreschedule import TaskReschedule
from airflow.utils import timezone
from airflow.utils.decorators import apply_defaults
from airflow.ti_deps.deps.ready_to_reschedule import ReadyToRescheduleDep
class BaseSensorOperator(BaseOperator, SkipMixin):
    """
    Sensor operators are derived from this class and inherit these attributes.

    Sensor operators keep executing at a time interval and succeed when
    a criteria is met and fail if and when they time out.

    :param soft_fail: Set to true to mark the task as SKIPPED on failure
    :type soft_fail: bool
    :param poke_interval: Time in seconds that the job should wait in
        between each tries
    :type poke_interval: int
    :param timeout: Time, in seconds before the task times out and fails.
    :type timeout: int
    :param mode: How the sensor operates.
        Options are: ``{ poke | reschedule }``, default is ``poke``.
        When set to ``poke`` the sensor is taking up a worker slot for its
        whole execution time and sleeps between pokes. Use this mode if the
        expected runtime of the sensor is short or if a short poke interval
        is required.
        When set to ``reschedule`` the sensor task frees the worker slot when
        the criteria is not yet met and it's rescheduled at a later time. Use
        this mode if the expected time until the criteria is met is. The poke
        interval should be more than one minute to prevent too much load on
        the scheduler.
    :type mode: str
    """
    # Color used to render this operator in the Airflow UI.
    ui_color = '#e6f1f2'
    # The only values accepted for ``mode`` (validated in __init__).
    valid_modes = ['poke', 'reschedule']

    @apply_defaults
    def __init__(self,
                 poke_interval=60,
                 timeout=60 * 60 * 24 * 7,
                 soft_fail=False,
                 mode='poke',
                 *args,
                 **kwargs):
        super(BaseSensorOperator, self).__init__(*args, **kwargs)
        self.poke_interval = poke_interval
        self.soft_fail = soft_fail
        self.timeout = timeout
        # Fail fast on an unknown mode so the misconfiguration surfaces
        # when the DAG is constructed, not when the task first runs.
        if mode not in self.valid_modes:
            raise AirflowException(
                "The mode must be one of {valid_modes},"
                "'{d}.{t}'; received '{m}'."
                .format(valid_modes=self.valid_modes,
                        d=self.dag.dag_id if self.dag else "",
                        t=self.task_id, m=mode))
        self.mode = mode

    def poke(self, context):
        """
        Function that the sensors defined while deriving this class should
        override.
        """
        raise AirflowException('Override me.')

    def execute(self, context):
        """Poke repeatedly until success, timeout, or (in reschedule mode)
        until the task is handed back to the scheduler."""
        started_at = timezone.utcnow()
        if self.reschedule:
            # If reschedule, use first start date of current try
            # (so the timeout spans all reschedules of this try).
            task_reschedules = TaskReschedule.find_for_task_instance(context['ti'])
            if task_reschedules:
                started_at = task_reschedules[0].start_date
        while not self.poke(context):
            if (timezone.utcnow() - started_at).total_seconds() > self.timeout:
                # If sensor is in soft fail mode but will be retried then
                # give it a chance and fail with timeout.
                # This gives the ability to set up non-blocking AND soft-fail sensors.
                if self.soft_fail and not context['ti'].is_eligible_to_retry():
                    self._do_skip_downstream_tasks(context)
                    raise AirflowSkipException('Snap. Time is OUT.')
                else:
                    raise AirflowSensorTimeout('Snap. Time is OUT.')
            if self.reschedule:
                # Free the worker slot and ask the scheduler to run this
                # task again after one poke interval.
                reschedule_date = timezone.utcnow() + timedelta(
                    seconds=self.poke_interval)
                raise AirflowRescheduleException(reschedule_date)
            else:
                # Poke mode: keep the slot and sleep between pokes.
                sleep(self.poke_interval)
        self.log.info("Success criteria met. Exiting.")

    def _do_skip_downstream_tasks(self, context):
        # Mark all downstream tasks as skipped; used by the soft_fail
        # timeout path above.
        downstream_tasks = context['task'].get_flat_relatives(upstream=False)
        self.log.debug("Downstream task_ids %s", downstream_tasks)
        if downstream_tasks:
            self.skip(context['dag_run'], context['ti'].execution_date, downstream_tasks)

    @property
    def reschedule(self):
        # True when the sensor releases its worker slot between pokes.
        return self.mode == 'reschedule'

    @property
    def deps(self):
        """
        Adds one additional dependency for all sensor operators that
        checks if a sensor task instance can be rescheduled.
        """
        return BaseOperator.deps.fget(self) | {ReadyToRescheduleDep()}
| apache-2.0 |
lepistone/odoo | addons/procurement/__openerp__.py | 61 | 2482 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Procurements',
'version' : '1.0',
'author' : 'OpenERP SA',
'website' : 'http://www.openerp.com',
'category' : 'Hidden/Dependency',
'depends' : ['base', 'product'],
'description': """
This is the module for computing Procurements.
==============================================
In the MRP process, procurements orders are created to launch manufacturing
orders, purchase orders, stock allocations. Procurement orders are
generated automatically by the system and unless there is a problem, the
user will not be notified. In case of problems, the system will raise some
procurement exceptions to inform the user about blocking problems that need
to be resolved manually (like, missing BoM structure or missing supplier).
The procurement order will schedule a proposal for automatic procurement
for the product which needs replenishment. This procurement will start a
task, either a purchase order form for the supplier, or a production order
depending on the product's configuration.
""",
'data': [
'security/ir.model.access.csv',
'security/procurement_security.xml',
'procurement_data.xml',
'wizard/schedulers_all_view.xml',
'procurement_view.xml',
'company_view.xml',
],
'demo': [],
'test': ['test/procurement.yml'],
'installable': True,
'auto_install': True,
'images': ['images/compute_schedulers.jpeg','images/config_companies_sched.jpeg', 'images/minimum_stock_rules.jpeg'],
}
| agpl-3.0 |
mstriemer/olympia | src/olympia/tags/tests/test_views.py | 8 | 2569 | from django.core.urlresolvers import reverse, NoReverseMatch
from pyquery import PyQuery as pq
from olympia.amo.tests import TestCase
from olympia.addons.models import Addon
class TestManagement(TestCase):
    fixtures = ['base/addon_3615',
                'tags/tags.json']

    def test_tags_details_view(self):
        """Test that there are some tags being shown on the details page."""
        detail_url = reverse('addons.detail_more', args=['a3615'])
        response = self.client.get_ajax(detail_url, follow=True)
        page = pq(response.content)
        assert len(page('li.tag')) == 4
        assert 'Tags' in [header.text for header in page('h3')]
class TestXSS(TestCase):
    fixtures = ['base/addon_3615',
                'tags/tags.json']
    xss = "<script src='foo.bar'>"
    # HTML-escaped form of `xss` as the template engine renders it.
    # (Restored: the previous value had its entities decoded, making it
    # identical to `xss`, so the two assertions below could never both hold.)
    escaped = "&lt;script src=&#39;foo.bar&#39;&gt;"

    def setUp(self):
        # Point the first tag of the fixture add-on at the XSS payload.
        self.addon = Addon.objects.get(pk=3615)
        self.tag = self.addon.tags.all()[0]
        self.tag.tag_text = self.xss
        self.tag.num_addons = 1
        self.tag.save()

    def test_tags_xss_detail(self):
        """Test xss tag detail."""
        url = reverse('addons.detail_more', args=['a3615'])
        r = self.client.get_ajax(url, follow=True)
        # The payload must appear only in escaped form in the response.
        assert self.escaped in r.content
        assert self.xss not in r.content
class TestXSSURLFail(TestCase):
    fixtures = ['base/addon_3615',
                'tags/tags.json']
    xss = "<script>alert('xss')</script>"
    # HTML-escaped form of `xss` as the template engine renders it.
    # (Restored: the previous value had its entities decoded, making it
    # identical to `xss` and the detail-page assertions contradictory.)
    escaped = "&lt;script&gt;alert(&#39;xss&#39;)&lt;/script&gt;"

    def setUp(self):
        # Point the first tag of the fixture add-on at the XSS payload.
        self.addon = Addon.objects.get(pk=3615)
        self.tag = self.addon.tags.all()[0]
        self.tag.tag_text = self.xss
        self.tag.num_addons = 1
        self.tag.save()

    def test_tags_xss(self):
        """Test xss tag detail."""
        url = reverse('addons.detail_more', args=['a3615'])
        r = self.client.get_ajax(url, follow=True)
        # The payload must appear only in escaped form in the response.
        assert self.escaped in r.content
        assert self.xss not in r.content

    def test_tags_xss_home(self):
        """Test xss tag home."""
        # A payload containing URL-hostile characters must not reverse.
        self.assertRaises(NoReverseMatch, reverse,
                          'tags.detail', args=[self.xss])

    def test_no_reverse(self):
        assert not self.tag.can_reverse()
class TestNoTags(TestCase):
    fixtures = ['base/addon_3615']

    def test_tags_no_details_view(self):
        """Test that there is no tag header tags being shown."""
        detail_url = reverse('addons.detail', args=['a3615'])
        response = self.client.get(detail_url, follow=True)
        page = pq(response.content)
        assert 'Tags' not in [header.text for header in page('h3')]
| bsd-3-clause |
DavisPoGo/Monocle | scripts/export_accounts_csv.py | 1 | 1897 | #re!/usr/bin/env python3
import csv
import sys
from datetime import datetime
from pathlib import Path

# Make the Monocle package importable when this script is run from scripts/.
monocle_dir = Path(__file__).resolve().parents[1]
sys.path.append(str(monocle_dir))

from monocle.accounts import get_accounts

accounts_file = monocle_dir / 'accounts.csv'
try:
    # Keep a timestamped backup of any previous export before overwriting.
    now = datetime.now().strftime("%Y-%m-%d-%H%M")
    accounts_file.rename('accounts-{}.csv'.format(now))
except FileNotFoundError:
    pass

banned = []
with accounts_file.open('w') as csvfile:
    writer = csv.writer(csvfile, delimiter=',')
    writer.writerow(('username', 'password', 'provider', 'model', 'iOS', 'id'))
    ACCOUNTS = get_accounts()
    for account in ACCOUNTS.values():
        # Banned accounts are diverted to banned.csv instead of the export.
        if account.get('banned', False):
            banned.append(account)
            continue
        writer.writerow((account['username'],
                         account['password'],
                         account['provider'],
                         account['model'],
                         account['iOS'],
                         account['id']))

if banned:
    banned_file = monocle_dir / 'banned.csv'
    # Only write the header when creating the file; otherwise append rows.
    write_header = not banned_file.exists()
    with banned_file.open('a') as csvfile:
        writer = csv.writer(csvfile, delimiter=',')
        if write_header:
            writer.writerow(('username', 'password', 'provider', 'level', 'created', 'last used'))
        for account in banned:
            row = [account['username'], account['password'], account['provider']]
            row.append(account.get('level'))
            # 'created' and 'time' may be absent on older accounts; emit
            # an empty cell in that case.
            try:
                row.append(datetime.fromtimestamp(account['created']).strftime('%x %X'))
            except KeyError:
                row.append(None)
            try:
                row.append(datetime.fromtimestamp(account['time']).strftime('%x %X'))
            except KeyError:
                row.append(None)
            writer.writerow(row)
print('Done!')
| mit |
kingbryan/fractional | node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/xml_fix.py | 2767 | 2174 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Applies a fix to CR LF TAB handling in xml.dom.
Fixes this: http://code.google.com/p/chromium/issues/detail?id=76293
Working around this: http://bugs.python.org/issue5752
TODO(bradnelson): Consider dropping this when we drop XP support.
"""
import xml.dom.minidom
def _Replacement_write_data(writer, data, is_attrib=False):
"""Writes datachars to writer."""
data = data.replace("&", "&").replace("<", "<")
data = data.replace("\"", """).replace(">", ">")
if is_attrib:
data = data.replace(
"\r", "
").replace(
"\n", "
").replace(
"\t", "	")
writer.write(data)
def _Replacement_writexml(self, writer, indent="", addindent="", newl=""):
  """Replacement Element.writexml that escapes attribute values properly.

  indent = current indentation
  addindent = indentation to add to higher levels
  newl = newline string
  """
  writer.write(indent+"<" + self.tagName)

  attrs = self._get_attributes()
  # sorted() works for both the Python 2 list and Python 3 view returned
  # by keys(), unlike the keys()/list.sort() idiom.
  for a_name in sorted(attrs.keys()):
    writer.write(" %s=\"" % a_name)
    _Replacement_write_data(writer, attrs[a_name].value, is_attrib=True)
    writer.write("\"")
  if self.childNodes:
    writer.write(">%s" % newl)
    for node in self.childNodes:
      node.writexml(writer, indent + addindent, addindent, newl)
    writer.write("%s</%s>%s" % (indent, self.tagName, newl))
  else:
    # Leaf element: use the self-closing form.
    writer.write("/>%s" % newl)
class XmlFix(object):
  """Object to manage temporary patching of xml.dom.minidom."""

  def __init__(self):
    # Remember the stock implementations so Cleanup() can restore them,
    # then install the fixed versions.
    self.write_data = xml.dom.minidom._write_data
    self.writexml = xml.dom.minidom.Element.writexml
    xml.dom.minidom._write_data = _Replacement_write_data
    xml.dom.minidom.Element.writexml = _Replacement_writexml

  def Cleanup(self):
    """Restore the original minidom implementations (idempotent)."""
    if not self.write_data:
      return
    xml.dom.minidom._write_data = self.write_data
    xml.dom.minidom.Element.writexml = self.writexml
    self.write_data = None

  def __del__(self):
    # Ensure the patch is undone even if Cleanup() was never called.
    self.Cleanup()
tuxfux-hlp-notes/python-batches | archieves/batch-65/16-files/sheets/lib/python2.7/site-packages/pkg_resources/_vendor/packaging/requirements.py | 454 | 4355 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import string
import re
from pkg_resources.extern.pyparsing import stringStart, stringEnd, originalTextFor, ParseException
from pkg_resources.extern.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine
from pkg_resources.extern.pyparsing import Literal as L # noqa
from pkg_resources.extern.six.moves.urllib import parse as urlparse
from .markers import MARKER_EXPR, Marker
from .specifiers import LegacySpecifier, Specifier, SpecifierSet
class InvalidRequirement(ValueError):
    """
    An invalid requirement was found, users should refer to PEP 508.

    Raised by ``Requirement`` when a requirement string cannot be parsed
    or contains an invalid URL.
    """
# --- PEP 508 requirement grammar, built from pyparsing primitives ---

ALPHANUM = Word(string.ascii_letters + string.digits)

# Structural punctuation is suppressed from the parse results.
LBRACKET = L("[").suppress()
RBRACKET = L("]").suppress()
LPAREN = L("(").suppress()
RPAREN = L(")").suppress()
COMMA = L(",").suppress()
SEMICOLON = L(";").suppress()
AT = L("@").suppress()

PUNCTUATION = Word("-_.")
IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM)
IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END))

NAME = IDENTIFIER("name")
EXTRA = IDENTIFIER

# A URL is any run of non-space characters following '@'.
URI = Regex(r'[^ ]+')("url")
URL = (AT + URI)

EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA)
EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras")

# Version specifiers: PEP 440 syntax, with legacy specifiers as fallback.
VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE)
VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE)

VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY
VERSION_MANY = Combine(VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE),
                       joinString=",", adjacent=False)("_raw_spec")
_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY))
# Normalize an absent specifier to the empty string.
_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or '')

VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier")
VERSION_SPEC.setParseAction(lambda s, l, t: t[1])

# Markers are captured verbatim and wrapped in a Marker object.
MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker")
MARKER_EXPR.setParseAction(
    lambda s, l, t: Marker(s[t._original_start:t._original_end])
)
MARKER_SEPERATOR = SEMICOLON
MARKER = MARKER_SEPERATOR + MARKER_EXPR

VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER)
URL_AND_MARKER = URL + Optional(MARKER)

NAMED_REQUIREMENT = \
    NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER)

# Top-level rule: the whole string must be a single requirement.
REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd
class Requirement(object):
    """Parse a requirement.

    Parse a given requirement string into its parts, such as name, specifier,
    URL, and extras. Raises InvalidRequirement on a badly-formed requirement
    string.
    """

    # TODO: Can we test whether something is contained within a requirement?
    #       If so how do we do that? Do we need to test against the _name_ of
    #       the thing as well as the version? What about the markers?
    # TODO: Can we normalize the name and extra name?

    def __init__(self, requirement_string):
        try:
            req = REQUIREMENT.parseString(requirement_string)
        except ParseException as e:
            # Include up to 8 characters at the failure location for context.
            raise InvalidRequirement(
                "Invalid requirement, parse error at \"{0!r}\"".format(
                    requirement_string[e.loc:e.loc + 8]))
        self.name = req.name
        if req.url:
            parsed_url = urlparse.urlparse(req.url)
            # NOTE(review): the second clause is subsumed by the first
            # (both-missing already fails "scheme and netloc") -- confirm
            # intent before simplifying.
            if not (parsed_url.scheme and parsed_url.netloc) or (
                    not parsed_url.scheme and not parsed_url.netloc):
                raise InvalidRequirement("Invalid URL given")
            self.url = req.url
        else:
            self.url = None
        self.extras = set(req.extras.asList() if req.extras else [])
        self.specifier = SpecifierSet(req.specifier)
        self.marker = req.marker if req.marker else None

    def __str__(self):
        # Rebuild the canonical PEP 508 string from the parsed parts.
        parts = [self.name]
        if self.extras:
            parts.append("[{0}]".format(",".join(sorted(self.extras))))
        if self.specifier:
            parts.append(str(self.specifier))
        if self.url:
            parts.append("@ {0}".format(self.url))
        if self.marker:
            parts.append("; {0}".format(self.marker))
        return "".join(parts)

    def __repr__(self):
        return "<Requirement({0!r})>".format(str(self))
| gpl-3.0 |
fafaman/django | tests/check_framework/test_caches.py | 249 | 1114 | from django.core.checks.caches import E001
from django.test import SimpleTestCase
from django.test.utils import override_settings
class CheckCacheSettingsAppDirsTest(SimpleTestCase):
    VALID_CACHES_CONFIGURATION = {
        'default': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    }
    INVALID_CACHES_CONFIGURATION = {
        'other': {
            'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
        },
    }

    @property
    def func(self):
        from django.core.checks.caches import check_default_cache_is_configured
        return check_default_cache_is_configured

    @override_settings(CACHES=VALID_CACHES_CONFIGURATION)
    def test_default_cache_included(self):
        """No error is reported when 'default' is present in CACHES."""
        self.assertEqual(self.func(None), [])

    @override_settings(CACHES=INVALID_CACHES_CONFIGURATION)
    def test_default_cache_not_included(self):
        """E001 is reported when 'default' is missing from CACHES."""
        self.assertEqual(self.func(None), [E001])
| bsd-3-clause |
bhaskar24/ns_3_dev_RARED | utils/tests/test-waf.py | 73 | 7623 | #! /usr/bin/env python
## -*- Mode: python; py-indent-offset: 4; indent-tabs-mode: nil; coding: utf-8; -*-
#
# Copyright (c) 2014 Siddharth Santurkar
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# NOTE: Run this script with the Python3 interpreter if the python3 compatibility
# of the ns-3 unit test runner needs to be tested.
# The following options of waf are being tested for poratability by this script.
# To see the options supported by this script, run with the -h option on the command line
#
# build : executes the build (pre: configure, post: clean)
# check : run the equivalent of the old ns-3 unit tests using test.py
# clean : cleans the project
# configure: configures the project (pore: None, post: distclean)
# dist : makes a tarball for redistributing the sources (pre:none )
# distcheck: checks if the project compiles (tarball from 'dist') (pre: dist, post: rm -rf ns-3*.tar.bz2)
# docs : build all the documentation: doxygen, manual, tutorial, models (pre: configure; post: distclean)
# doxygen : do a full build, generate the introspected doxygen and then the doxygen
# install : installs the targets on the system (pre: configure, post: uninstall )
# list : lists the targets to execute (pre: configure)
# shell : run a shell with an environment suitably modified to run locally built programs (pre:configure)
# sphinx : build the Sphinx documentation: manual, tutorial, models
# step : executes tasks in a step-by-step fashion, for debugging (pre: configure)
# uninstall: removes the targets installed (pre: install, post uninstall)
# *update : updates the plugins from the *waflib/extras* directory
from __future__ import print_function
from TestBase import TestBaseClass
import sys
def replace(pre, post, main_cmd_list):
    """Wrap every command in *main_cmd_list* with optional *pre* and *post*
    shell commands, chained with '&&'.

    A falsy *pre*/*post* (None or '') contributes nothing.
    """
    prefix = (pre + ' && ') if pre else ''
    suffix = (' && ' + post) if post else ''
    return [prefix + cmd + suffix for cmd in main_cmd_list]
def main(argv):
    """
    Prepares test cases and executes

    Builds one shell command line per (waf command, option) pair, wraps each
    with the pre/post steps it needs (e.g. configure before build, clean or
    distclean after), and hands the resulting list to the TestBaseClass
    runner.
    """
    runner = TestBaseClass(argv[1:], "Test suite for the ns-3 Waf build system", 'waf')
    # Commands may be overridden on the command line; otherwise use defaults.
    in_cmds = runner.override_cmds()
    if in_cmds:
        cmds = in_cmds.split(',')
    else:
        cmds = ['basic', 'build', 'configure', 'step', 'clean', 'dist', 'list']
    config_test_cases = [
        "--enable-gcov",
        "--enable-sudo",
        "--enable-sudo",
        # NOTE(review): "--enable-sudo" appears twice above, so that case is
        # run twice -- presumably unintentional; confirm before removing.
        "--enable-tests",
        "--disable-tests",
        "--enable-examples",
        "--disable-examples",
        "--doxygen-no-build",
        "--enable-static",
        "--enable-mpi",
        "--enable-rpath",
        "--enable-modules=build/utils/test-runner.cc.1.o",
        "--boost-static",
        "--boost-mt",
        "--boost-linkage_autodetect",
        "--boost-python=33",
        "--disable-gtk",
        "--int64x64=cairo",
        "--disable-pthread",
        "--force-planetlab",
        "--nopyc",
        "--nopyo",
        "--disable-python",
        "--apiscan=all",
        "--with-python=/usr/bin/python2.7",
        "--no32bit-scan",
        "-o test_out && rm -rf test_out",
        "--out=test_out && rm -rf test_out",
        "-t test_top && rm -rf test_top",
        "--top=test_top && rm -rf test_top",
        "--download",
        "--check-c-compiler=gc",
        "--check-cxx-compiler=g++",
    ]
    basic_test_cases = [
        "--version",
        "-h",
        "--help",
    ]
    build_test_cases = [
        "-j10",
        "--jobs=10",
        "-d optimized",
        "-d debug",
        "-d release",
        "--build-profile optimized",
        "--build-profile debug",
        "--build-profile release",
        "-p",
        "--progress",
    ]
    step_test_cases = [
        "--files=\"*/main.c,*/test/main.o\"",
    ]
    install_test_cases = [
        "-f",
        "--force",
        "--prefix=./test-prefix && rm -rf ./test-prefix",
        "--exec-prefix=.",
        "--bindir=./test-prefix/bin --sbindir=./test-prefix/sbin --libexecdir=./test-prefix/libexec --sysconfdir=./test-prefix/etc --sharedstatedir=./test-prefix/com --localstatedir=./test-prefix/var --libdir=./test-prefix/lib --includedir=./test-prefix/include --oldincludedir=./test-prefix/usr/include --datarootdir=./test-prefix/share --datadir=./test-prefix/share_root --infodir=./test-prefix/info --localedir=./test-prefix/locale --mandir=./test-prefix/man --docdir=./test-prefix/doc/package --htmldir=./test-prefix/doc --dvidir=./test-prefix/doc --pdfdir=./test-prefix/doc --psdir=./test-prefix/doc && rm -rf ./test-prefix",
    ]
    # Options accepted by every waf command (verbosity, debug zones, ...).
    common_test_cases = [
        "",
        "-k",
        "--keep",
        "-v",
        "--verbose",
        "--nocache",
        "--zones=task_gen",
        "--zones=deps",
        "--zones=tasks",
        "--no-task-lines",
    ]
    test_case_mappings = {
        'basic' : basic_test_cases,
        'configure' : config_test_cases,
        'build' : build_test_cases,
        'step' : step_test_cases,
        'install' : install_test_cases,
    }
    waf_string = sys.executable + ' waf'
    cmd_execute_list = []
    for cmd in cmds:
        # 'basic' skips the common options; every other command gets them.
        if cmd == 'basic':
            cmd_list = []
        else:
            cmd_list = ['%s %s %s' % (waf_string, cmd, option) for option in common_test_cases ]
        if cmd in test_case_mappings:
            cmd_list += ['%s %s %s' % (waf_string, cmd, option) for option in test_case_mappings[cmd] ]
        if cmd == 'basic':
            cmd_list.append('%s configure && %s build && %s --run scratch/myfirst' % tuple([waf_string]*3))
            cmd_list.append('%s configure && %s build && %s --pyrun scratch/myfirst.py' % tuple([waf_string]*3))
        # Wrap each command with its required pre/post commands (see the
        # header comment of this script for the pre/post table).
        if cmd == 'build':
            cmd_list = replace(waf_string+' configure', waf_string+' clean', cmd_list)
            cmd_list.append('%s configure --enable-gcov && %s build --lcov-report && %s clean' % tuple([waf_string]*3))
        elif cmd == 'configure':
            cmd_list = replace(None, waf_string+' distclean', cmd_list)
        elif cmd == 'distcheck':
            cmd_list = replace(waf_string+' dist', 'rm -rf ns-3*.tar.bz2', cmd_list)
        elif cmd == 'docs':
            cmd_list = replace(waf_string+' configure', waf_string+' distclean', cmd_list)
        elif cmd == 'install':
            cmd_list = replace(waf_string+' configure', waf_string+' uninstall', cmd_list)
        elif cmd == 'list':
            cmd_list = replace(waf_string+' configure', waf_string +' distclean', cmd_list)
        elif cmd == 'shell':
            cmd_list = replace(waf_string+' configure', waf_string+' distclean', cmd_list)
        elif cmd == 'step':
            cmd_list = replace(waf_string+' configure', waf_string+' distclean', cmd_list)
        elif cmd == 'uninstall':
            cmd_list = replace(waf_string+' install', None, cmd_list)
        cmd_execute_list += cmd_list
    return runner.runtests(cmd_execute_list)
if __name__ == '__main__':
sys.exit(main(sys.argv)) | gpl-2.0 |
batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Lib/ctypes/test/test_unicode.py | 102 | 1761 | import unittest
import ctypes
from ctypes.test import need_symbol
import _ctypes_test
@need_symbol('c_wchar')
class UnicodeTestCase(unittest.TestCase):
    """Wide-character (c_wchar) argument conversion and unicode buffers."""
    def test_wcslen(self):
        # my_wcslen is exported by the _ctypes_test helper extension module.
        dll = ctypes.CDLL(_ctypes_test.__file__)
        wcslen = dll.my_wcslen
        wcslen.argtypes = [ctypes.c_wchar_p]
        self.assertEqual(wcslen("abc"), 3)
        self.assertEqual(wcslen("ab\u2070"), 3)
        # bytes must be rejected for a c_wchar_p parameter
        self.assertRaises(ctypes.ArgumentError, wcslen, b"ab\xe4")
    def test_buffers(self):
        buf = ctypes.create_unicode_buffer("abc")
        # the buffer includes the trailing NUL
        self.assertEqual(len(buf), 3+1)
        buf = ctypes.create_unicode_buffer("ab\xe4\xf6\xfc")
        self.assertEqual(buf[:], "ab\xe4\xf6\xfc\0")
        self.assertEqual(buf[::], "ab\xe4\xf6\xfc\0")
        self.assertEqual(buf[::-1], '\x00\xfc\xf6\xe4ba')
        self.assertEqual(buf[::2], 'a\xe4\xfc')
        self.assertEqual(buf[6:5:-1], "")
# Shared char* -> char* test function from _ctypes_test, reconfigured by
# StringTestCase.setUp/tearDown below.
func = ctypes.CDLL(_ctypes_test.__file__)._testfunc_p_p
class StringTestCase(UnicodeTestCase):
    """Re-runs the inherited cases with byte-string (c_char_p) equivalents."""
    def setUp(self):
        func.argtypes = [ctypes.c_char_p]
        func.restype = ctypes.c_char_p
    def tearDown(self):
        # restore the module-level func to its default signature
        func.argtypes = None
        func.restype = ctypes.c_int
    def test_func(self):
        self.assertEqual(func(b"abc\xe4"), b"abc\xe4")
    def test_buffers(self):
        # overrides the unicode buffer test with byte-string buffers
        buf = ctypes.create_string_buffer(b"abc")
        self.assertEqual(len(buf), 3+1)
        buf = ctypes.create_string_buffer(b"ab\xe4\xf6\xfc")
        self.assertEqual(buf[:], b"ab\xe4\xf6\xfc\0")
        self.assertEqual(buf[::], b"ab\xe4\xf6\xfc\0")
        self.assertEqual(buf[::-1], b'\x00\xfc\xf6\xe4ba')
        self.assertEqual(buf[::2], b'a\xe4\xfc')
        self.assertEqual(buf[6:5:-1], b"")
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
jonntd/mpv | bootstrap.py | 19 | 1483 | #!/usr/bin/env python
# This script simply downloads waf to the current directory
from __future__ import print_function
import os, sys, stat, hashlib, subprocess
# Pinned waf release, mirror list, and the expected SHA-256 of the download.
WAFRELEASE = "waf-1.8.12"
WAFURLS = ["https://waf.io/" + WAFRELEASE,
           "http://www.freehackers.org/~tnagy/release/" + WAFRELEASE]
SHA256HASH = "01bf2beab2106d1558800c8709bc2c8e496d3da4a2ca343fe091f22fca60c98b"
# Skip the download when a waf binary of the pinned version already exists.
if os.path.exists("waf"):
    wafver = subprocess.check_output([sys.executable, './waf', '--version']).decode()
    if WAFRELEASE.split('-')[1] == wafver.split(' ')[1]:
        print("Found 'waf', skipping download.")
        sys.exit(0)
# Python 3 exposes urlopen/URLError via urllib.request; Python 2 via urllib2.
try:
    from urllib.request import urlopen, URLError
except:
    from urllib2 import urlopen, URLError
# Try each mirror in order until one download succeeds.
waf = None
for WAFURL in WAFURLS:
    try:
        print("Downloading {}...".format(WAFURL))
        waf = urlopen(WAFURL).read()
        break
    except URLError:
        print("Download failed.")
if not waf:
    print("Could not download {}.".format(WAFRELEASE))
    sys.exit(1)
# Install the download only if its checksum matches, and mark it executable.
if SHA256HASH == hashlib.sha256(waf).hexdigest():
    with open("waf", "wb") as wf:
        wf.write(waf)
    os.chmod("waf", os.stat("waf").st_mode | stat.S_IXUSR)
    print("Checksum verified.")
else:
    print("The checksum of the downloaded file does not match!")
    print(" - got: {}".format(hashlib.sha256(waf).hexdigest()))
    print(" - expected: {}".format(SHA256HASH))
    print("Please download and verify the file manually.")
    sys.exit(1)
| gpl-2.0 |
moreati/django | django/utils/baseconv.py | 650 | 2982 | # Copyright (c) 2010 Guilherme Gondim. All rights reserved.
# Copyright (c) 2009 Simon Willison. All rights reserved.
# Copyright (c) 2002 Drew Perttula. All rights reserved.
#
# License:
# Python Software Foundation License version 2
#
# See the file "LICENSE" for terms & conditions for usage, and a DISCLAIMER OF
# ALL WARRANTIES.
#
# This Baseconv distribution contains no GNU General Public Licensed (GPLed)
# code so it may be used in proprietary projects just like prior ``baseconv``
# distributions.
#
# All trademarks referenced herein are property of their respective holders.
#
"""
Convert numbers from base 10 integers to base X strings and back again.
Sample usage::
>>> base20 = BaseConverter('0123456789abcdefghij')
>>> base20.encode(1234)
'31e'
>>> base20.decode('31e')
1234
>>> base20.encode(-1234)
'-31e'
>>> base20.decode('-31e')
-1234
>>> base11 = BaseConverter('0123456789-', sign='$')
      >>> base11.encode(-1234)
      '$-22'
      >>> base11.decode('$-22')
      -1234
"""
BASE2_ALPHABET = '01'
BASE16_ALPHABET = '0123456789ABCDEF'
# Base 56 omits the visually ambiguous characters 0/1/I/O/l/o.
BASE56_ALPHABET = '23456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnpqrstuvwxyz'
BASE36_ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyz'
BASE62_ALPHABET = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
# URL-safe base 64: base 62 plus '-' and '_'.
BASE64_ALPHABET = BASE62_ALPHABET + '-_'
class BaseConverter(object):
    """Bidirectional converter between decimal numbers and strings in an
    arbitrary positional base defined by *digits*.

    *sign* is the character used to mark negative values in the encoded
    form; it must not collide with any digit of the alphabet.
    """
    decimal_digits = '0123456789'

    def __init__(self, digits, sign='-'):
        self.sign = sign
        self.digits = digits
        if sign in self.digits:
            raise ValueError('Sign character found in converter base digits.')

    def __repr__(self):
        return "<BaseConverter: base%s (%s)>" % (len(self.digits), self.digits)

    def encode(self, i):
        """Encode the base-10 value *i* as a string in this converter's base."""
        neg, value = self.convert(i, self.decimal_digits, self.digits, '-')
        return self.sign + value if neg else value

    def decode(self, s):
        """Decode string *s* (in this converter's base) back to a base-10 int."""
        neg, value = self.convert(s, self.digits, self.decimal_digits, self.sign)
        return int('-' + value) if neg else int(value)

    def convert(self, number, from_digits, to_digits, sign):
        """Re-express *number* from *from_digits* into *to_digits*.

        Returns ``(neg, text)`` where *neg* is 1 if *number* started with
        *sign* (which is stripped before conversion) and 0 otherwise.
        """
        text = str(number)
        neg = 1 if text[0] == sign else 0
        if neg:
            text = text[1:]
        # Accumulate the magnitude as a plain Python int.
        x = 0
        for ch in text:
            x = x * len(from_digits) + from_digits.index(ch)
        # Re-emit in the target base, most significant digit first.
        if x == 0:
            res = to_digits[0]
        else:
            chunks = []
            while x > 0:
                x, d = divmod(x, len(to_digits))
                chunks.append(to_digits[d])
            res = ''.join(reversed(chunks))
        return neg, res
# Ready-made converters for the common alphabets defined above.
base2 = BaseConverter(BASE2_ALPHABET)
base16 = BaseConverter(BASE16_ALPHABET)
base36 = BaseConverter(BASE36_ALPHABET)
base56 = BaseConverter(BASE56_ALPHABET)
base62 = BaseConverter(BASE62_ALPHABET)
# base64 needs a non-default sign because '-' is a digit in its alphabet.
base64 = BaseConverter(BASE64_ALPHABET, sign='$')
| bsd-3-clause |
chacoroot/planetary | addons/base_import/tests/test_cases.py | 189 | 15021 | # -*- encoding: utf-8 -*-
import unittest2
from openerp.tests.common import TransactionCase
from .. import models
# Descriptor for the implicit "External ID" column that every importable
# model exposes; prepended to the expected field lists built below.
ID_FIELD = {
    'id': 'id',
    'name': 'id',
    'string': "External ID",
    'required': False,
    'fields': [],
}
def make_field(name='value', string='unknown', required=False, fields=None):
    """Build the expected ``get_fields`` result for a single field.

    Returns the implicit ``ID_FIELD`` descriptor followed by one field
    descriptor assembled from the arguments.

    *fields* defaults to a fresh empty list per call; the previous
    ``fields=[]`` default was a single shared mutable object (classic
    Python pitfall) that a caller could accidentally mutate for everyone.
    """
    return [
        ID_FIELD,
        {'id': name, 'name': name, 'string': string,
         'required': required, 'fields': [] if fields is None else fields},
    ]
def sorted_fields(fields):
    """Recursively sort field descriptor lists (by ``id``) to ease comparison."""
    normalized = []
    for field in fields:
        copy = dict(field)
        copy['fields'] = sorted_fields(field['fields'])
        normalized.append(copy)
    normalized.sort(key=lambda f: f['id'])
    return normalized
class BaseImportCase(TransactionCase):
    """TransactionCase with order-insensitive comparison of field lists."""
    def assertEqualFields(self, fields1, fields2):
        self.assertEqual(sorted_fields(fields1), sorted_fields(fields2))
class test_basic_fields(BaseImportCase):
    """Field introspection (get_fields) for simple, non-relational fields."""
    def get_fields(self, field):
        return self.registry('base_import.import')\
            .get_fields(self.cr, self.uid, 'base_import.tests.models.' + field)
    def test_base(self):
        """ A basic field is not required """
        self.assertEqualFields(self.get_fields('char'), make_field())
    def test_required(self):
        """ Required fields should be flagged (so they can be fill-required) """
        self.assertEqualFields(self.get_fields('char.required'), make_field(required=True))
    def test_readonly(self):
        """ Readonly fields should be filtered out"""
        self.assertEqualFields(self.get_fields('char.readonly'), [ID_FIELD])
    def test_readonly_states(self):
        """ Readonly fields with states should not be filtered out"""
        self.assertEqualFields(self.get_fields('char.states'), make_field())
    def test_readonly_states_noreadonly(self):
        """ Readonly fields with states having nothing to do with
        readonly should still be filtered out"""
        self.assertEqualFields(self.get_fields('char.noreadonly'), [ID_FIELD])
    def test_readonly_states_stillreadonly(self):
        """ Readonly fields with readonly states leaving them readonly
        always... filtered out"""
        self.assertEqualFields(self.get_fields('char.stillreadonly'), [ID_FIELD])
    def test_m2o(self):
        """ M2O fields should allow import of themselves (name_get),
        their id and their xid"""
        self.assertEqualFields(self.get_fields('m2o'), make_field(fields=[
            {'id': 'value', 'name': 'id', 'string': 'External ID', 'required': False, 'fields': []},
            {'id': 'value', 'name': '.id', 'string': 'Database ID', 'required': False, 'fields': []},
        ]))
    def test_m2o_required(self):
        """ If an m2o field is required, its three sub-fields are
        required as well (the client has to handle that: requiredness
        is id-based)
        """
        self.assertEqualFields(self.get_fields('m2o.required'), make_field(required=True, fields=[
            {'id': 'value', 'name': 'id', 'string': 'External ID', 'required': True, 'fields': []},
            {'id': 'value', 'name': '.id', 'string': 'Database ID', 'required': True, 'fields': []},
        ]))
class test_o2m(BaseImportCase):
    """Field introspection for one2many fields (nested sub-field lists)."""
    def get_fields(self, field):
        return self.registry('base_import.import')\
            .get_fields(self.cr, self.uid, 'base_import.tests.models.' + field)
    def test_shallow(self):
        self.assertEqualFields(self.get_fields('o2m'), make_field(fields=[
            ID_FIELD,
            # FIXME: should reverse field be ignored?
            {'id': 'parent_id', 'name': 'parent_id', 'string': 'unknown', 'required': False, 'fields': [
                {'id': 'parent_id', 'name': 'id', 'string': 'External ID', 'required': False, 'fields': []},
                {'id': 'parent_id', 'name': '.id', 'string': 'Database ID', 'required': False, 'fields': []},
            ]},
            {'id': 'value', 'name': 'value', 'string': 'unknown', 'required': False, 'fields': []},
        ]))
class test_match_headers_single(TransactionCase):
    """_match_header: mapping one CSV header cell onto model fields."""
    def test_match_by_name(self):
        match = self.registry('base_import.import')._match_header(
            'f0', [{'name': 'f0'}], {})
        self.assertEqual(match, [{'name': 'f0'}])
    def test_match_by_string(self):
        # matching against the human-readable label is case-insensitive
        match = self.registry('base_import.import')._match_header(
            'some field', [{'name': 'bob', 'string': "Some Field"}], {})
        self.assertEqual(match, [{'name': 'bob', 'string': "Some Field"}])
    def test_nomatch(self):
        match = self.registry('base_import.import')._match_header(
            'should not be', [{'name': 'bob', 'string': "wheee"}], {})
        self.assertEqual(match, [])
    def test_recursive_match(self):
        # 'a/b' headers descend into sub-fields and return the full path
        f = {
            'name': 'f0',
            'string': "My Field",
            'fields': [
                {'name': 'f0', 'string': "Sub field 0", 'fields': []},
                {'name': 'f1', 'string': "Sub field 2", 'fields': []},
            ]
        }
        match = self.registry('base_import.import')._match_header(
            'f0/f1', [f], {})
        self.assertEqual(match, [f, f['fields'][1]])
    def test_recursive_nomatch(self):
        """ Match first level, fail to match second level
        """
        f = {
            'name': 'f0',
            'string': "My Field",
            'fields': [
                {'name': 'f0', 'string': "Sub field 0", 'fields': []},
                {'name': 'f1', 'string': "Sub field 2", 'fields': []},
            ]
        }
        match = self.registry('base_import.import')._match_header(
            'f0/f2', [f], {})
        self.assertEqual(match, [])
class test_match_headers_multiple(TransactionCase):
    """_match_headers: mapping a whole header row onto model fields."""
    def test_noheaders(self):
        self.assertEqual(
            self.registry('base_import.import')._match_headers(
                [], [], {}),
            (None, None)
        )
    def test_nomatch(self):
        # all columns unmatched -> every index maps to None
        self.assertEqual(
            self.registry('base_import.import')._match_headers(
                iter([
                    ['foo', 'bar', 'baz', 'qux'],
                    ['v1', 'v2', 'v3', 'v4'],
                ]),
                [],
                {'headers': True}),
            (
                ['foo', 'bar', 'baz', 'qux'],
                dict.fromkeys(range(4))
            )
        )
    def test_mixed(self):
        # mixture of unmatched, name-matched, string-matched and nested paths
        self.assertEqual(
            self.registry('base_import.import')._match_headers(
                iter(['foo bar baz qux/corge'.split()]),
                [
                    {'name': 'bar', 'string': 'Bar'},
                    {'name': 'bob', 'string': 'Baz'},
                    {'name': 'qux', 'string': 'Qux', 'fields': [
                        {'name': 'corge', 'fields': []},
                    ]}
                ],
                {'headers': True}),
            (['foo', 'bar', 'baz', 'qux/corge'], {
                0: None,
                1: ['bar'],
                2: ['bob'],
                3: ['qux', 'corge'],
            })
        )
class test_preview(TransactionCase):
    """parse_preview: CSV option validation and preview generation."""
    def make_import(self):
        # fixture encoded in EUC-KR so that decoding with the wrong (or
        # missing) encoding option fails
        Import = self.registry('base_import.import')
        id = Import.create(self.cr, self.uid, {
            'res_model': 'res.users',
            'file': u"로그인,언어\nbob,1\n".encode('euc_kr'),
        })
        return Import, id
    def test_encoding(self):
        Import, id = self.make_import()
        result = Import.parse_preview(self.cr, self.uid, id, {
            'quoting': '"',
            'separator': ',',
        })
        self.assertTrue('error' in result)
    def test_csv_errors(self):
        # invalid quoting and separator options both surface as errors
        Import, id = self.make_import()
        result = Import.parse_preview(self.cr, self.uid, id, {
            'quoting': 'foo',
            'separator': ',',
            'encoding': 'euc_kr',
        })
        self.assertTrue('error' in result)
        result = Import.parse_preview(self.cr, self.uid, id, {
            'quoting': '"',
            'separator': 'bob',
            'encoding': 'euc_kr',
        })
        self.assertTrue('error' in result)
    def test_success(self):
        Import = self.registry('base_import.import')
        id = Import.create(self.cr, self.uid, {
            'res_model': 'base_import.tests.models.preview',
            'file': 'name,Some Value,Counter\n'
                    'foo,1,2\n'
                    'bar,3,4\n'
                    'qux,5,6\n'
        })
        result = Import.parse_preview(self.cr, self.uid, id, {
            'quoting': '"',
            'separator': ',',
            'headers': True,
        })
        self.assertEqual(result['matches'], {0: ['name'], 1: ['somevalue'], 2: None})
        self.assertEqual(result['headers'], ['name', 'Some Value', 'Counter'])
        # Order depends on iteration order of fields_get
        self.assertItemsEqual(result['fields'], [
            ID_FIELD,
            {'id': 'name', 'name': 'name', 'string': 'Name', 'required':False, 'fields': []},
            {'id': 'somevalue', 'name': 'somevalue', 'string': 'Some Value', 'required':True, 'fields': []},
            {'id': 'othervalue', 'name': 'othervalue', 'string': 'Other Variable', 'required':False, 'fields': []},
        ])
        self.assertEqual(result['preview'], [
            ['foo', '1', '2'],
            ['bar', '3', '4'],
            ['qux', '5', '6'],
        ])
        # Ensure we only have the response fields we expect
        self.assertItemsEqual(result.keys(), ['matches', 'headers', 'fields', 'preview'])
class test_convert_import_data(TransactionCase):
    """ Tests conversion of base_import.import input into data which
    can be fed to Model.import_data
    """
    def test_all(self):
        Import = self.registry('base_import.import')
        id = Import.create(self.cr, self.uid, {
            'res_model': 'base_import.tests.models.preview',
            'file': 'name,Some Value,Counter\n'
                    'foo,1,2\n'
                    'bar,3,4\n'
                    'qux,5,6\n'
        })
        record = Import.browse(self.cr, self.uid, id)
        data, fields = Import._convert_import_data(
            record, ['name', 'somevalue', 'othervalue'],
            {'quoting': '"', 'separator': ',', 'headers': True,})
        self.assertItemsEqual(fields, ['name', 'somevalue', 'othervalue'])
        self.assertItemsEqual(data, [
            ('foo', '1', '2'),
            ('bar', '3', '4'),
            ('qux', '5', '6'),
        ])
    def test_filtered(self):
        """ If ``False`` is provided as field mapping for a column,
        that column should be removed from importable data
        """
        Import = self.registry('base_import.import')
        id = Import.create(self.cr, self.uid, {
            'res_model': 'base_import.tests.models.preview',
            'file': 'name,Some Value,Counter\n'
                    'foo,1,2\n'
                    'bar,3,4\n'
                    'qux,5,6\n'
        })
        record = Import.browse(self.cr, self.uid, id)
        data, fields = Import._convert_import_data(
            record, ['name', False, 'othervalue'],
            {'quoting': '"', 'separator': ',', 'headers': True,})
        self.assertItemsEqual(fields, ['name', 'othervalue'])
        self.assertItemsEqual(data, [
            ('foo', '2'),
            ('bar', '4'),
            ('qux', '6'),
        ])
    def test_norow(self):
        """ If a row is composed only of empty values (due to having
        filtered out non-empty values from it), it should be removed
        """
        Import = self.registry('base_import.import')
        id = Import.create(self.cr, self.uid, {
            'res_model': 'base_import.tests.models.preview',
            'file': 'name,Some Value,Counter\n'
                    'foo,1,2\n'
                    ',3,\n'
                    ',5,6\n'
        })
        record = Import.browse(self.cr, self.uid, id)
        data, fields = Import._convert_import_data(
            record, ['name', False, 'othervalue'],
            {'quoting': '"', 'separator': ',', 'headers': True,})
        self.assertItemsEqual(fields, ['name', 'othervalue'])
        self.assertItemsEqual(data, [
            ('foo', '2'),
            ('', '6'),
        ])
    def test_empty_rows(self):
        # rows that are blank or whitespace-only are dropped entirely
        Import = self.registry('base_import.import')
        id = Import.create(self.cr, self.uid, {
            'res_model': 'base_import.tests.models.preview',
            'file': 'name,Some Value\n'
                    'foo,1\n'
                    '\n'
                    'bar,2\n'
                    '     \n'
                    '\t \n'
        })
        record = Import.browse(self.cr, self.uid, id)
        data, fields = Import._convert_import_data(
            record, ['name', 'somevalue'],
            {'quoting': '"', 'separator': ',', 'headers': True,})
        self.assertItemsEqual(fields, ['name', 'somevalue'])
        self.assertItemsEqual(data, [
            ('foo', '1'),
            ('bar', '2'),
        ])
    def test_nofield(self):
        # an empty field mapping is rejected
        Import = self.registry('base_import.import')
        id = Import.create(self.cr, self.uid, {
            'res_model': 'base_import.tests.models.preview',
            'file': 'name,Some Value,Counter\n'
                    'foo,1,2\n'
        })
        record = Import.browse(self.cr, self.uid, id)
        self.assertRaises(
            ValueError,
            Import._convert_import_data,
            record, [],
            {'quoting': '"', 'separator': ',', 'headers': True,})
    def test_falsefields(self):
        # a mapping that filters out every column is rejected
        Import = self.registry('base_import.import')
        id = Import.create(self.cr, self.uid, {
            'res_model': 'base_import.tests.models.preview',
            'file': 'name,Some Value,Counter\n'
                    'foo,1,2\n'
        })
        record = Import.browse(self.cr, self.uid, id)
        self.assertRaises(
            ValueError,
            Import._convert_import_data,
            record, [False, False, False],
            {'quoting': '"', 'separator': ',', 'headers': True,})
class test_failures(TransactionCase):
    """Stress cases that previously broke the import machinery."""
    def test_big_attachments(self):
        """
        Ensure big fields (e.g. b64-encoded image data) can be imported and
        we're not hitting limits of the default CSV parser config
        """
        import csv, cStringIO
        from PIL import Image
        # a 1920x1080 RGB image base64-encodes to a multi-megabyte CSV cell
        im = Image.new('RGB', (1920, 1080))
        fout = cStringIO.StringIO()
        writer = csv.writer(fout, dialect=None)
        writer.writerows([
            ['name', 'db_datas'],
            ['foo', im.tobytes().encode('base64')]
        ])
        Import = self.env['base_import.import']
        imp = Import.create({
            'res_model': 'ir.attachment',
            'file': fout.getvalue()
        })
        [results] = imp.do(
            ['name', 'db_datas'],
            {'headers': True, 'separator': ',', 'quoting': '"'})
        self.assertFalse(
            results, "results should be empty on successful import")
| agpl-3.0 |
BobRazoswki/ddp | wp-content/themes/ddp/config/node_modules/node-gyp/gyp/tools/pretty_vcproj.py | 2637 | 9586 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Make the format of a vcproj really pretty.
This script normalize and sort an xml. It also fetches all the properties
inside linked vsprops and include them explicitly in the vcproj.
It outputs the resulting xml to stdout.
"""
__author__ = 'nsylvain (Nicolas Sylvain)'
import os
import sys
from xml.dom.minidom import parse
from xml.dom.minidom import Node
# key=value substitutions parsed from the command line (see main()).
REPLACEMENTS = dict()
# Raw argv, stashed globally so helpers (e.g. AbsoluteNode) can reach argv[1].
ARGUMENTS = None
class CmpTuple(object):
  """Compare function between 2 tuple.

  Python 2 cmp-style comparator: orders tuples by their first element only.
  """
  def __call__(self, x, y):
    return cmp(x[0], y[0])
class CmpNode(object):
  """Compare function between 2 xml nodes.

  Python 2 cmp-style comparator: nodes are compared via a synthesized key
  string built from tag name, value and sorted attributes.
  """
  def __call__(self, x, y):
    def get_string(node):
      node_string = "node"
      node_string += node.nodeName
      if node.nodeValue:
        node_string += node.nodeValue
      if node.attributes:
        # We first sort by name, if present.
        node_string += node.getAttribute("Name")
        all_nodes = []
        for (name, value) in node.attributes.items():
          all_nodes.append((name, value))
        all_nodes.sort(CmpTuple())
        for (name, value) in all_nodes:
          node_string += name
          node_string += value
      return node_string
    return cmp(get_string(x), get_string(y))
def PrettyPrintNode(node, indent=0):
  """Recursively print *node* as indented XML to stdout (Python 2 prints)."""
  if node.nodeType == Node.TEXT_NODE:
    if node.data.strip():
      print '%s%s' % (' '*indent, node.data.strip())
    return
  if node.childNodes:
    node.normalize()
  # Get the number of attributes
  attr_count = 0
  if node.attributes:
    attr_count = node.attributes.length
  # Print the main tag
  if attr_count == 0:
    print '%s<%s>' % (' '*indent, node.nodeName)
  else:
    print '%s<%s' % (' '*indent, node.nodeName)
    # attributes are printed one per line, sorted by name
    all_attributes = []
    for (name, value) in node.attributes.items():
      all_attributes.append((name, value))
    all_attributes.sort(CmpTuple())
    for (name, value) in all_attributes:
      print '%s  %s="%s"' % (' '*indent, name, value)
    print '%s>' % (' '*indent)
  if node.nodeValue:
    print '%s  %s' % (' '*indent, node.nodeValue)
  for sub_node in node.childNodes:
    PrettyPrintNode(sub_node, indent=indent+2)
  print '%s</%s>' % (' '*indent, node.nodeName)
def FlattenFilter(node):
  """Returns a list of all the node and sub nodes.

  <Filter> elements are flattened into their children; the special
  '_excluded_files' filter is dropped entirely.
  """
  if (node.attributes and
      node.getAttribute('Name') == '_excluded_files'):
    return []
  flattened = []
  for child in node.childNodes:
    if child.nodeName == 'Filter':
      flattened += FlattenFilter(child)
    else:
      flattened.append(child)
  return flattened
def FixFilenames(filenames, current_directory):
  """Apply REPLACEMENTS to each filename and absolutize non-$(...) paths
  relative to *current_directory*.

  NOTE(review): calls os.chdir(current_directory) as a side effect and never
  restores the previous working directory -- confirm callers rely on this.
  """
  new_list = []
  for filename in filenames:
    if filename:
      for key in REPLACEMENTS:
        filename = filename.replace(key, REPLACEMENTS[key])
      os.chdir(current_directory)
      filename = filename.strip('"\' ')
      if filename.startswith('$'):
        new_list.append(filename)
      else:
        new_list.append(os.path.abspath(filename))
  return new_list
def AbsoluteNode(node):
  """Makes all the properties we know about in this node absolute."""
  if node.attributes:
    for (name, value) in node.attributes.items():
      if name in ['InheritedPropertySheets', 'RelativePath',
                  'AdditionalIncludeDirectories',
                  'IntermediateDirectory', 'OutputDirectory',
                  'AdditionalLibraryDirectories']:
        # We want to fix up these paths
        path_list = value.split(';')
        new_list = FixFilenames(path_list, os.path.dirname(ARGUMENTS[1]))
        node.setAttribute(name, ';'.join(new_list))
      # drop attributes whose value was empty to begin with
      if not value:
        node.removeAttribute(name)
def CleanupVcproj(node):
  """For each sub node, we call recursively this function.

  Normalizes the subtree in place: absolutizes known path attributes, strips
  whitespace from text nodes, sorts and dedups ';'-separated attribute
  values, flattens <Filter> wrappers and re-inserts the children sorted.
  """
  for sub_node in node.childNodes:
    AbsoluteNode(sub_node)
    CleanupVcproj(sub_node)
  # Normalize the node, and remove all extranous whitespaces.
  for sub_node in node.childNodes:
    if sub_node.nodeType == Node.TEXT_NODE:
      sub_node.data = sub_node.data.replace("\r", "")
      sub_node.data = sub_node.data.replace("\n", "")
      sub_node.data = sub_node.data.rstrip()
  # Fix all the semicolon separated attributes to be sorted, and we also
  # remove the dups.
  if node.attributes:
    for (name, value) in node.attributes.items():
      sorted_list = sorted(value.split(';'))
      unique_list = []
      for i in sorted_list:
        if not unique_list.count(i):
          unique_list.append(i)
      node.setAttribute(name, ';'.join(unique_list))
      if not value:
        node.removeAttribute(name)
  if node.childNodes:
    node.normalize()
  # For each node, take a copy, and remove it from the list.
  node_array = []
  while node.childNodes and node.childNodes[0]:
    # Take a copy of the node and remove it from the list.
    current = node.childNodes[0]
    node.removeChild(current)
    # If the child is a filter, we want to append all its children
    # to this same list.
    if current.nodeName == 'Filter':
      node_array.extend(FlattenFilter(current))
    else:
      node_array.append(current)
  # Sort the list.
  node_array.sort(CmpNode())
  # Insert the nodes in the correct order.
  for new_node in node_array:
    # But don't append empty tool node.
    if new_node.nodeName == 'Tool':
      if new_node.attributes and new_node.attributes.length == 1:
        # This one was empty.
        continue
    if new_node.nodeName == 'UserMacro':
      continue
    node.appendChild(new_node)
def GetConfiguationNodes(vcproj):
  """Return every <Configuration> element under <Configurations>."""
  #TODO(nsylvain): Find a better way to navigate the xml.
  return [sub_node
          for node in vcproj.childNodes if node.nodeName == "Configurations"
          for sub_node in node.childNodes if sub_node.nodeName == "Configuration"]
def GetChildrenVsprops(filename):
  """Return the vsprops files directly inherited by *filename*, absolutized."""
  dom = parse(filename)
  if dom.documentElement.attributes:
    vsprops = dom.documentElement.getAttribute('InheritedPropertySheets')
    return FixFilenames(vsprops.split(';'), os.path.dirname(filename))
  return []
def SeekToNode(node1, child2):
  """Return node1's child with the same tag and Name as *child2*, or None."""
  # A text node does not have properties.
  if child2.nodeType == Node.TEXT_NODE:
    return None
  # Get the name of the current node.
  current_name = child2.getAttribute("Name")
  if not current_name:
    # There is no name. We don't know how to merge.
    return None
  # Look through all the nodes to find a match.
  for sub_node in node1.childNodes:
    if sub_node.nodeName == child2.nodeName:
      name = sub_node.getAttribute("Name")
      if name == current_name:
        return sub_node
  # No match. We give up.
  return None
def MergeAttributes(node1, node2):
  """Merge node2's attributes into node1; clashing values are joined with ';'."""
  # No attributes to merge?
  if not node2.attributes:
    return
  for (name, value2) in node2.attributes.items():
    # Don't merge the 'Name' attribute.
    if name == 'Name':
      continue
    value1 = node1.getAttribute(name)
    if value1:
      # The attribute exists in the main node. If it's equal, we leave it
      # untouched, otherwise we concatenate it.
      if value1 != value2:
        node1.setAttribute(name, ';'.join([value1, value2]))
    else:
      # The attribute does not exist in the main node. We append this one.
      node1.setAttribute(name, value2)
    # If the attribute was a property sheet attributes, we remove it, since
    # they are useless.
    if name == 'InheritedPropertySheets':
      node1.removeAttribute(name)
def MergeProperties(node1, node2):
  """Recursively merge node2's attributes and children into node1."""
  MergeAttributes(node1, node2)
  for child2 in node2.childNodes:
    child1 = SeekToNode(node1, child2)
    if child1:
      MergeProperties(child1, child2)
    else:
      # no matching child in node1: import a deep copy of child2
      node1.appendChild(child2.cloneNode(True))
def main(argv):
  """Main function of this vcproj prettifier.

  Usage: pretty_vcproj.py <file.vcproj> [key=value]... -- merges inherited
  vsprops into the vcproj, cleans it up and prints it to stdout.
  """
  global ARGUMENTS
  ARGUMENTS = argv
  # check if we have exactly 1 parameter.
  if len(argv) < 2:
    print ('Usage: %s "c:\\path\\to\\vcproj.vcproj" [key1=value1] '
           '[key2=value2]' % argv[0])
    return 1
  # Parse the keys
  for i in range(2, len(argv)):
    (key, value) = argv[i].split('=')
    REPLACEMENTS[key] = value
  # Open the vcproj and parse the xml.
  dom = parse(argv[1])
  # First thing we need to do is find the Configuration Node and merge them
  # with the vsprops they include.
  for configuration_node in GetConfiguationNodes(dom.documentElement):
    # Get the property sheets associated with this configuration.
    vsprops = configuration_node.getAttribute('InheritedPropertySheets')
    # Fix the filenames to be absolute.
    vsprops_list = FixFilenames(vsprops.strip().split(';'),
                                os.path.dirname(argv[1]))
    # Extend the list of vsprops with all vsprops contained in the current
    # vsprops.
    for current_vsprops in vsprops_list:
      vsprops_list.extend(GetChildrenVsprops(current_vsprops))
    # Now that we have all the vsprops, we need to merge them.
    for current_vsprops in vsprops_list:
      MergeProperties(configuration_node,
                      parse(current_vsprops).documentElement)
  # Now that everything is merged, we need to cleanup the xml.
  CleanupVcproj(dom.documentElement)
  # Finally, we use the pretty xml function to print the vcproj back to the
  # user.
  #print dom.toprettyxml(newl="\n")
  PrettyPrintNode(dom.documentElement)
  return 0
# Script entry point: prettify the vcproj given on the command line.
if __name__ == '__main__':
  sys.exit(main(sys.argv))
| gpl-2.0 |
Kickoman/mamka-python | www/cgi-bin/get_post.py | 1 | 3256 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import json
err = open('err.log', 'a')
# NOTE(review): the open() above is immediately shadowed by the rebinding
# below, leaking the err.log file handle; confirm whether file logging is
# still wanted before removing either line.
err = sys.stderr
def enc_print(string='', encoding='utf8'):
    """Write *string* plus a newline to stdout in *encoding* (CGI-safe print)."""
    sys.stdout.buffer.write(string.encode(encoding) + b'\n')
def get_content(path):
    """Return the full text content of the file at *path*."""
    with open(path) as handle:
        return handle.read()
# Load the news posts and their comments from the JSON "database" files at
# import time; both dicts are shared by the helpers below.
catalog = get_content('storage/news.json')
catalog_dict = json.loads(catalog)
comments = get_content('storage/comments.json')
comments_dict = json.loads(comments)
def getPostComments(x):
    """Render the HTML comment list for the post at index *x* (int-able)."""
    x = int(x)
    cur_comments = comments_dict['comments'][x]
    # one rendered template block per comment, concatenated
    comment_block = get_content('defaults/comment_block.html')
    result = ""
    for i in cur_comments['comments']:
        cur_block = comment_block.format(**i)
        result += cur_block
    if result == "":
        # fallback message (Belarusian: "No comments! Who are you people?")
        result = "<p>Камэнтароў няма! Вы хто такія?</p>"
    return result
def getPost(x):
    """Build a dict describing post *x*: id, title, value, author and
    pre-rendered comments HTML."""
    post = catalog_dict['news'][x]
    res = {'postid' : x,
           'title' : post['title'],
           'value' : post['value'],
           'author' : post['author'],
           'comments' : getPostComments(x)}
    return res
def getPage(page):
    """Return up to 10 posts for 1-based *page* (newest posts on page 1),
    ordered oldest-first within the page."""
    page = int(page)
    # index of the newest post belonging to this page
    last = len(catalog_dict['news']) - 10 * (page - 1) - 1
    posts = []
    for i in range(min(10, last + 1)):
        post = getPost(last - i)
        posts.append(post)
    posts.reverse()
    # debug dump of the selected posts
    err.write('\n\n\n' + str(posts) + '\n\n\n')
    return posts
def computePages(currentPage):
    """Render the bottom pagination bar HTML for *currentPage*.

    Builds three fragments -- ``before``, ``current`` and ``after`` -- and
    substitutes them into defaults/pages_bar.html.  Up to two immediate
    neighbour pages are linked on each side; remoter ranges are elided
    with a dots divider, with the first/last page always linked.
    """
    currentPage = int(currentPage)
    # Ten posts per page; a partial final page still counts as a page.
    all_pages = len(catalog_dict['news']) // 10 + (1 if len(catalog_dict['news']) % 10 != 0 else 0)
    page_bar = get_content('defaults/pages_bar.html')
    result = {}
    result['current'] = '<a class="btm-page selected">' + str(currentPage) + '</a>'
    # Links preceding the current page.
    if currentPage == 1:
        result['before'] = ""
    elif currentPage == 2:
        result['before'] = '<a href="index.py?page=1" class="btm-page">1</a>'
    elif currentPage == 3:
        result['before'] = '<a href="index.py?page=1" class="btm-page">1</a> <a href="index.py?page=2" class="btm-page">2</a>'
    else:
        result['before'] = '<a href="index.py?page=1" class="btm-page">1</a> <div class="btm-dots">...</div> <a href="index.py?page=' +\
            str(currentPage - 1) + '" class="btm-page">' + str(currentPage - 1) + '</a>'
    # Links following the current page.
    if currentPage == all_pages:
        result['after'] = ""
    elif currentPage == all_pages - 1:
        result['after'] = '<a href="index.py?page=' + str(all_pages) + '" class="btm-page">' +\
            str(all_pages) + '</a>'
    elif currentPage == all_pages - 2:
        result['after'] = '<a href="index.py?page=' + str(all_pages - 1) + '" class="btm-page">' +\
            str(all_pages - 1) + '</a> <a href="index.py?page=' + str(all_pages) + '" class="btm-page">' +\
            str(all_pages) + '</a>'
    else:
        result['after'] = '<a href="index.py?page=' + str(currentPage + 1) + '" class="btm-page">' + str(currentPage + 1) + '</a>' +\
            '<div class="btm-dots">...</div> <a href="index.py?page=' + str(all_pages) + '" class="btm-page">' + str(all_pages) + '</a>'
    return page_bar.format(**result)
| gpl-3.0 |
danimajo/pineapple_pdf | werkzeug/testsuite/datastructures.py | 97 | 27488 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.datastructures
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the functionality of the provided Werkzeug
datastructures.
TODO:
- FileMultiDict
- Immutable types undertested
- Split up dict tests
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import unittest
import pickle
from contextlib import contextmanager
from copy import copy
from werkzeug import datastructures
from werkzeug._compat import iterkeys, itervalues, iteritems, iterlists, \
iterlistvalues, text_type
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.exceptions import BadRequestKeyError
class NativeItermethodsTestCase(WerkzeugTestCase):
    """Tests for the ``native_itermethods`` class decorator."""

    def test_basic(self):
        """Decorated keys/values/items must work both directly and through
        the iterkeys/itervalues/iteritems compatibility helpers, and extra
        arguments (``multi``) must be passed through unchanged."""
        @datastructures.native_itermethods(['keys', 'values', 'items'])
        class StupidDict(object):
            def keys(self, multi=1):
                return iter(['a', 'b', 'c'] * multi)

            def values(self, multi=1):
                return iter([1, 2, 3] * multi)

            def items(self, multi=1):
                return iter(zip(iterkeys(self, multi=multi),
                                itervalues(self, multi=multi)))

        d = StupidDict()
        expected_keys = ['a', 'b', 'c']
        expected_values = [1, 2, 3]
        expected_items = list(zip(expected_keys, expected_values))

        self.assert_equal(list(iterkeys(d)), expected_keys)
        self.assert_equal(list(itervalues(d)), expected_values)
        self.assert_equal(list(iteritems(d)), expected_items)

        # the multi argument must reach the underlying methods
        self.assert_equal(list(iterkeys(d, 2)), expected_keys * 2)
        self.assert_equal(list(itervalues(d, 2)), expected_values * 2)
        self.assert_equal(list(iteritems(d, 2)), expected_items * 2)
class MutableMultiDictBaseTestCase(WerkzeugTestCase):
    """Shared tests for mutable MultiDict implementations.

    Subclasses set ``storage_class`` to the concrete type under test.
    """
    # concrete MultiDict type under test, provided by subclasses
    storage_class = None

    def test_pickle(self):
        """Dicts must round-trip through every pickle protocol, including
        pickles produced under the legacy module path (``werkzeug`` instead
        of ``werkzeug.datastructures``)."""
        cls = self.storage_class
        for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
            d = cls()
            d.setlist(b'foo', [1, 2, 3, 4])
            d.setlist(b'bar', b'foo bar baz'.split())
            s = pickle.dumps(d, protocol)
            ud = pickle.loads(s)
            self.assert_equal(type(ud), type(d))
            self.assert_equal(ud, d)
            # backwards compatibility: unpickle from the old module path
            self.assert_equal(pickle.loads(
                s.replace(b'werkzeug.datastructures', b'werkzeug')), d)
            ud[b'newkey'] = b'bla'
            self.assert_not_equal(ud, d)

    def test_basic_interface(self):
        """Exercise the full mutable MultiDict API end to end: item access,
        list access, mutation, iteration, copying, update, pop variants,
        type conversion, repr, add and to_dict."""
        md = self.storage_class()
        assert isinstance(md, dict)

        mapping = [('a', 1), ('b', 2), ('a', 2), ('d', 3),
                   ('a', 1), ('a', 3), ('d', 4), ('c', 3)]
        md = self.storage_class(mapping)

        # simple getitem gives the first value
        self.assert_equal(md['a'], 1)
        self.assert_equal(md['c'], 3)
        with self.assert_raises(KeyError):
            md['e']
        self.assert_equal(md.get('a'), 1)

        # list getitem
        self.assert_equal(md.getlist('a'), [1, 2, 1, 3])
        self.assert_equal(md.getlist('d'), [3, 4])
        # do not raise if key not found
        self.assert_equal(md.getlist('x'), [])

        # simple setitem overwrites all values
        md['a'] = 42
        self.assert_equal(md.getlist('a'), [42])

        # list setitem
        md.setlist('a', [1, 2, 3])
        self.assert_equal(md['a'], 1)
        self.assert_equal(md.getlist('a'), [1, 2, 3])

        # verify that it does not change original lists
        l1 = [1, 2, 3]
        md.setlist('a', l1)
        del l1[:]
        self.assert_equal(md['a'], 1)

        # setdefault, setlistdefault
        self.assert_equal(md.setdefault('u', 23), 23)
        self.assert_equal(md.getlist('u'), [23])
        del md['u']

        md.setlist('u', [-1, -2])

        # delitem removes every value for the key
        del md['u']
        with self.assert_raises(KeyError):
            md['u']
        del md['d']
        self.assert_equal(md.getlist('d'), [])

        # keys, values, items, lists
        self.assert_equal(list(sorted(md.keys())), ['a', 'b', 'c'])
        self.assert_equal(list(sorted(iterkeys(md))), ['a', 'b', 'c'])

        self.assert_equal(list(sorted(itervalues(md))), [1, 2, 3])
        self.assert_equal(list(sorted(itervalues(md))), [1, 2, 3])

        self.assert_equal(list(sorted(md.items())),
                          [('a', 1), ('b', 2), ('c', 3)])
        self.assert_equal(list(sorted(md.items(multi=True))),
                          [('a', 1), ('a', 2), ('a', 3), ('b', 2), ('c', 3)])
        self.assert_equal(list(sorted(iteritems(md))),
                          [('a', 1), ('b', 2), ('c', 3)])
        self.assert_equal(list(sorted(iteritems(md, multi=True))),
                          [('a', 1), ('a', 2), ('a', 3), ('b', 2), ('c', 3)])

        self.assert_equal(list(sorted(md.lists())),
                          [('a', [1, 2, 3]), ('b', [2]), ('c', [3])])
        self.assert_equal(list(sorted(iterlists(md))),
                          [('a', [1, 2, 3]), ('b', [2]), ('c', [3])])

        # copy method
        c = md.copy()
        self.assert_equal(c['a'], 1)
        self.assert_equal(c.getlist('a'), [1, 2, 3])

        # copy method 2
        c = copy(md)
        self.assert_equal(c['a'], 1)
        self.assert_equal(c.getlist('a'), [1, 2, 3])

        # update with a multidict extends existing value lists
        od = self.storage_class([('a', 4), ('a', 5), ('y', 0)])
        md.update(od)
        self.assert_equal(md.getlist('a'), [1, 2, 3, 4, 5])
        self.assert_equal(md.getlist('y'), [0])

        # update with a regular dict
        md = c
        od = {'a': 4, 'y': 0}
        md.update(od)
        self.assert_equal(md.getlist('a'), [1, 2, 3, 4])
        self.assert_equal(md.getlist('y'), [0])

        # pop, poplist, popitem, popitemlist
        self.assert_equal(md.pop('y'), 0)
        assert 'y' not in md
        self.assert_equal(md.poplist('a'), [1, 2, 3, 4])
        assert 'a' not in md
        self.assert_equal(md.poplist('missing'), [])

        # remaining: b=2, c=3
        popped = md.popitem()
        assert popped in [('b', 2), ('c', 3)]
        popped = md.popitemlist()
        assert popped in [('b', [2]), ('c', [3])]

        # type conversion
        md = self.storage_class({'a': '4', 'b': ['2', '3']})
        self.assert_equal(md.get('a', type=int), 4)
        self.assert_equal(md.getlist('b', type=int), [2, 3])

        # repr
        md = self.storage_class([('a', 1), ('a', 2), ('b', 3)])
        assert "('a', 1)" in repr(md)
        assert "('a', 2)" in repr(md)
        assert "('b', 3)" in repr(md)

        # add and getlist; values that fail the type conversion are skipped
        md.add('c', '42')
        md.add('c', '23')
        self.assert_equal(md.getlist('c'), ['42', '23'])
        md.add('c', 'blah')
        self.assert_equal(md.getlist('c', type=int), [42, 23])

        # setdefault
        md = self.storage_class()
        md.setdefault('x', []).append(42)
        md.setdefault('x', []).append(23)
        self.assert_equal(md['x'], [42, 23])

        # to dict: flat keeps first values only, flat=False keeps lists
        md = self.storage_class()
        md['foo'] = 42
        md.add('bar', 1)
        md.add('bar', 2)
        self.assert_equal(md.to_dict(), {'foo': 42, 'bar': 1})
        self.assert_equal(md.to_dict(flat=False), {'foo': [42], 'bar': [1, 2]})

        # popitem from empty dict
        with self.assert_raises(KeyError):
            self.storage_class().popitem()

        with self.assert_raises(KeyError):
            self.storage_class().popitemlist()

        # key errors are of a special type
        with self.assert_raises(BadRequestKeyError):
            self.storage_class()[42]

        # setlist works
        md = self.storage_class()
        md['foo'] = 42
        md.setlist('foo', [1, 2])
        self.assert_equal(md.getlist('foo'), [1, 2])
class ImmutableDictBaseTestCase(WerkzeugTestCase):
    """Shared tests for immutable dict implementations.

    Subclasses set ``storage_class`` to the concrete type under test.
    """
    # concrete immutable dict type under test, provided by subclasses
    storage_class = None

    def test_follows_dict_interface(self):
        """Read-only access must behave exactly like a plain dict."""
        cls = self.storage_class

        data = {'foo': 1, 'bar': 2, 'baz': 3}
        d = cls(data)

        self.assert_equal(d['foo'], 1)
        self.assert_equal(d['bar'], 2)
        self.assert_equal(d['baz'], 3)
        self.assert_equal(sorted(d.keys()), ['bar', 'baz', 'foo'])
        self.assert_true('foo' in d)
        self.assert_true('foox' not in d)
        self.assert_equal(len(d), 3)

    def test_copies_are_mutable(self):
        """Mutating methods must raise; copies must be mutable and
        independent, while copy() of an immutable returns itself."""
        cls = self.storage_class
        immutable = cls({'a': 1})
        with self.assert_raises(TypeError):
            immutable.pop('a')

        mutable = immutable.copy()
        mutable.pop('a')
        self.assert_true('a' in immutable)
        self.assert_true(mutable is not immutable)
        # copying an immutable dict is a no-op
        self.assert_true(copy(immutable) is immutable)

    def test_dict_is_hashable(self):
        """Immutable dicts must be usable as set members / dict keys."""
        cls = self.storage_class
        immutable = cls({'a': 1, 'b': 2})
        immutable2 = cls({'a': 2, 'b': 2})
        x = set([immutable])
        self.assert_true(immutable in x)
        self.assert_true(immutable2 not in x)
        x.discard(immutable)
        self.assert_true(immutable not in x)
        self.assert_true(immutable2 not in x)
        x.add(immutable2)
        self.assert_true(immutable not in x)
        self.assert_true(immutable2 in x)
        x.add(immutable)
        self.assert_true(immutable in x)
        self.assert_true(immutable2 in x)
class ImmutableTypeConversionDictTestCase(ImmutableDictBaseTestCase):
    """Run the shared immutable-dict tests against ImmutableTypeConversionDict."""
    storage_class = datastructures.ImmutableTypeConversionDict
class ImmutableMultiDictTestCase(ImmutableDictBaseTestCase):
    """Shared immutable-dict tests plus multi-value hashing for
    ImmutableMultiDict."""
    storage_class = datastructures.ImmutableMultiDict

    def test_multidict_is_hashable(self):
        """Hashing must take the full value lists into account."""
        cls = self.storage_class
        immutable = cls({'a': [1, 2], 'b': 2})
        immutable2 = cls({'a': [1], 'b': 2})
        x = set([immutable])
        self.assert_true(immutable in x)
        self.assert_true(immutable2 not in x)
        x.discard(immutable)
        self.assert_true(immutable not in x)
        self.assert_true(immutable2 not in x)
        x.add(immutable2)
        self.assert_true(immutable not in x)
        self.assert_true(immutable2 in x)
        x.add(immutable)
        self.assert_true(immutable in x)
        self.assert_true(immutable2 in x)
class ImmutableDictTestCase(ImmutableDictBaseTestCase):
    """Run the shared immutable-dict tests against ImmutableDict."""
    storage_class = datastructures.ImmutableDict
class ImmutableOrderedMultiDictTestCase(ImmutableDictBaseTestCase):
    """Shared immutable-dict tests plus order-sensitive hashing for
    ImmutableOrderedMultiDict."""
    storage_class = datastructures.ImmutableOrderedMultiDict

    def test_ordered_multidict_is_hashable(self):
        # same key/value pairs in a different insertion order must hash
        # differently for the ordered variant
        a = self.storage_class([('a', 1), ('b', 1), ('a', 2)])
        b = self.storage_class([('a', 1), ('a', 2), ('b', 1)])
        self.assert_not_equal(hash(a), hash(b))
class MultiDictTestCase(MutableMultiDictBaseTestCase):
    """MultiDict-specific tests on top of the shared mutable suite."""
    storage_class = datastructures.MultiDict

    def test_multidict_pop(self):
        """pop() returns the first value, removes every value for the key,
        and honours the default only when the key is missing."""
        make_d = lambda: self.storage_class({'foo': [1, 2, 3, 4]})
        d = make_d()
        self.assert_equal(d.pop('foo'), 1)
        assert not d
        d = make_d()
        self.assert_equal(d.pop('foo', 32), 1)
        assert not d
        d = make_d()
        self.assert_equal(d.pop('foos', 32), 32)
        assert d

        with self.assert_raises(KeyError):
            d.pop('foos')

    def test_setlistdefault(self):
        """setlistdefault() stores the given list and returns it; plain
        getitem still yields the first element."""
        md = self.storage_class()
        self.assert_equal(md.setlistdefault('u', [-1, -2]), [-1, -2])
        self.assert_equal(md.getlist('u'), [-1, -2])
        self.assert_equal(md['u'], -1)

    def test_iter_interfaces(self):
        """The list-valued iteration helpers must stay mutually consistent."""
        mapping = [('a', 1), ('b', 2), ('a', 2), ('d', 3),
                   ('a', 1), ('a', 3), ('d', 4), ('c', 3)]
        md = self.storage_class(mapping)
        self.assert_equal(list(zip(md.keys(), md.listvalues())),
                          list(md.lists()))
        self.assert_equal(list(zip(md, iterlistvalues(md))),
                          list(iterlists(md)))
        self.assert_equal(list(zip(iterkeys(md), iterlistvalues(md))),
                          list(iterlists(md)))
class OrderedMultiDictTestCase(MutableMultiDictBaseTestCase):
    """OrderedMultiDict-specific tests on top of the shared mutable suite."""
    storage_class = datastructures.OrderedMultiDict

    def test_ordered_interface(self):
        """Insertion order must be preserved across add, update, pop and
        the multi-item iteration helpers."""
        cls = self.storage_class

        d = cls()
        assert not d
        d.add('foo', 'bar')
        self.assert_equal(len(d), 1)
        d.add('foo', 'baz')
        self.assert_equal(len(d), 1)
        self.assert_equal(list(iteritems(d)), [('foo', 'bar')])
        self.assert_equal(list(d), ['foo'])
        self.assert_equal(list(iteritems(d, multi=True)),
                          [('foo', 'bar'), ('foo', 'baz')])
        del d['foo']
        assert not d
        self.assert_equal(len(d), 0)
        self.assert_equal(list(d), [])

        d.update([('foo', 1), ('foo', 2), ('bar', 42)])
        d.add('foo', 3)
        self.assert_equal(d.getlist('foo'), [1, 2, 3])
        self.assert_equal(d.getlist('bar'), [42])
        self.assert_equal(list(iteritems(d)), [('foo', 1), ('bar', 42)])

        expected = ['foo', 'bar']

        self.assert_sequence_equal(list(d.keys()), expected)
        self.assert_sequence_equal(list(d), expected)
        self.assert_sequence_equal(list(iterkeys(d)), expected)

        self.assert_equal(list(iteritems(d, multi=True)),
                          [('foo', 1), ('foo', 2), ('bar', 42), ('foo', 3)])
        self.assert_equal(len(d), 2)

        self.assert_equal(d.pop('foo'), 1)
        assert d.pop('blafasel', None) is None
        self.assert_equal(d.pop('blafasel', 42), 42)
        self.assert_equal(len(d), 1)
        self.assert_equal(d.poplist('bar'), [42])
        assert not d

        # BUG FIX: this line was a bare expression statement and never
        # asserted anything; the missing ``assert`` has been added.
        assert d.get('missingkey') is None

        d.add('foo', 42)
        d.add('foo', 23)
        d.add('bar', 2)
        d.add('foo', 42)
        self.assert_equal(d, datastructures.MultiDict(d))
        id = self.storage_class(d)
        self.assert_equal(d, id)
        d.add('foo', 2)
        assert d != id

        d.update({'blah': [1, 2, 3]})
        self.assert_equal(d['blah'], 1)
        self.assert_equal(d.getlist('blah'), [1, 2, 3])

        # setlist works
        d = self.storage_class()
        d['foo'] = 42
        d.setlist('foo', [1, 2])
        self.assert_equal(d.getlist('foo'), [1, 2])
        with self.assert_raises(BadRequestKeyError):
            d.pop('missing')
        with self.assert_raises(BadRequestKeyError):
            d['missing']

        # popping returns the oldest value / full value list for a key
        d = self.storage_class()
        d.add('foo', 23)
        d.add('foo', 42)
        d.add('foo', 1)
        self.assert_equal(d.popitem(), ('foo', 23))
        with self.assert_raises(BadRequestKeyError):
            d.popitem()
        assert not d

        d.add('foo', 23)
        d.add('foo', 42)
        d.add('foo', 1)
        self.assert_equal(d.popitemlist(), ('foo', [23, 42, 1]))

        with self.assert_raises(BadRequestKeyError):
            d.popitemlist()

    def test_iterables(self):
        """The iteration helpers must also work through CombinedMultiDict."""
        a = datastructures.MultiDict((("key_a", "value_a"),))
        b = datastructures.MultiDict((("key_b", "value_b"),))
        ab = datastructures.CombinedMultiDict((a,b))

        self.assert_equal(sorted(ab.lists()), [('key_a', ['value_a']), ('key_b', ['value_b'])])
        self.assert_equal(sorted(ab.listvalues()), [['value_a'], ['value_b']])
        self.assert_equal(sorted(ab.keys()), ["key_a", "key_b"])

        self.assert_equal(sorted(iterlists(ab)), [('key_a', ['value_a']), ('key_b', ['value_b'])])
        self.assert_equal(sorted(iterlistvalues(ab)), [['value_a'], ['value_b']])
        self.assert_equal(sorted(iterkeys(ab)), ["key_a", "key_b"])
class CombinedMultiDictTestCase(WerkzeugTestCase):
    """Tests for CombinedMultiDict, the read-only view over several dicts."""
    storage_class = datastructures.CombinedMultiDict

    def test_basic_interface(self):
        """Lookups search the wrapped dicts in order; value lists merge;
        the combined view (and its copies) must be immutable."""
        d1 = datastructures.MultiDict([('foo', '1')])
        d2 = datastructures.MultiDict([('bar', '2'), ('bar', '3')])
        d = self.storage_class([d1, d2])

        # lookup
        self.assert_equal(d['foo'], '1')
        self.assert_equal(d['bar'], '2')
        self.assert_equal(d.getlist('bar'), ['2', '3'])

        self.assert_equal(sorted(d.items()),
                          [('bar', '2'), ('foo', '1')])
        self.assert_equal(sorted(d.items(multi=True)),
                          [('bar', '2'), ('bar', '3'), ('foo', '1')])
        assert 'missingkey' not in d
        assert 'foo' in d

        # type lookup
        self.assert_equal(d.get('foo', type=int), 1)
        self.assert_equal(d.getlist('bar', type=int), [2, 3])

        # get key errors for missing stuff
        with self.assert_raises(KeyError):
            d['missing']

        # make sure that they are immutable
        with self.assert_raises(TypeError):
            d['foo'] = 'blub'

        # copies are immutable
        d = d.copy()
        with self.assert_raises(TypeError):
            d['foo'] = 'blub'

        # make sure lists merges
        md1 = datastructures.MultiDict((("foo", "bar"),))
        md2 = datastructures.MultiDict((("foo", "blafasel"),))
        x = self.storage_class((md1, md2))
        self.assert_equal(list(iterlists(x)), [('foo', ['bar', 'blafasel'])])
class HeadersTestCase(WerkzeugTestCase):
    """Tests for the Headers datastructure (case-insensitive, ordered,
    multi-value HTTP header storage)."""
    storage_class = datastructures.Headers

    def test_basic_interface(self):
        """Case-insensitive lookup, WSGI list conversion, string rendering
        and extended add() with header options."""
        headers = self.storage_class()
        headers.add('Content-Type', 'text/plain')
        headers.add('X-Foo', 'bar')
        assert 'x-Foo' in headers
        assert 'Content-type' in headers

        headers['Content-Type'] = 'foo/bar'
        self.assert_equal(headers['Content-Type'], 'foo/bar')
        self.assert_equal(len(headers.getlist('Content-Type')), 1)

        # list conversion
        self.assert_equal(headers.to_wsgi_list(), [
            ('Content-Type', 'foo/bar'),
            ('X-Foo', 'bar')
        ])
        self.assert_equal(str(headers), (
            "Content-Type: foo/bar\r\n"
            "X-Foo: bar\r\n"
            "\r\n"))
        self.assert_equal(str(self.storage_class()), "\r\n")

        # extended add: keyword arguments become header options, with
        # quoting applied where required
        headers.add('Content-Disposition', 'attachment', filename='foo')
        self.assert_equal(headers['Content-Disposition'],
                          'attachment; filename=foo')

        headers.add('x', 'y', z='"')
        self.assert_equal(headers['x'], r'y; z="\""')

    def test_defaults_and_conversion(self):
        """get/getlist/setdefault defaults, type conversion and the
        list-like indexing/slicing/deletion operations."""
        # defaults
        headers = self.storage_class([
            ('Content-Type', 'text/plain'),
            ('X-Foo',        'bar'),
            ('X-Bar',        '1'),
            ('X-Bar',        '2')
        ])
        self.assert_equal(headers.getlist('x-bar'), ['1', '2'])
        self.assert_equal(headers.get('x-Bar'), '1')
        self.assert_equal(headers.get('Content-Type'), 'text/plain')

        self.assert_equal(headers.setdefault('X-Foo', 'nope'), 'bar')
        self.assert_equal(headers.setdefault('X-Bar', 'nope'), '1')
        self.assert_equal(headers.setdefault('X-Baz', 'quux'), 'quux')
        self.assert_equal(headers.setdefault('X-Baz', 'nope'), 'quux')
        headers.pop('X-Baz')

        # type conversion
        self.assert_equal(headers.get('x-bar', type=int), 1)
        self.assert_equal(headers.getlist('x-bar', type=int), [1, 2])

        # list like operations
        self.assert_equal(headers[0], ('Content-Type', 'text/plain'))
        self.assert_equal(headers[:1], self.storage_class([('Content-Type', 'text/plain')]))
        del headers[:2]
        del headers[-1]
        self.assert_equal(headers, self.storage_class([('X-Bar', '1')]))

    def test_copying(self):
        """Copies must not share state with the original."""
        a = self.storage_class([('foo', 'bar')])
        b = a.copy()
        a.add('foo', 'baz')
        self.assert_equal(a.getlist('foo'), ['bar', 'baz'])
        self.assert_equal(b.getlist('foo'), ['bar'])

    def test_popping(self):
        """pop() honours defaults and raises KeyError when missing."""
        headers = self.storage_class([('a', 1)])
        self.assert_equal(headers.pop('a'), 1)
        self.assert_equal(headers.pop('b', 2), 2)

        with self.assert_raises(KeyError):
            headers.pop('c')

    def test_set_arguments(self):
        """set() with keyword options must replace the existing value."""
        a = self.storage_class()
        a.set('Content-Disposition', 'useless')
        a.set('Content-Disposition', 'attachment', filename='foo')
        self.assert_equal(a['Content-Disposition'], 'attachment; filename=foo')

    def test_reject_newlines(self):
        """Header injection guard: any embedded CR/LF must raise ValueError
        from every write path."""
        h = self.storage_class()

        for variation in 'foo\nbar', 'foo\r\nbar', 'foo\rbar':
            with self.assert_raises(ValueError):
                h['foo'] = variation
            with self.assert_raises(ValueError):
                h.add('foo', variation)
            with self.assert_raises(ValueError):
                h.add('foo', 'test', option=variation)
            with self.assert_raises(ValueError):
                h.set('foo', variation)
            with self.assert_raises(ValueError):
                h.set('foo', 'test', option=variation)

    def test_slicing(self):
        # there's nothing wrong with these being native strings
        # Headers doesn't care about the data types
        h = self.storage_class()
        h.set('X-Foo-Poo', 'bleh')
        h.set('Content-Type', 'application/whocares')
        h.set('X-Forwarded-For', '192.168.0.123')
        h[:] = [(k, v) for k, v in h if k.startswith(u'X-')]
        self.assert_equal(list(h), [
            ('X-Foo-Poo', 'bleh'),
            ('X-Forwarded-For', '192.168.0.123')
        ])

    def test_bytes_operations(self):
        """as_bytes=True must return raw latin-1 encoded values."""
        h = self.storage_class()
        h.set('X-Foo-Poo', 'bleh')
        h.set('X-Whoops', b'\xff')

        self.assert_equal(h.get('x-foo-poo', as_bytes=True), b'bleh')
        self.assert_equal(h.get('x-whoops', as_bytes=True), b'\xff')
class EnvironHeadersTestCase(WerkzeugTestCase):
    """Tests for EnvironHeaders, the read-only Headers view over a WSGI
    environ dict."""
    storage_class = datastructures.EnvironHeaders

    def test_basic_interface(self):
        # this happens in multiple WSGI servers because they
        # use a very naive way to convert the headers;
        # duplicated HTTP_* / CONTENT_* keys must not produce
        # duplicated headers
        broken_env = {
            'HTTP_CONTENT_TYPE':        'text/html',
            'CONTENT_TYPE':             'text/html',
            'HTTP_CONTENT_LENGTH':      '0',
            'CONTENT_LENGTH':           '0',
            'HTTP_ACCEPT':              '*',
            'wsgi.version':             (1, 0)
        }
        headers = self.storage_class(broken_env)
        assert headers
        self.assert_equal(len(headers), 3)
        self.assert_equal(sorted(headers), [
            ('Accept', '*'),
            ('Content-Length', '0'),
            ('Content-Type', 'text/html')
        ])
        # non-header environ keys must be ignored entirely
        assert not self.storage_class({'wsgi.version': (1, 0)})
        self.assert_equal(len(self.storage_class({'wsgi.version': (1, 0)})), 0)

    def test_return_type_is_unicode(self):
        # environ contains native strings; we return unicode
        headers = self.storage_class({
            'HTTP_FOO': '\xe2\x9c\x93',
            'CONTENT_TYPE': 'text/plain',
        })
        self.assert_equal(headers['Foo'], u"\xe2\x9c\x93")
        assert isinstance(headers['Foo'], text_type)
        assert isinstance(headers['Content-Type'], text_type)
        iter_output = dict(iter(headers))
        self.assert_equal(iter_output['Foo'], u"\xe2\x9c\x93")
        assert isinstance(iter_output['Foo'], text_type)
        assert isinstance(iter_output['Content-Type'], text_type)

    def test_bytes_operations(self):
        """as_bytes=True must round-trip the raw latin-1 value."""
        foo_val = '\xff'
        h = self.storage_class({
            'HTTP_X_FOO': foo_val
        })

        self.assert_equal(h.get('x-foo', as_bytes=True), b'\xff')
        self.assert_equal(h.get('x-foo'), u'\xff')
class HeaderSetTestCase(WerkzeugTestCase):
    """Tests for HeaderSet, the case-insensitive ordered set of header
    values (e.g. for the Vary header)."""
    storage_class = datastructures.HeaderSet

    def test_basic_interface(self):
        """add/find/index/discard/clear with case-insensitive matching;
        find() returns -1 style negatives while index() raises."""
        hs = self.storage_class()
        hs.add('foo')
        hs.add('bar')
        assert 'Bar' in hs
        self.assert_equal(hs.find('foo'), 0)
        self.assert_equal(hs.find('BAR'), 1)
        assert hs.find('baz') < 0
        hs.discard('missing')
        hs.discard('foo')
        assert hs.find('foo') < 0
        self.assert_equal(hs.find('bar'), 0)

        with self.assert_raises(IndexError):
            hs.index('missing')

        self.assert_equal(hs.index('bar'), 0)
        assert hs
        hs.clear()
        assert not hs
class ImmutableListTestCase(WerkzeugTestCase):
    """Tests for ImmutableList."""
    storage_class = datastructures.ImmutableList

    def test_list_hashable(self):
        # hashes like the equivalent tuple, yet does not compare equal
        # to it (list semantics for equality)
        t = (1, 2, 3, 4)
        l = self.storage_class(t)
        self.assert_equal(hash(t), hash(l))
        self.assert_not_equal(t, l)
def make_call_asserter(assert_equal_func, func=None):
    """Utility to assert a certain number of function calls.

    Returns ``(asserter, wrapped)``: every invocation of *wrapped* is
    counted (and forwarded to *func* if one was given), and the
    ``asserter(count)`` context manager resets the counter on entry and
    verifies it on exit via *assert_equal_func*.

    >>> assert_calls, func = make_call_asserter(self.assert_equal)
    >>> with assert_calls(2):
            func()
            func()
    """
    call_count = [0]

    def wrapped(*args, **kwargs):
        call_count[0] += 1
        if func is not None:
            return func(*args, **kwargs)

    @contextmanager
    def asserter(count, msg=None):
        call_count[0] = 0
        yield
        assert_equal_func(call_count[0], count, msg)

    return asserter, wrapped
class CallbackDictTestCase(WerkzeugTestCase):
    """Tests for CallbackDict: on_update must fire exactly once per
    mutating operation and never for reads or failed mutations."""
    storage_class = datastructures.CallbackDict

    def test_callback_dict_reads(self):
        assert_calls, func = make_call_asserter(self.assert_equal)
        initial = {'a': 'foo', 'b': 'bar'}
        dct = self.storage_class(initial=initial, on_update=func)
        with assert_calls(0, 'callback triggered by read-only method'):
            # read-only methods
            dct['a']
            dct.get('a')
            self.assert_raises(KeyError, lambda: dct['x'])
            'a' in dct
            list(iter(dct))
            dct.copy()
        with assert_calls(0, 'callback triggered without modification'):
            # methods that may write but don't
            dct.pop('z', None)
            dct.setdefault('a')

    def test_callback_dict_writes(self):
        assert_calls, func = make_call_asserter(self.assert_equal)
        initial = {'a': 'foo', 'b': 'bar'}
        dct = self.storage_class(initial=initial, on_update=func)
        with assert_calls(8, 'callback not triggered by write method'):
            # always-write methods; eight mutations, eight callbacks
            dct['z'] = 123
            dct['z'] = 123  # must trigger again
            del dct['z']
            dct.pop('b', None)
            dct.setdefault('x')
            dct.popitem()
            dct.update([])
            dct.clear()
        with assert_calls(0, 'callback triggered by failed del'):
            self.assert_raises(KeyError, lambda: dct.__delitem__('x'))
        with assert_calls(0, 'callback triggered by failed pop'):
            self.assert_raises(KeyError, lambda: dct.pop('x'))
def suite():
    """Collect every datastructure test case into a single test suite."""
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(MultiDictTestCase))
    suite.addTest(unittest.makeSuite(OrderedMultiDictTestCase))
    suite.addTest(unittest.makeSuite(CombinedMultiDictTestCase))
    suite.addTest(unittest.makeSuite(ImmutableTypeConversionDictTestCase))
    suite.addTest(unittest.makeSuite(ImmutableMultiDictTestCase))
    suite.addTest(unittest.makeSuite(ImmutableDictTestCase))
    suite.addTest(unittest.makeSuite(ImmutableOrderedMultiDictTestCase))
    suite.addTest(unittest.makeSuite(HeadersTestCase))
    suite.addTest(unittest.makeSuite(EnvironHeadersTestCase))
    suite.addTest(unittest.makeSuite(HeaderSetTestCase))
    suite.addTest(unittest.makeSuite(NativeItermethodsTestCase))
    suite.addTest(unittest.makeSuite(CallbackDictTestCase))
    return suite
| mit |
Distrotech/intellij-community | python/lib/Lib/encodings/cp852.py | 593 | 35258 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP852.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless codec: maps between Unicode and code page 852 via the
    # generated encoding_map / decoding_table defined in this module.
    # (Generated file -- do not hand-edit the mapping logic.)

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Incremental cp852 encoder; charmap encoding is stateless so
    # ``final`` is ignored.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Incremental cp852 decoder; charmap decoding is stateless so
    # ``final`` is ignored.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream writer: inherits encode() from Codec, buffering from
    # codecs.StreamWriter.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream reader: inherits decode() from Codec, buffering from
    # codecs.StreamReader.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry the encodings package registers for cp852."""
    # One stateless Codec instance supplies both bound methods.
    codec = Codec()
    return codecs.CodecInfo(
        name='cp852',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map

# Byte values 0x00-0x7f decode to themselves (ASCII); only the high half
# of the CP852 table needs explicit entries. This dict is the single
# hand-written copy of the mapping -- decoding_table and encoding_map
# below are derived from it instead of being maintained as two further
# 256-entry literals.
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
    0x0080: 0x00c7,     #  LATIN CAPITAL LETTER C WITH CEDILLA
    0x0081: 0x00fc,     #  LATIN SMALL LETTER U WITH DIAERESIS
    0x0082: 0x00e9,     #  LATIN SMALL LETTER E WITH ACUTE
    0x0083: 0x00e2,     #  LATIN SMALL LETTER A WITH CIRCUMFLEX
    0x0084: 0x00e4,     #  LATIN SMALL LETTER A WITH DIAERESIS
    0x0085: 0x016f,     #  LATIN SMALL LETTER U WITH RING ABOVE
    0x0086: 0x0107,     #  LATIN SMALL LETTER C WITH ACUTE
    0x0087: 0x00e7,     #  LATIN SMALL LETTER C WITH CEDILLA
    0x0088: 0x0142,     #  LATIN SMALL LETTER L WITH STROKE
    0x0089: 0x00eb,     #  LATIN SMALL LETTER E WITH DIAERESIS
    0x008a: 0x0150,     #  LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
    0x008b: 0x0151,     #  LATIN SMALL LETTER O WITH DOUBLE ACUTE
    0x008c: 0x00ee,     #  LATIN SMALL LETTER I WITH CIRCUMFLEX
    0x008d: 0x0179,     #  LATIN CAPITAL LETTER Z WITH ACUTE
    0x008e: 0x00c4,     #  LATIN CAPITAL LETTER A WITH DIAERESIS
    0x008f: 0x0106,     #  LATIN CAPITAL LETTER C WITH ACUTE
    0x0090: 0x00c9,     #  LATIN CAPITAL LETTER E WITH ACUTE
    0x0091: 0x0139,     #  LATIN CAPITAL LETTER L WITH ACUTE
    0x0092: 0x013a,     #  LATIN SMALL LETTER L WITH ACUTE
    0x0093: 0x00f4,     #  LATIN SMALL LETTER O WITH CIRCUMFLEX
    0x0094: 0x00f6,     #  LATIN SMALL LETTER O WITH DIAERESIS
    0x0095: 0x013d,     #  LATIN CAPITAL LETTER L WITH CARON
    0x0096: 0x013e,     #  LATIN SMALL LETTER L WITH CARON
    0x0097: 0x015a,     #  LATIN CAPITAL LETTER S WITH ACUTE
    0x0098: 0x015b,     #  LATIN SMALL LETTER S WITH ACUTE
    0x0099: 0x00d6,     #  LATIN CAPITAL LETTER O WITH DIAERESIS
    0x009a: 0x00dc,     #  LATIN CAPITAL LETTER U WITH DIAERESIS
    0x009b: 0x0164,     #  LATIN CAPITAL LETTER T WITH CARON
    0x009c: 0x0165,     #  LATIN SMALL LETTER T WITH CARON
    0x009d: 0x0141,     #  LATIN CAPITAL LETTER L WITH STROKE
    0x009e: 0x00d7,     #  MULTIPLICATION SIGN
    0x009f: 0x010d,     #  LATIN SMALL LETTER C WITH CARON
    0x00a0: 0x00e1,     #  LATIN SMALL LETTER A WITH ACUTE
    0x00a1: 0x00ed,     #  LATIN SMALL LETTER I WITH ACUTE
    0x00a2: 0x00f3,     #  LATIN SMALL LETTER O WITH ACUTE
    0x00a3: 0x00fa,     #  LATIN SMALL LETTER U WITH ACUTE
    0x00a4: 0x0104,     #  LATIN CAPITAL LETTER A WITH OGONEK
    0x00a5: 0x0105,     #  LATIN SMALL LETTER A WITH OGONEK
    0x00a6: 0x017d,     #  LATIN CAPITAL LETTER Z WITH CARON
    0x00a7: 0x017e,     #  LATIN SMALL LETTER Z WITH CARON
    0x00a8: 0x0118,     #  LATIN CAPITAL LETTER E WITH OGONEK
    0x00a9: 0x0119,     #  LATIN SMALL LETTER E WITH OGONEK
    0x00aa: 0x00ac,     #  NOT SIGN
    0x00ab: 0x017a,     #  LATIN SMALL LETTER Z WITH ACUTE
    0x00ac: 0x010c,     #  LATIN CAPITAL LETTER C WITH CARON
    0x00ad: 0x015f,     #  LATIN SMALL LETTER S WITH CEDILLA
    0x00ae: 0x00ab,     #  LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00af: 0x00bb,     #  RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00b0: 0x2591,     #  LIGHT SHADE
    0x00b1: 0x2592,     #  MEDIUM SHADE
    0x00b2: 0x2593,     #  DARK SHADE
    0x00b3: 0x2502,     #  BOX DRAWINGS LIGHT VERTICAL
    0x00b4: 0x2524,     #  BOX DRAWINGS LIGHT VERTICAL AND LEFT
    0x00b5: 0x00c1,     #  LATIN CAPITAL LETTER A WITH ACUTE
    0x00b6: 0x00c2,     #  LATIN CAPITAL LETTER A WITH CIRCUMFLEX
    0x00b7: 0x011a,     #  LATIN CAPITAL LETTER E WITH CARON
    0x00b8: 0x015e,     #  LATIN CAPITAL LETTER S WITH CEDILLA
    0x00b9: 0x2563,     #  BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    0x00ba: 0x2551,     #  BOX DRAWINGS DOUBLE VERTICAL
    0x00bb: 0x2557,     #  BOX DRAWINGS DOUBLE DOWN AND LEFT
    0x00bc: 0x255d,     #  BOX DRAWINGS DOUBLE UP AND LEFT
    0x00bd: 0x017b,     #  LATIN CAPITAL LETTER Z WITH DOT ABOVE
    0x00be: 0x017c,     #  LATIN SMALL LETTER Z WITH DOT ABOVE
    0x00bf: 0x2510,     #  BOX DRAWINGS LIGHT DOWN AND LEFT
    0x00c0: 0x2514,     #  BOX DRAWINGS LIGHT UP AND RIGHT
    0x00c1: 0x2534,     #  BOX DRAWINGS LIGHT UP AND HORIZONTAL
    0x00c2: 0x252c,     #  BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    0x00c3: 0x251c,     #  BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    0x00c4: 0x2500,     #  BOX DRAWINGS LIGHT HORIZONTAL
    0x00c5: 0x253c,     #  BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    0x00c6: 0x0102,     #  LATIN CAPITAL LETTER A WITH BREVE
    0x00c7: 0x0103,     #  LATIN SMALL LETTER A WITH BREVE
    0x00c8: 0x255a,     #  BOX DRAWINGS DOUBLE UP AND RIGHT
    0x00c9: 0x2554,     #  BOX DRAWINGS DOUBLE DOWN AND RIGHT
    0x00ca: 0x2569,     #  BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    0x00cb: 0x2566,     #  BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    0x00cc: 0x2560,     #  BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    0x00cd: 0x2550,     #  BOX DRAWINGS DOUBLE HORIZONTAL
    0x00ce: 0x256c,     #  BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    0x00cf: 0x00a4,     #  CURRENCY SIGN
    0x00d0: 0x0111,     #  LATIN SMALL LETTER D WITH STROKE
    0x00d1: 0x0110,     #  LATIN CAPITAL LETTER D WITH STROKE
    0x00d2: 0x010e,     #  LATIN CAPITAL LETTER D WITH CARON
    0x00d3: 0x00cb,     #  LATIN CAPITAL LETTER E WITH DIAERESIS
    0x00d4: 0x010f,     #  LATIN SMALL LETTER D WITH CARON
    0x00d5: 0x0147,     #  LATIN CAPITAL LETTER N WITH CARON
    0x00d6: 0x00cd,     #  LATIN CAPITAL LETTER I WITH ACUTE
    0x00d7: 0x00ce,     #  LATIN CAPITAL LETTER I WITH CIRCUMFLEX
    0x00d8: 0x011b,     #  LATIN SMALL LETTER E WITH CARON
    0x00d9: 0x2518,     #  BOX DRAWINGS LIGHT UP AND LEFT
    0x00da: 0x250c,     #  BOX DRAWINGS LIGHT DOWN AND RIGHT
    0x00db: 0x2588,     #  FULL BLOCK
    0x00dc: 0x2584,     #  LOWER HALF BLOCK
    0x00dd: 0x0162,     #  LATIN CAPITAL LETTER T WITH CEDILLA
    0x00de: 0x016e,     #  LATIN CAPITAL LETTER U WITH RING ABOVE
    0x00df: 0x2580,     #  UPPER HALF BLOCK
    0x00e0: 0x00d3,     #  LATIN CAPITAL LETTER O WITH ACUTE
    0x00e1: 0x00df,     #  LATIN SMALL LETTER SHARP S
    0x00e2: 0x00d4,     #  LATIN CAPITAL LETTER O WITH CIRCUMFLEX
    0x00e3: 0x0143,     #  LATIN CAPITAL LETTER N WITH ACUTE
    0x00e4: 0x0144,     #  LATIN SMALL LETTER N WITH ACUTE
    0x00e5: 0x0148,     #  LATIN SMALL LETTER N WITH CARON
    0x00e6: 0x0160,     #  LATIN CAPITAL LETTER S WITH CARON
    0x00e7: 0x0161,     #  LATIN SMALL LETTER S WITH CARON
    0x00e8: 0x0154,     #  LATIN CAPITAL LETTER R WITH ACUTE
    0x00e9: 0x00da,     #  LATIN CAPITAL LETTER U WITH ACUTE
    0x00ea: 0x0155,     #  LATIN SMALL LETTER R WITH ACUTE
    0x00eb: 0x0170,     #  LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
    0x00ec: 0x00fd,     #  LATIN SMALL LETTER Y WITH ACUTE
    0x00ed: 0x00dd,     #  LATIN CAPITAL LETTER Y WITH ACUTE
    0x00ee: 0x0163,     #  LATIN SMALL LETTER T WITH CEDILLA
    0x00ef: 0x00b4,     #  ACUTE ACCENT
    0x00f0: 0x00ad,     #  SOFT HYPHEN
    0x00f1: 0x02dd,     #  DOUBLE ACUTE ACCENT
    0x00f2: 0x02db,     #  OGONEK
    0x00f3: 0x02c7,     #  CARON
    0x00f4: 0x02d8,     #  BREVE
    0x00f5: 0x00a7,     #  SECTION SIGN
    0x00f6: 0x00f7,     #  DIVISION SIGN
    0x00f7: 0x00b8,     #  CEDILLA
    0x00f8: 0x00b0,     #  DEGREE SIGN
    0x00f9: 0x00a8,     #  DIAERESIS
    0x00fa: 0x02d9,     #  DOT ABOVE
    0x00fb: 0x0171,     #  LATIN SMALL LETTER U WITH DOUBLE ACUTE
    0x00fc: 0x0158,     #  LATIN CAPITAL LETTER R WITH CARON
    0x00fd: 0x0159,     #  LATIN SMALL LETTER R WITH CARON
    0x00fe: 0x25a0,     #  BLACK SQUARE
    0x00ff: 0x00a0,     #  NO-BREAK SPACE
})

### Decoding Table

# Derived from decoding_map so the byte -> character mapping is spelled
# out exactly once.  u'%c' % n yields the unicode character for code
# point n on both Python 2 and Python 3.
decoding_table = u''.join(u'%c' % decoding_map[byte] for byte in range(256))

### Encoding Map

# The inverse mapping is built by the codecs helper rather than being
# maintained as a second hand-written 256-entry literal; the resulting
# object is accepted directly by codecs.charmap_encode.
encoding_map = codecs.charmap_build(decoding_table)
| apache-2.0 |
HBehrens/feedsanitizer | django/forms/formsets.py | 78 | 14981 | from forms import Form
from django.core.exceptions import ValidationError
from django.utils.encoding import StrAndUnicode
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from fields import IntegerField, BooleanField
from widgets import Media, HiddenInput
from util import ErrorList
__all__ = ('BaseFormSet', 'all_valid')

# special field names
# Hidden ManagementForm fields carrying the formset's bookkeeping counts.
TOTAL_FORM_COUNT = 'TOTAL_FORMS'
INITIAL_FORM_COUNT = 'INITIAL_FORMS'
MAX_NUM_FORM_COUNT = 'MAX_NUM_FORMS'
# Names of the optional per-form ordering and deletion fields.
ORDERING_FIELD_NAME = 'ORDER'
DELETION_FIELD_NAME = 'DELETE'
class ManagementForm(Form):
    """
    ``ManagementForm`` is used to keep track of how many form instances
    are displayed on the page. If adding new forms via javascript, you should
    increment the count field of this form as well.
    """
    def __init__(self, *args, **kwargs):
        # Inject the bookkeeping fields into base_fields so every instance
        # renders its counts as hidden inputs.  Only the max-num field is
        # optional; the two counters are required.
        for field_name, required in ((TOTAL_FORM_COUNT, True),
                                     (INITIAL_FORM_COUNT, True),
                                     (MAX_NUM_FORM_COUNT, False)):
            self.base_fields[field_name] = IntegerField(required=required,
                                                        widget=HiddenInput)
        super(ManagementForm, self).__init__(*args, **kwargs)
class BaseFormSet(StrAndUnicode):
"""
A collection of instances of the same Form class.
"""
    def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
                 initial=None, error_class=ErrorList):
        """
        Initialize the formset and eagerly construct its member forms.

        ``data``/``files`` are the bound POST/file payloads; ``auto_id``
        and ``prefix`` control HTML id/name generation; ``initial`` is a
        list of per-form initial-data dicts; ``error_class`` is the
        ErrorList class used to collect validation errors.
        """
        # Bound as soon as either payload is supplied (even an empty dict).
        self.is_bound = data is not None or files is not None
        self.prefix = prefix or self.get_default_prefix()
        self.auto_id = auto_id
        # Fall back to empty dicts so later key lookups never hit None.
        self.data = data or {}
        self.files = files or {}
        self.initial = initial
        self.error_class = error_class
        # Errors are computed lazily; None means "not validated yet".
        self._errors = None
        self._non_form_errors = None
        # construct the forms in the formset
        self._construct_forms()
    def __unicode__(self):
        # The default string form of a formset is its table rendering.
        return self.as_table()
def __iter__(self):
"""Yields the forms in the order they should be rendered"""
return iter(self.forms)
    def __getitem__(self, index):
        """Returns the form at the given index, based on the rendering order"""
        # Indexes into list(self) rather than self.forms directly, so a
        # subclass that overrides __iter__ to reorder forms is respected.
        return list(self)[index]
    def __len__(self):
        # A formset's length is the number of constructed member forms.
        return len(self.forms)
def _management_form(self):
"""Returns the ManagementForm instance for this FormSet."""
if self.is_bound:
form = ManagementForm(self.data, auto_id=self.auto_id, prefix=self.prefix)
if not form.is_valid():
raise ValidationError('ManagementForm data is missing or has been tampered with')
else:
form = ManagementForm(auto_id=self.auto_id, prefix=self.prefix, initial={
TOTAL_FORM_COUNT: self.total_form_count(),
INITIAL_FORM_COUNT: self.initial_form_count(),
MAX_NUM_FORM_COUNT: self.max_num
})
return form
management_form = property(_management_form)
def total_form_count(self):
"""Returns the total number of forms in this FormSet."""
if self.is_bound:
return self.management_form.cleaned_data[TOTAL_FORM_COUNT]
else:
initial_forms = self.initial_form_count()
total_forms = initial_forms + self.extra
# Allow all existing related objects/inlines to be displayed,
# but don't allow extra beyond max_num.
if initial_forms > self.max_num >= 0:
total_forms = initial_forms
elif total_forms > self.max_num >= 0:
total_forms = self.max_num
return total_forms
def initial_form_count(self):
"""Returns the number of forms that are required in this FormSet."""
if self.is_bound:
return self.management_form.cleaned_data[INITIAL_FORM_COUNT]
else:
# Use the length of the inital data if it's there, 0 otherwise.
initial_forms = self.initial and len(self.initial) or 0
if initial_forms > self.max_num >= 0:
initial_forms = self.max_num
return initial_forms
def _construct_forms(self):
# instantiate all the forms and put them in self.forms
self.forms = []
for i in xrange(self.total_form_count()):
self.forms.append(self._construct_form(i))
def _construct_form(self, i, **kwargs):
"""
Instantiates and returns the i-th form instance in a formset.
"""
defaults = {'auto_id': self.auto_id, 'prefix': self.add_prefix(i)}
if self.is_bound:
defaults['data'] = self.data
defaults['files'] = self.files
if self.initial:
try:
defaults['initial'] = self.initial[i]
except IndexError:
pass
# Allow extra forms to be empty.
if i >= self.initial_form_count():
defaults['empty_permitted'] = True
defaults.update(kwargs)
form = self.form(**defaults)
self.add_fields(form, i)
return form
def _get_initial_forms(self):
"""Return a list of all the initial forms in this formset."""
return self.forms[:self.initial_form_count()]
initial_forms = property(_get_initial_forms)
def _get_extra_forms(self):
"""Return a list of all the extra forms in this formset."""
return self.forms[self.initial_form_count():]
extra_forms = property(_get_extra_forms)
def _get_empty_form(self, **kwargs):
defaults = {
'auto_id': self.auto_id,
'prefix': self.add_prefix('__prefix__'),
'empty_permitted': True,
}
if self.is_bound:
defaults['data'] = self.data
defaults['files'] = self.files
defaults.update(kwargs)
form = self.form(**defaults)
self.add_fields(form, None)
return form
empty_form = property(_get_empty_form)
# Maybe this should just go away?
def _get_cleaned_data(self):
"""
Returns a list of form.cleaned_data dicts for every form in self.forms.
"""
if not self.is_valid():
raise AttributeError("'%s' object has no attribute 'cleaned_data'" % self.__class__.__name__)
return [form.cleaned_data for form in self.forms]
cleaned_data = property(_get_cleaned_data)
    def _get_deleted_forms(self):
        """
        Returns a list of forms that have been marked for deletion. Raises an
        AttributeError if deletion is not allowed.
        """
        if not self.is_valid() or not self.can_delete:
            raise AttributeError("'%s' object has no attribute 'deleted_forms'" % self.__class__.__name__)
        # construct _deleted_form_indexes which is just a list of form indexes
        # that have had their deletion widget set to True.
        # The scan is done once and cached on the instance; subsequent
        # accesses reuse the cached indexes.
        if not hasattr(self, '_deleted_form_indexes'):
            self._deleted_form_indexes = []
            for i in range(0, self.total_form_count()):
                form = self.forms[i]
                # if this is an extra form and hasn't changed, don't consider it
                if i >= self.initial_form_count() and not form.has_changed():
                    continue
                if self._should_delete_form(form):
                    self._deleted_form_indexes.append(i)
        return [self.forms[i] for i in self._deleted_form_indexes]
    deleted_forms = property(_get_deleted_forms)
    def _get_ordered_forms(self):
        """
        Returns a list of forms in the order specified by the incoming data.
        Raises an AttributeError if ordering is not allowed.
        """
        if not self.is_valid() or not self.can_order:
            raise AttributeError("'%s' object has no attribute 'ordered_forms'" % self.__class__.__name__)
        # Construct _ordering, which is a list of (form_index, order_field_value)
        # tuples. After constructing this list, we'll sort it by order_field_value
        # so we have a way to get to the form indexes in the order specified
        # by the form data.  The result is cached on the instance.
        if not hasattr(self, '_ordering'):
            self._ordering = []
            for i in range(0, self.total_form_count()):
                form = self.forms[i]
                # if this is an extra form and hasn't changed, don't consider it
                if i >= self.initial_form_count() and not form.has_changed():
                    continue
                # don't add data marked for deletion to self.ordered_data
                if self.can_delete and self._should_delete_form(form):
                    continue
                self._ordering.append((i, form.cleaned_data[ORDERING_FIELD_NAME]))
            # After we're done populating self._ordering, sort it.
            # A sort function to order things numerically ascending, but
            # None should be sorted below anything else. Allowing None as
            # a comparison value makes it so we can leave ordering fields
            # blank.
            def compare_ordering_key(k):
                if k[1] is None:
                    return (1, 0) # +infinity, larger than any number
                return (0, k[1])
            self._ordering.sort(key=compare_ordering_key)
        # Return a list of form.cleaned_data dicts in the order specified by
        # the form data.
        return [self.forms[i[0]] for i in self._ordering]
    ordered_forms = property(_get_ordered_forms)
    #@classmethod
    def get_default_prefix(cls):
        """Return the default HTML name prefix used when none is supplied."""
        return 'form'
    # Legacy-style classmethod wrapping (predates decorator syntax usage here).
    get_default_prefix = classmethod(get_default_prefix)
def non_form_errors(self):
"""
Returns an ErrorList of errors that aren't associated with a particular
form -- i.e., from formset.clean(). Returns an empty ErrorList if there
are none.
"""
if self._non_form_errors is not None:
return self._non_form_errors
return self.error_class()
def _get_errors(self):
"""
Returns a list of form.errors for every form in self.forms.
"""
if self._errors is None:
self.full_clean()
return self._errors
errors = property(_get_errors)
def _should_delete_form(self, form):
# The way we lookup the value of the deletion field here takes
# more code than we'd like, but the form's cleaned_data will
# not exist if the form is invalid.
field = form.fields[DELETION_FIELD_NAME]
raw_value = form._raw_value(DELETION_FIELD_NAME)
should_delete = field.clean(raw_value)
return should_delete
    def is_valid(self):
        """
        Returns True if form.errors is empty for every form in self.forms.
        """
        if not self.is_bound:
            return False
        # We loop over every form.errors here rather than short circuiting on the
        # first failure to make sure validation gets triggered for every form.
        forms_valid = True
        # Accessing self.errors forces full_clean() to run before the
        # per-form checks below; the name itself is intentionally unused.
        err = self.errors
        for i in range(0, self.total_form_count()):
            form = self.forms[i]
            if self.can_delete:
                if self._should_delete_form(form):
                    # This form is going to be deleted so any of its errors
                    # should not cause the entire formset to be invalid.
                    continue
            if bool(self.errors[i]):
                forms_valid = False
        return forms_valid and not bool(self.non_form_errors())
def full_clean(self):
"""
Cleans all of self.data and populates self._errors.
"""
self._errors = []
if not self.is_bound: # Stop further processing.
return
for i in range(0, self.total_form_count()):
form = self.forms[i]
self._errors.append(form.errors)
# Give self.clean() a chance to do cross-form validation.
try:
self.clean()
except ValidationError, e:
self._non_form_errors = self.error_class(e.messages)
    def clean(self):
        """
        Hook for doing any extra formset-wide cleaning after Form.clean() has
        been called on every form. Any ValidationError raised by this method
        will not be associated with a particular form; it will be accessible
        via formset.non_form_errors()
        """
        pass
def add_fields(self, form, index):
"""A hook for adding extra fields on to each form instance."""
if self.can_order:
# Only pre-fill the ordering field for initial forms.
if index is not None and index < self.initial_form_count():
form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_(u'Order'), initial=index+1, required=False)
else:
form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_(u'Order'), required=False)
if self.can_delete:
form.fields[DELETION_FIELD_NAME] = BooleanField(label=_(u'Delete'), required=False)
    def add_prefix(self, index):
        """Return the HTML name prefix for the form at ``index``, e.g. 'form-0'."""
        return '%s-%s' % (self.prefix, index)
def is_multipart(self):
"""
Returns True if the formset needs to be multipart-encrypted, i.e. it
has FileInput. Otherwise, False.
"""
return self.forms and self.forms[0].is_multipart()
def _get_media(self):
# All the forms on a FormSet are the same, so you only need to
# interrogate the first form for media.
if self.forms:
return self.forms[0].media
else:
return Media()
media = property(_get_media)
def as_table(self):
"Returns this formset rendered as HTML <tr>s -- excluding the <table></table>."
# XXX: there is no semantic division between forms here, there
# probably should be. It might make sense to render each form as a
# table row with each field as a td.
forms = u' '.join([form.as_table() for form in self])
return mark_safe(u'\n'.join([unicode(self.management_form), forms]))
def as_p(self):
"Returns this formset rendered as HTML <p>s."
forms = u' '.join([form.as_p() for form in self])
return mark_safe(u'\n'.join([unicode(self.management_form), forms]))
def as_ul(self):
"Returns this formset rendered as HTML <li>s."
forms = u' '.join([form.as_ul() for form in self])
return mark_safe(u'\n'.join([unicode(self.management_form), forms]))
def formset_factory(form, formset=BaseFormSet, extra=1, can_order=False,
                    can_delete=False, max_num=None):
    """Return a FormSet for the given form class."""
    attrs = dict(form=form, extra=extra, can_order=can_order,
                 can_delete=can_delete, max_num=max_num)
    name = form.__name__ + 'FormSet'
    return type(name, (formset,), attrs)
def all_valid(formsets):
    """Returns true if every formset in formsets is valid."""
    # Validate every formset (no short-circuit) so each one gets the chance
    # to populate its errors.
    results = [formset.is_valid() for formset in formsets]
    return all(results)
| mit |
LukasBoersma/pyowm | tests/functional/webapi25/test_cache_webapi25.py | 1 | 5586 | """
Functional tests for checking PyOWM caching features
"""
import unittest
from time import time
from pyowm.webapi25.configuration25 import parsers
from pyowm.webapi25.owm25 import OWM25
from pyowm.caches.lrucache import LRUCache
from pyowm.abstractions.owmcache import OWMCache
class CacheWrapper(OWMCache):
    """
    Wrapper class whose aim is to track down real API calls and cache hits.
    """
    def __init__(self, cache):
        self._cache = cache
        self.__api_calls = 0
        self.__last_request_was_hit = False

    def get(self, request_url):
        """Look up the URL in the wrapped cache, recording hit/miss stats."""
        cached = self._cache.get(request_url)
        was_hit = bool(cached)
        self.__last_request_was_hit = was_hit
        if not was_hit:
            # A miss means the caller will have to hit the real web API.
            self.__api_calls += 1
        return cached

    def set(self, request_url, response_json):
        """Delegate storage to the wrapped cache."""
        return self._cache.set(request_url, response_json)

    def last_request_was_hit(self):
        """Tell whether the most recent get() was served from the cache."""
        return self.__last_request_was_hit

    def api_calls(self):
        """Return how many get() calls missed the cache so far."""
        return self.__api_calls
class CacheTestWebAPI25(unittest.TestCase):
    # NOTE(review): these are functional tests that call the live
    # OpenWeatherMap web API with a hard-coded key, so they require network
    # access and may be slow or flaky.
    def test_caching_prevents_API_calls(self):
        # Large cache with a one-hour expiry: nothing should be evicted.
        cache = LRUCache(20, 1000 * 60 * 60)
        wrapped_cache = CacheWrapper(cache)
        owm = OWM25(parsers, '5746e1a976021a0', wrapped_cache)
        self.assertFalse(wrapped_cache.last_request_was_hit())
        self.assertEqual(0, wrapped_cache.api_calls())
        owm.weather_at_place('London,uk') # Comes from OWM web API
        self.assertFalse(wrapped_cache.last_request_was_hit())
        self.assertEqual(1, wrapped_cache.api_calls())
        owm.weather_at_place('London,uk') # Comes from cache
        self.assertTrue(wrapped_cache.last_request_was_hit())
        self.assertEqual(1, wrapped_cache.api_calls())
        owm.weather_at_place('London,uk') # Comes from cache again
        self.assertTrue(wrapped_cache.last_request_was_hit())
        self.assertEqual(1, wrapped_cache.api_calls())
        owm.weather_at_place('Kiev') # Comes from OWM web API
        self.assertFalse(wrapped_cache.last_request_was_hit())
        self.assertEqual(2, wrapped_cache.api_calls())
        owm.weather_at_place('Kiev') # Comes from cache
        self.assertTrue(wrapped_cache.last_request_was_hit())
        self.assertEqual(2, wrapped_cache.api_calls())
        owm.weather_at_place('London,uk') # Comes from cache
        self.assertTrue(wrapped_cache.last_request_was_hit())
        self.assertEqual(2, wrapped_cache.api_calls())
    def test_cache_limits(self):
        """
        Test that when cache is full, cached elements undergo a turnover and
        the real OWM web API is invoked
        """
        cache = LRUCache(3, 1000 * 60 * 60) # Only three cacheable elements!
        wrapped_cache = CacheWrapper(cache)
        owm = OWM25(parsers, '5746e1a976021a0', wrapped_cache)
        owm.weather_at_place('London,uk') # Comes from OWM web API
        owm.weather_at_place('Kiev') # Comes from OWM web API
        owm.weather_at_place('Madrid') # Comes from OWM web API
        self.assertEqual(3, wrapped_cache.api_calls())
        owm.weather_at_place('London,uk') # Comes from cache
        owm.weather_at_place('Kiev') # Comes from cache
        self.assertEqual(3, wrapped_cache.api_calls())
        # Tokyo overflows the 3-slot cache, evicting the LRU entry (Madrid).
        owm.weather_at_place('Tokyo')
        self.assertEqual(4, wrapped_cache.api_calls())
        owm.weather_at_place('Madrid') # Now Madrid should have been pulled out of cache
        self.assertEqual(5, wrapped_cache.api_calls())
    def test_caching_times(self):
        """
        Test that subsequent calls to the same endpoint and with the same
        query parameters are cached if OWM instance is configured with a
        non-null cache.
        """
        cache = LRUCache(20, 1000 * 60 * 60)
        owm = OWM25(parsers, '5746e1a976021a0', cache)
        before_request = time()
        o1 = owm.weather_at_place('London,uk') # Comes from OWM web API
        after_request = time()
        o2 = owm.weather_at_place('London,uk') # Comes from cache
        after_cache_hit_1 = time()
        owm.weather_at_place('Kiev') # Comes from OWM web API
        owm.weather_at_coords(-33.936524, 18.503723) # Comes from OWM web API
        owm.weather_at_coords(-33.936524, 18.503723) # Cached, we don't care
        owm.weather_at_coords(-33.936524, 18.503723) # Cached, we don't care
        before_cache_hit_2 = time()
        o3 = owm.weather_at_place('London,uk') # Comes from cache
        after_cache_hit_2 = time()
        # Check results: cached responses must carry a reception time within
        # 20 seconds of the original request's (delta=20 below).
        self.assertAlmostEquals(o1.get_reception_time(),
                                o2.get_reception_time(),
                                places=None, msg=None, delta=20)
        self.assertAlmostEquals(o1.get_reception_time(),
                                o3.get_reception_time(),
                                places=None, msg=None, delta=20)
        #Check times: all cache hit times must be less than the former OWM web
        #API request time and ratio between cache hit times and request time
        #should be far less than 1
        req_delay = after_request - before_request
        cache_hit_1_delay = after_cache_hit_1 - after_request
        cache_hit_2_delay = after_cache_hit_2 - before_cache_hit_2
        self.assertTrue(cache_hit_1_delay < req_delay)
        self.assertTrue(cache_hit_2_delay < req_delay)
        self.assertTrue(cache_hit_1_delay / req_delay < 1)
        self.assertTrue(cache_hit_2_delay / req_delay < 1)
| mit |
plucena24/OpenClos | jnpr/openclos/tests/unit/test_ztp.py | 3 | 6153 | '''
Created on Sep 11, 2014
@author: moloyc
'''
import unittest
from flexmock import flexmock
from jnpr.openclos.ztp import ZtpServer
from test_model import createPod, createDevice, createPodDevice, LeafSetting
from test_dao import InMemoryDao
class TestZtp(unittest.TestCase):
    # Unit tests for ZtpServer DHCP configuration generation, backed by an
    # in-memory DAO and flexmock'ed platform checks.
    def setUp(self):
        self.__conf = {}
        self.__conf['httpServer'] = {'ipAddr': '127.0.0.1'}
        self.ztpServer = ZtpServer(self.__conf, daoClass = InMemoryDao)
        self._dao = InMemoryDao.getInstance()
    def tearDown(self):
        # Reset the singleton DAO between tests.
        InMemoryDao._destroy()
    def testGenerateDhcpConfWithNoPodDevice(self):
        from jnpr.openclos.l3Clos import util
        # Pretend we run on Ubuntu so the Ubuntu dhcpd template is used.
        flexmock(util, isPlatformUbuntu = True)
        with self._dao.getReadWriteSession() as session:
            dhcpConf = self.ztpServer.generateSingleDhcpConf(session)
        # No unexpanded template placeholders may remain.
        self.assertFalse('{{' in dhcpConf)
        self.assertFalse('}}' in dhcpConf)
        self.assertEquals(1, dhcpConf.count('host-name')) # 1 global + 0 device
    def testGenerateSingleDhcpConf(self):
        from jnpr.openclos.l3Clos import util
        flexmock(util, isPlatformUbuntu = True)
        with self._dao.getReadWriteSession() as session:
            createDevice(session, 'dev1')
            createDevice(session, 'dev2')
            dhcpConf = self.ztpServer.generateSingleDhcpConf(session)
        self.assertFalse('{{' in dhcpConf)
        self.assertFalse('}}' in dhcpConf)
        self.assertEquals(3, dhcpConf.count('host-name')) # 1 global + 2 device
    def testGeneratePodSpecificDhcpConf(self):
        from jnpr.openclos.l3Clos import util
        flexmock(util, isPlatformUbuntu = True)
        with self._dao.getReadWriteSession() as session:
            pod = createPod('pod1', session)
            pod.spineJunosImage = 'testSpineImage'
            createPodDevice(session, 'dev1', pod)
            dev2 = createPodDevice(session, 'dev2', pod)
            dev3 = createPodDevice(session, 'dev3', pod)
            dev3.role = 'leaf'
            dhcpConf = self.ztpServer.generatePodSpecificDhcpConf(session, pod.id)
        # Only the two spine devices get the spine image.
        self.assertEquals(2, dhcpConf.count('testSpineImage'))
        self.assertFalse('{{' in dhcpConf)
        self.assertFalse('}}' in dhcpConf)
        self.assertFalse('None' in dhcpConf)
        self.assertEquals(4, dhcpConf.count('host-name')) # 1 global + 3 device
    def testGeneratePodSpecificDhcpConfWithSerial(self):
        from jnpr.openclos.l3Clos import util
        flexmock(util, isPlatformUbuntu = True)
        with self._dao.getReadWriteSession() as session:
            pod = createPod('pod1', session)
            pod.spineJunosImage = 'testSpineImage'
            createPodDevice(session, 'dev1', pod)
            dev2 = createPodDevice(session, 'dev2', pod)
            # Spine identified by serial number instead of MAC address.
            dev2.macAddress = None
            dev2.serialNumber = 'VB1234567890'
            dev3 = createPodDevice(session, 'dev3', pod)
            dev3.role = 'leaf'
            dev3.serialNumber = 'VB1234567891'
            dhcpConf = self.ztpServer.generatePodSpecificDhcpConf(session, pod.id)
            # NOTE(review): stray debug print left in the test; consider
            # removing it to keep test output clean.
            print dhcpConf
        self.assertEquals(2, dhcpConf.count('testSpineImage'))
        self.assertFalse('{{' in dhcpConf)
        self.assertFalse('}}' in dhcpConf)
        self.assertFalse('None' in dhcpConf)
        # Leaf serials are not emitted; only the spine's serial appears.
        self.assertTrue('VB1234567890' in dhcpConf)
        self.assertTrue('VB1234567891' not in dhcpConf)
        self.assertEquals(5, dhcpConf.count('host-name')) # 1 global class + 1 subnet + 2 device mac + 1 device serial
    def testGeneratePodSpecificDhcpConfFor2StageZtp(self):
        from jnpr.openclos.l3Clos import util
        flexmock(util, isPlatformUbuntu = True)
        flexmock(util, isZtpStaged = True)
        with self._dao.getReadWriteSession() as session:
            pod = createPod('pod1', session)
            pod.spineJunosImage = 'testSpineImage'
            pod.leafSettings.append(LeafSetting('ex4300-24p', pod.id))
            dev1 = createPodDevice(session, 'dev1', pod)
            dev2 = createPodDevice(session, 'dev2', pod)
            dev3 = createPodDevice(session, 'dev3', pod)
            dev3.role = 'leaf'
            dev4 = createPodDevice(session, 'dev4', pod)
            dev4.role = 'leaf'
            dhcpConf = self.ztpServer.generatePodSpecificDhcpConf(session, pod.id)
        self.assertEquals(2, dhcpConf.count('testSpineImage'))
        self.assertFalse('{{' in dhcpConf)
        self.assertFalse('}}' in dhcpConf)
        self.assertFalse('None' in dhcpConf)
        # In staged ZTP leaves are served from a pool keyed by vendor class,
        # so only the spines contribute host-name entries.
        self.assertEquals(3, dhcpConf.count('host-name')) # 1 global + 2 spine device
        self.assertEquals(1, dhcpConf.count('pool'))
        self.assertEquals(2, dhcpConf.count('class '))
        self.assertEquals(4, dhcpConf.count('vendor-class-identifier'))
    def testPopulateDhcpGlobalSettings(self):
        from jnpr.openclos.l3Clos import util
        globalZtpConf = {'ztp': {'dhcpSubnet': '10.20.30.0/25', 'dhcpOptionRoute': '10.20.30.254', 'dhcpOptionRangeStart': '10.20.30.15','dhcpOptionRangeEnd': '10.20.30.20'}}
        flexmock(util, loadClosDefinition = globalZtpConf)
        globalSetting = self.ztpServer.populateDhcpGlobalSettings()
        self.assertEquals('10.20.30.0', globalSetting['network'])
        self.assertEquals('255.255.255.128', globalSetting['netmask'])
        self.assertEquals('10.20.30.254', globalSetting['defaultRoute'])
        self.assertEquals('10.20.30.15', globalSetting['rangeStart'])
        self.assertEquals('10.20.30.20', globalSetting['rangeEnd'])
        # When only the subnet is given, route and range are derived from it.
        globalZtpConf = {'ztp': {'dhcpSubnet': '10.20.30.0/25'}}
        flexmock(util, loadClosDefinition = globalZtpConf)
        globalSetting = self.ztpServer.populateDhcpGlobalSettings()
        self.assertEquals('10.20.30.1', globalSetting['defaultRoute'])
        self.assertEquals('10.20.30.2', globalSetting['rangeStart'])
        self.assertEquals('10.20.30.126', globalSetting['rangeEnd'])
# Allow running this test module directly.
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
scotthartbti/android_external_chromium_org | tools/ipc_messages_log.py | 170 | 4695 | #!/usr/bin/python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""""Processes a log file and resolves IPC message identifiers.
Resolves IPC messages of the form [unknown type NNNNNN] to named IPC messages.
e.g. logfile containing
I/stderr ( 3915): ipc 3915.3.1370207904 2147483647 S [unknown type 66372]
will be transformed to:
I/stderr ( 3915): ipc 3915.3.1370207904 2147483647 S ViewMsg_SetCSSColors
In order to find the message header files efficiently, it requires that
Chromium is checked out using git.
"""
import optparse
import os
import re
import subprocess
import sys
def _SourceDir():
  """Return Chromium's source directory (the parent of this script's dir)."""
  script_dir = sys.path[0]
  return os.path.join(script_dir, '..')
def _ReadLines(f):
  """Read from file f and generate right-stripped lines."""
  return (line.rstrip() for line in f)
def _GetMsgStartTable():
  """Read MsgStart enumeration from ipc/ipc_message_utils.h.

  Determines the message type identifiers by reading the header file
  ipc/ipc_message_utils.h and looking for enum IPCMessageStart. Assumes
  the following code format in the header file:
  enum IPCMessageStart {
  Type1MsgStart ...,
  Type2MsgStart,
  };

  Returns:
    A dictionary mapping StartName to enumeration value.
  """
  ipc_message_file = _SourceDir() + '/ipc/ipc_message_utils.h'
  ipc_message_lines = _ReadLines(open(ipc_message_file))
  is_msg_start = False
  count = 0
  msg_start_table = dict()
  for line in ipc_message_lines:
    if is_msg_start:
      # Enum body: stop at the closing brace, otherwise record the
      # enumerator name (everything before 'MsgStart', normalized).
      if line.strip() == '};':
        break
      msgstart_index = line.find('MsgStart')
      msg_type = line[:msgstart_index] + 'MsgStart'
      msg_start_table[msg_type.strip()] = count
      count+=1
    elif line.strip() == 'enum IPCMessageStart {':
      # Found the opening line of the enum; subsequent lines are members.
      is_msg_start = True
  return msg_start_table
def _FindMessageHeaderFiles():
  """Look through the git-tracked source tree for *_messages.h files."""
  os.chdir(_SourceDir())
  git_cmd = ['git', 'ls-files', '--', '*_messages.h']
  pipe = subprocess.Popen(git_cmd, stdout=subprocess.PIPE)
  return _ReadLines(pipe.stdout)
def _GetMsgId(msg_start, line_number, msg_start_table):
  """Construct the message id given the msg_start and the line number."""
  # The id is the enum value in hex followed by the (zero-padded) line
  # number in hex, parsed back as one integer.
  enum_value = msg_start_table[msg_start]
  packed = '%x%04x' % (enum_value, line_number)
  return int(packed, 16)
def _ReadHeaderFile(f, msg_start_table, msg_map):
  """Read a header file and construct a map from message_id to message name.

  Args:
    f: iterable of header-file lines.
    msg_start_table: dict mapping MsgStart enum names to enum values.
    msg_map: dict to populate with message_id -> message name entries.

  Returns:
    msg_map, updated in place.
  """
  msg_def_re = re.compile(
      '^IPC_(?:SYNC_)?MESSAGE_[A-Z0-9_]+\(([A-Za-z0-9_]+).*')
  msg_start_re = re.compile(
      '^\s*#define\s+IPC_MESSAGE_START\s+([a-zA-Z0-9_]+MsgStart).*')
  msg_start = None
  line_number = 0
  for line in f:
    line_number += 1
    start_match = re.match(msg_start_re, line)
    if start_match:
      msg_start = start_match.group(1)
      continue
    def_match = re.match(msg_def_re, line)
    if def_match and msg_start:
      # Record an id only on the line that actually defines a message.
      # The previous version kept the last seen name and re-inserted it
      # for every subsequent line, polluting the map with bogus ids that
      # would then "resolve" unknown message types to the wrong name.
      msg_id = _GetMsgId(msg_start, line_number, msg_start_table)
      msg_map[msg_id] = def_match.group(1)
  return msg_map
def _ResolveMsg(msg_type, msg_map):
  """Fully resolve a message type to a name."""
  try:
    return msg_map[msg_type]
  except KeyError:
    return '[Unknown message %d (0x%x)]x' % (msg_type, msg_type)
def _ProcessLog(f, msg_map):
  """Read lines from f and resolve the IPC messages according to msg_map."""
  unknown_msg_re = re.compile('\[unknown type (\d+)\]')
  for line in f:
    line = line.rstrip()
    match = re.search(unknown_msg_re, line)
    if match:
      # Substitute the resolved message name for the placeholder.
      line = re.sub(unknown_msg_re,
                    _ResolveMsg(int(match.group(1)), msg_map),
                    line)
    print line
def _GetMsgMap():
  """Returns a dictionary mapping from message number to message name."""
  msg_start_table = _GetMsgStartTable()
  msg_map = dict()
  for header_file in _FindMessageHeaderFiles():
    # Close each header promptly instead of leaking the file handle
    # until garbage collection.
    header = open(header_file)
    try:
      _ReadHeaderFile(header, msg_start_table, msg_map)
    finally:
      header.close()
  return msg_map
def main():
  """Processes one or more log files with IPC logging messages.

  Replaces '[unknown type NNNNNN]' with resolved IPC messages.
  Reads from standard input if no log files specified on the
  command line.
  """
  parser = optparse.OptionParser('usage: %prog [LOGFILE...]')
  (_, args) = parser.parse_args()
  msg_map = _GetMsgMap()
  log_files = args
  if log_files:
    # NOTE(review): the log files opened here are never explicitly closed;
    # they are reclaimed only by garbage collection.
    for log_file in log_files:
      _ProcessLog(open(log_file), msg_map)
  else:
    _ProcessLog(sys.stdin, msg_map)
# Entry point when run as a script.
if __name__ == '__main__':
  main()
| bsd-3-clause |
blokeley/backup_dropbox | backup.py | 2 | 11965 | """Backup Dropbox Business files.
See README.md for full instructions.
"""
import argparse
from datetime import date, datetime
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import json
import logging
import logging.config
import os
import re
import string
import sys
import time
from typing import Callable, Generic, Iterator, Set, TypeVar
import queue
import dropbox # type: ignore
__version__ = '2.1.8'
DOWNLOAD_THREADS = 8
MAX_QUEUE_SIZE = 100_000
# Characters that are illegal in Windows paths.
# See https://msdn.microsoft.com/en-us/library/aa365247
ILLEGAL_PATH_CHARS = r'<>:"|?*'
ILLEGAL_PATH_PATTERN = re.compile(f'[{re.escape(ILLEGAL_PATH_CHARS)}]')
# Type for mypy generics
T = TypeVar('T')


class SetQueue(queue.Queue, Generic[T]):
    """Queue which will allow a given object to be put once only.

    Objects are considered identical if hash(object) are identical.
    """

    def __init__(self, maxsize: int = 0) -> None:
        """Initialise queue with maximum number of items (0 = unbounded)."""
        super().__init__(maxsize)
        # Every item ever enqueued; used to reject duplicates.
        self.all_items: Set[T] = set()

    def _put(self, item: T) -> None:
        # The None sentinel may be queued any number of times so that each
        # consumer thread can receive its own shutdown signal.
        if item is None or item not in self.all_items:
            super()._put(item)
            self.all_items.add(item)
class File:
    """File on Dropbox.

    Wraps a metadata entry together with the owning team member so entries
    are hashable (for SetQueue de-duplication) and can be downloaded as the
    correct user.
    """

    def __init__(self, file: dropbox.files.Metadata,
                 member: dropbox.team.TeamMemberProfile) -> None:
        self.file = file
        self.member = member

    def __hash__(self) -> int:
        """Hash on the Dropbox file id so duplicates collapse in sets."""
        return hash(self.file.id)

    def __eq__(self, other: object) -> bool:
        """Files compare equal when their Dropbox ids match."""
        if not isinstance(other, File):
            return NotImplemented
        return self.file.id == other.file.id

    def __repr__(self):
        return self.file.path_display
def parse_args() -> argparse.Namespace:
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--version', action='version',
                        version=f'%(prog)s {__version__}')
    parser.add_argument(
        '--since',
        help='select only files modified since date in YYYY-MM-DD format')
    parser.add_argument(
        '--maxsize', type=int,
        help='select only files up to size in MB inclusive')
    parser.add_argument(
        '--out',
        help='path of output directory. Default is "yyyy-mm-dd backup".')
    parser.add_argument(
        '--token',
        help=('Dropbox Business access token. The environment variable '
              'DROPBOX_TEAM_TOKEN is used if token is not supplied.'))
    args = parser.parse_args()

    # Default output directory is named after today's date.
    if not args.out:
        args.out = date.today().strftime('%Y-%m-%d') + ' backup'

    if args.since:
        # Record the filter in the output directory name, then convert the
        # date string into a datetime object for comparisons.
        args.out = ' '.join((args.out, 'since', args.since))
        args.since = datetime.strptime(args.since, '%Y-%m-%d')
        if args.since > datetime.now():
            parser.error('"Since" date must not be later than today.')

    if not args.token:
        try:
            args.token = os.environ['DROPBOX_TEAM_TOKEN']
        except KeyError:
            parser.error('Dropbox Team token required')

    return args
def setup_logging() -> None:
    """Configure logging from logging_config.json, or built-in defaults.

    The default configuration logs briefly to the console and verbosely to
    a rotating backup.log file.
    """
    DEFAULT_LOGGING = {
        "version": 1,
        "formatters": {
            "standard": {
                "format": "%(asctime)s %(levelname)-8s %(name)s: %(message)s"
            },
            "brief": {
                "format": "%(asctime)s %(levelname)-8s %(message)s",
                "datefmt": "%H:%M:%S"
            }
        },
        "handlers": {
            "console": {
                "formatter": "brief",
                "class": "logging.StreamHandler"
            },
            "file": {
                "formatter": "standard",
                "class": "logging.handlers.RotatingFileHandler",
                "filename": "backup.log",
                "maxBytes": 10_000_000,
                "backupCount": 20,
                "encoding": "utf-8"
            }
        },
        "loggers": {
            # Prevent numerous INFO messages from the dropbox package
            "dropbox": {
                "level": "WARNING"
            }
        },
        "root": {
            "level": "INFO",
            "handlers": ["console", "file"]
        }
    }
    try:
        # A user-supplied JSON config takes precedence when present.
        with open('logging_config.json') as f:
            logging.config.dictConfig(json.load(f))
    except FileNotFoundError:
        logging.config.dictConfig(DEFAULT_LOGGING)
def get_members(team: dropbox.dropbox.DropboxTeam) \
        -> Iterator[dropbox.team.TeamMemberProfile]:
    """Yield every member of the Dropbox Business team, following pagination."""
    page = team.team_members_list()
    while True:
        for member in page.members:
            yield member
        if not page.has_more:
            return
        page = team.team_members_list_continue(page.cursor)
def enqueue(member: dropbox.team.TeamMemberProfile, q: queue.Queue,
            getter: Callable[[dropbox.team.TeamMemberInfo], Iterator[File]],
            predicate: Callable[[dropbox.files.Metadata], bool]) -> None:
    """Enqueue the member's files for which predicate(file) is True."""
    for candidate in getter(member):
        if not predicate(candidate):
            continue
        q.put(candidate)
def dequeue(q: queue.Queue, download: Callable[[File], None]) -> None:
    """Call download on each queued file until a None sentinel is received."""
    logger = logging.getLogger('backup.dequeue')
    while True:
        file = q.get()
        if file is None:
            # Poison pill: this consumer thread is done.
            logger.info(f'Poison pill found with {q.qsize()} left in queue')
            return
        member_name = file.member.profile.name.display_name
        logger.info(
            f'{q.qsize()} left in queue. Downloading {file} as {member_name}')
        download(file)
def get_files(member: dropbox.team.TeamMemberInfo,
              team: dropbox.DropboxTeam) -> Iterator[File]:
    """Yield every file entry in the member's Dropbox, following pagination."""
    logger = logging.getLogger('backup.get_files')
    display_name = member.profile.name.display_name
    logger.info(f'Listing files for {display_name}')
    user = team.as_user(member.profile.team_member_id)
    listing = user.files_list_folder('', True)
    while True:
        for entry in listing.entries:
            logger.debug(f'Found {entry.path_display}')
            yield File(entry, member)
        if not listing.has_more:
            break
        listing = user.files_list_folder_continue(listing.cursor)
    logger.info(f'No more files for {display_name}')
def should_download(file: File, args: argparse.Namespace) -> bool:
    """Return the True if file passes the filters specified in args.

    Filters applied, in order: folders are skipped, then files larger than
    args.maxsize MB, then files last modified before args.since.
    """
    logger = logging.getLogger('backup.should_download')
    # Do not download folders
    if isinstance(file.file, dropbox.files.FolderMetadata):
        return False
    try:
        # Ignore large files
        if args.maxsize is not None and file.file.size > 1e6 * args.maxsize:
            logger.debug(f'Too large: {file}')
            return False
        # Ignore files modified before given date
        if args.since is not None and args.since > file.file.server_modified:
            logger.debug(f'Too old: {file}')
            return False
    except AttributeError:
        # Not a file (no size/server_modified attributes). Don't mark to
        # download.
        logger.error(f'Does not have file attributes: {file}')
        return False
    # Return all other files
    logger.debug(f'OK: {file}')
    return True
def remove_unprintable(text: str) -> str:
    """Remove characters outside of ``string.printable``."""
    printable = set(string.printable)
    return ''.join(ch for ch in text if ch in printable)
def remove_illegal(path: str) -> str:
    """Strip characters that are illegal in Windows paths."""
    return ILLEGAL_PATH_PATTERN.sub('', path)
def download(file: File, team: dropbox.dropbox.DropboxTeam,
             root: str) -> None:
    """Save the file under the root directory given.

    The Dropbox path is sanitised (unprintable and Windows-illegal
    characters removed) before being joined to root. Errors are logged,
    never raised, so one bad file cannot stop a worker thread.
    """
    logger = logging.getLogger('backup.download')
    path = remove_illegal(remove_unprintable(file.file.path_display))
    # Remove the leading slash from printable_path
    local_path = os.path.join(root, path[1:])
    member_name = file.member.profile.name.display_name
    logger.debug(f'Saving {local_path} as {member_name}')
    # Create output directory if it does not exist
    try:
        os.makedirs(os.path.dirname(local_path), exist_ok=True)
        # Download as the owning member so per-user files are accessible.
        user = team.as_user(file.member.profile.team_member_id)
        user.files_download_to_file(local_path, file.file.path_display)
    except FileNotFoundError:
        # FileNotFoundError raised if path is too long
        # If this occurs, see https://bugs.python.org/issue27731
        logger.exception('Path might be too long')
    except dropbox.exceptions.ApiError as ex:
        if ex.user_message_text:
            logger.error('API error message: ' + ex.user_message_text)
        else:
            fmt = '{} for {} as {}'
            logger.error(fmt.format(ex.error, file.file.path_display,
                                    file.member.profile.name.display_name))
    except Exception:
        # Catch-all so a single bad file never kills the consumer thread.
        msgs = [f'Exception whilst saving {local_path}',
                f'Dropbox path is {file.file.path_display}',
                f'File ID is {file.file.id}',
                f'User is {file.member.profile.name.display_name}',
                f'User ID is {file.member.profile.team_member_id}']
        logger.exception(os.linesep.join(msgs))
def list_and_save(args: argparse.Namespace) -> None:
    """List and save Dropbox files (main program).

    Producer threads enumerate each member's files into a de-duplicating
    queue while a fixed pool of consumer threads downloads them.
    """
    logger = logging.getLogger('backup.list_and_save')
    logger.info(f'{__file__} version {__version__}')
    team = dropbox.DropboxTeam(args.token)
    # Synchronised queue of File objects to download
    file_queue = SetQueue[File](MAX_QUEUE_SIZE)
    # Create partial functions to save invariant arguments
    _get_files = partial(get_files, team=team)
    _should_download = partial(should_download, args=args)
    _downloader = partial(download, team=team, root=args.out)
    with ThreadPoolExecutor(DOWNLOAD_THREADS) as consumer_exec:
        # Start the threads to download files
        for _ in range(DOWNLOAD_THREADS):
            consumer_exec.submit(dequeue, file_queue, _downloader)
        # Start the threads to get file names; the inner `with` blocks until
        # every producer has finished enumerating.
        with ThreadPoolExecutor() as producer_exec:
            for member in get_members(team):
                producer_exec.submit(enqueue, member, file_queue, _get_files,
                                     _should_download)
        # Tell the threads we're done: one poison pill per consumer.
        logger.debug('Shutting down the consumer threads')
        for _ in range(DOWNLOAD_THREADS):
            file_queue.put(None)
def main() -> int:
    """Program entry point.

    Returns a process exit code: 0 on success, 1 on SystemExit (argparse
    --help / bad arguments), -1 on any unexpected exception.
    """
    setup_logging()
    logger = logging.getLogger('backup.main')
    # Start the clock before anything that can raise, so the handlers below
    # can always report an elapsed time.
    start = time.time()
    try:
        # Bug fix: parse_args() was previously called *outside* the try
        # block, so the SystemExit it raises (which the comment below claims
        # to handle) escaped uncaught.
        args = parse_args()
        list_and_save(args)
        logger.info(f'Exit OK at {time.time() - start:.2f} s')
        return 0
    # Ignore SystemExit exceptions (raised by argparse.parse_args() etc.)
    except SystemExit:
        logger.info(f'SystemExit raised at {time.time() - start:.2f} s')
        return 1
    # Report all other exceptions
    except Exception:
        logger.exception(f'Uncaught exception at {time.time() - start:.2f} s')
        return -1
# Script entry point: the process exit status mirrors main()'s return code.
if __name__ == '__main__':
    sys.exit(main())
| apache-2.0 |
RobbieD/Assignment5 | js/utils/exporters/maya/plug-ins/threeJsFileTranslator.py | 14 | 9656 | __author__ = 'Chris Lewis'
__version__ = '0.1.0'
__email__ = 'clewis1@c.ringling.edu'
import sys
import json
import maya.cmds as mc
from maya.OpenMaya import *
from maya.OpenMayaMPx import *
# Name under which the translator is registered with Maya, plus the MEL
# script that builds its export-options UI and the default options string.
kPluginTranslatorTypeName = 'Three.js'
kOptionScript = 'ThreeJsExportScript'
kDefaultOptionsString = '0'
# Number of decimal places kept when serialising floats to JSON.
FLOAT_PRECISION = 8
# adds decimal precision to JSON encoding
class DecimalEncoder(json.JSONEncoder):
def _iterencode(self, o, markers=None):
if isinstance(o, float):
s = str(o)
if '.' in s and len(s[s.index('.'):]) > FLOAT_PRECISION - 1:
s = '%.{0}f'.format(FLOAT_PRECISION) % o
while '.' in s and s[-1] == '0':
s = s[:-1] # this actually removes the last "0" from the string
if s[-1] == '.': # added this test to avoid leaving "0." instead of "0.0",
s += '0' # which would throw an error while loading the file
return (s for s in [s])
return super(DecimalEncoder, self)._iterencode(o, markers)
class ThreeJsError(Exception):
    """Raised when the Three.js exporter hits an unrecoverable condition."""
class ThreeJsWriter(object):
    """Collects Maya mesh data and serialises it to Three.js JSON (format v3).

    The component buffers (vertices, normals, colors, uvs, materials, faces)
    are accumulated as flat lists on ``self`` and written out by ``write``.
    """
    def __init__(self):
        # Names of the buffers/options this exporter knows about.
        self.componentKeys = ['vertices', 'normals', 'colors', 'uvs', 'materials', 'faces']
    def _parseOptions(self, optionsString):
        """Turn Maya's space-separated options string into a bool map."""
        self.options = dict([(x, False) for x in self.componentKeys])
        optionsString = optionsString[2:] # trim off the "0;" that Maya adds to the options string
        for option in optionsString.split(' '):
            self.options[option] = True
    def _updateOffsets(self):
        """Snapshot current buffer lengths so per-mesh indices stay global."""
        for key in self.componentKeys:
            if key == 'uvs':
                continue
            self.offsets[key] = len(getattr(self, key))
        # uvs is a list of per-layer buffers, so it gets per-layer offsets.
        for i in range(len(self.uvs)):
            self.offsets['uvs'][i] = len(self.uvs[i])
    def _getTypeBitmask(self, options):
        """Build the per-face type bitmask defined by the Three.js v3 format."""
        bitmask = 0
        if options['materials']:
            bitmask |= 2
        if options['uvs']:
            bitmask |= 8
        if options['normals']:
            bitmask |= 32
        if options['colors']:
            bitmask |= 128
        return bitmask
    def _exportMesh(self, dagPath, component):
        """Append one mesh's vertex/material/uv/normal/color/face data.

        Any per-component Maya API failure disables that component (in a
        local copy of the options) rather than aborting the export.
        """
        mesh = MFnMesh(dagPath)
        options = self.options.copy()
        self._updateOffsets()
        # export vertex data
        if options['vertices']:
            try:
                iterVerts = MItMeshVertex(dagPath, component)
                while not iterVerts.isDone():
                    point = iterVerts.position(MSpace.kWorld)
                    self.vertices += [point.x, point.y, point.z]
                    iterVerts.next()
            except:
                options['vertices'] = False
        # export material data
        # TODO: actually parse material data
        materialIndices = MIntArray()
        if options['materials']:
            try:
                shaders = MObjectArray()
                mesh.getConnectedShaders(0, shaders, materialIndices)
                while len(self.materials) < shaders.length():
                    self.materials.append({}) # placeholder material definition
            except:
                self.materials = [{}]
        # export uv data
        if options['uvs']:
            try:
                uvLayers = []
                mesh.getUVSetNames(uvLayers)
                while len(uvLayers) > len(self.uvs):
                    self.uvs.append([])
                    self.offsets['uvs'].append(0)
                for i, layer in enumerate(uvLayers):
                    uList = MFloatArray()
                    vList = MFloatArray()
                    mesh.getUVs(uList, vList, layer)
                    for j in xrange(uList.length()):
                        self.uvs[i] += [uList[j], vList[j]]
            except:
                options['uvs'] = False
        # export normal data
        if options['normals']:
            try:
                normals = MFloatVectorArray()
                mesh.getNormals(normals, MSpace.kWorld)
                for i in xrange(normals.length()):
                    point = normals[i]
                    self.normals += [point.x, point.y, point.z]
            except:
                options['normals'] = False
        # export color data
        if options['colors']:
            try:
                colors = MColorArray()
                mesh.getColors(colors)
                for i in xrange(colors.length()):
                    color = colors[i]
                    # uncolored vertices are set to (-1, -1, -1). Clamps colors to (0, 0, 0).
                    self.colors += [max(color.r, 0), max(color.g, 0), max(color.b, 0)]
            except:
                options['colors'] = False
        # export face data
        if not options['vertices']:
            return
        bitmask = self._getTypeBitmask(options)
        iterPolys = MItMeshPolygon(dagPath, component)
        while not iterPolys.isDone():
            self.faces.append(bitmask)
            # export face vertices
            verts = MIntArray()
            iterPolys.getVertices(verts)
            for i in xrange(verts.length()):
                self.faces.append(verts[i] + self.offsets['vertices'])
            # export face vertex materials
            if options['materials']:
                if materialIndices.length():
                    self.faces.append(materialIndices[iterPolys.index()])
            # export face vertex uvs
            # (uvLayers was populated in the uv block above; options['uvs']
            # is only still True if that block succeeded)
            if options['uvs']:
                util = MScriptUtil()
                uvPtr = util.asIntPtr()
                for i, layer in enumerate(uvLayers):
                    for j in xrange(verts.length()):
                        iterPolys.getUVIndex(j, uvPtr, layer)
                        uvIndex = util.getInt(uvPtr)
                        self.faces.append(uvIndex + self.offsets['uvs'][i])
            # export face vertex normals
            # (faces are triangulated by _exportMeshes, hence exactly 3)
            if options['normals']:
                for i in xrange(3):
                    normalIndex = iterPolys.normalIndex(i)
                    self.faces.append(normalIndex + self.offsets['normals'])
            # export face vertex colors
            if options['colors']:
                colors = MIntArray()
                iterPolys.getColorIndices(colors)
                for i in xrange(colors.length()):
                    self.faces.append(colors[i] + self.offsets['colors'])
            iterPolys.next()
    def _getMeshes(self, nodes):
        """Resolve a node list to the mesh shapes it contains (one level deep)."""
        meshes = []
        for node in nodes:
            if mc.nodeType(node) == 'mesh':
                meshes.append(node)
            else:
                for child in mc.listRelatives(node, s=1):
                    if mc.nodeType(child) == 'mesh':
                        meshes.append(child)
        return meshes
    def _exportMeshes(self):
        """Duplicate, combine and triangulate the export set, then export it."""
        # export all
        if self.accessMode == MPxFileTranslator.kExportAccessMode:
            mc.select(self._getMeshes(mc.ls(typ='mesh')))
        # export selection
        elif self.accessMode == MPxFileTranslator.kExportActiveAccessMode:
            mc.select(self._getMeshes(mc.ls(sl=1)))
        else:
            raise ThreeJsError('Unsupported access mode: {0}'.format(self.accessMode))
        # Work on duplicates so the user's scene is left untouched.
        dups = [mc.duplicate(mesh)[0] for mesh in mc.ls(sl=1)]
        combined = mc.polyUnite(dups, mergeUVSets=1, ch=0) if len(dups) > 1 else dups[0]
        mc.polyTriangulate(combined)
        mc.select(combined)
        sel = MSelectionList()
        MGlobal.getActiveSelectionList(sel)
        mDag = MDagPath()
        mComp = MObject()
        sel.getDagPath(0, mDag, mComp)
        self._exportMesh(mDag, mComp)
        # Remove the temporary combined duplicate.
        mc.delete(combined)
    def write(self, path, optionString, accessMode):
        """Export the scene (or selection) to ``path`` as Three.js JSON."""
        self.path = path
        self._parseOptions(optionString)
        self.accessMode = accessMode
        self.root = dict(metadata=dict(formatVersion=3))
        self.offsets = dict()
        # Reset all component buffers and their offsets.
        for key in self.componentKeys:
            setattr(self, key, [])
            self.offsets[key] = 0
        self.offsets['uvs'] = []
        self.uvs = []
        self._exportMeshes()
        # add the component buffers to the root JSON object
        for key in self.componentKeys:
            buffer_ = getattr(self, key)
            if buffer_:
                self.root[key] = buffer_
        # materials are required for parsing
        if not self.root.has_key('materials'):
            self.root['materials'] = [{}]
        # write the file
        with file(self.path, 'w') as f:
            f.write(json.dumps(self.root, separators=(',',':'), cls=DecimalEncoder))
class ThreeJsTranslator(MPxFileTranslator):
    """Maya file-translator plug-in exposing the Three.js JSON exporter."""
    def __init__(self):
        MPxFileTranslator.__init__(self)
    def haveWriteMethod(self):
        """Tell Maya this translator supports exporting."""
        return True
    def filter(self):
        """File-browser filter shown in the export dialog."""
        return '*.js'
    def defaultExtension(self):
        """Default extension appended to exported files."""
        return 'js'
    def writer(self, fileObject, optionString, accessMode):
        """Entry point Maya calls to perform the export."""
        path = fileObject.fullName()
        writer = ThreeJsWriter()
        writer.write(path, optionString, accessMode)
def translatorCreator():
    """Factory handed to Maya's plug-in registry (returns an MPx pointer)."""
    return asMPxPtr(ThreeJsTranslator())
def initializePlugin(mobject):
    """Maya plug-in entry point: register the Three.js file translator."""
    mplugin = MFnPlugin(mobject)
    try:
        mplugin.registerFileTranslator(kPluginTranslatorTypeName, None, translatorCreator, kOptionScript, kDefaultOptionsString)
    except:
        # Report and re-raise so Maya marks the plug-in load as failed.
        sys.stderr.write('Failed to register translator: %s' % kPluginTranslatorTypeName)
        raise
def uninitializePlugin(mobject):
    """Maya plug-in exit point: deregister the Three.js file translator.

    Fix: the final ``raise`` line carried stray dataset-extraction residue
    ("| gpl-3.0 |") fused onto it, which is a syntax error; restored the
    bare re-raise.
    """
    mplugin = MFnPlugin(mobject)
    try:
        mplugin.deregisterFileTranslator(kPluginTranslatorTypeName)
    except:
        # Report and re-raise so Maya marks the unload as failed.
        sys.stderr.write('Failed to deregister translator: %s' % kPluginTranslatorTypeName)
        raise
raccoongang/edx-platform | openedx/core/djangoapps/content/block_structure/api.py | 25 | 1921 | """
Higher order functions built on the BlockStructureManager to interact with a django cache.
"""
from django.core.cache import cache
from xmodule.modulestore.django import modulestore
from .manager import BlockStructureManager
def get_course_in_cache(course_key):
    """
    A higher order function implemented on top of the
    block_structure.get_collected function that returns the block
    structure in the cache for the given course_key.
    Returns:
        BlockStructureBlockData - The collected block structure,
        starting at root_block_usage_key.
    """
    manager = get_block_structure_manager(course_key)
    return manager.get_collected()
def update_course_in_cache(course_key):
    """
    A higher order function implemented on top of the
    block_structure.updated_collected function that updates the block
    structure in the cache for the given course_key.
    """
    manager = get_block_structure_manager(course_key)
    return manager.update_collected_if_needed()
def clear_course_from_cache(course_key):
    """
    A higher order function implemented on top of the
    block_structure.clear_block_cache function that clears the block
    structure from the cache for the given course_key.
    Note: See Note in get_course_blocks. Even after MA-1604 is
    implemented, this implementation should still be valid since the
    entire block structure of the course is cached, even though
    arbitrary access to an intermediate block will be supported.
    """
    manager = get_block_structure_manager(course_key)
    manager.clear()
def get_block_structure_manager(course_key):
    """
    Returns the manager for managing Block Structures for the given course.
    """
    module_store = modulestore()
    usage_key = module_store.make_course_usage_key(course_key)
    return BlockStructureManager(usage_key, module_store, get_cache())
def get_cache():
    """
    Returns the storage for caching Block Structures.

    Currently the default django cache backend imported at module level.
    """
    return cache
| agpl-3.0 |
blueboxgroup/neutron | neutron/tests/unit/nec/test_ofc_manager.py | 16 | 13360 | # Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron import context
from neutron.openstack.common import uuidutils
from neutron.plugins.nec.common import config
from neutron.plugins.nec.db import api as ndb
from neutron.plugins.nec import ofc_manager
from neutron.tests.unit import testlib_api
class FakePortInfo(object):
    """Minimal stand-in for a portinfo DB row used by the OFCManager tests.

    Attribute access is proxied to the backing ``data`` dict so tests can
    read ``datapath_id``, ``port_no`` etc. like on a real model object.
    """
    def __init__(self, id, datapath_id, port_no=0,
                 vlan_id=65535, mac='00:11:22:33:44:55'):
        self.data = {'id': id, 'datapath_id': datapath_id,
                     'port_no': port_no, 'vlan_id': vlan_id, 'mac': mac}
    def __getattr__(self, name):
        # Bug fix: the original looked up ``self.fields`` (undefined, causing
        # infinite __getattr__ recursion) and returned ``self[name]`` even
        # though no __getitem__ exists.  Proxy to the backing dict instead.
        # (``data`` lives in the instance dict, so this cannot recurse.)
        try:
            return self.data[name]
        except KeyError:
            raise AttributeError(name)
class OFCManagerTestBase(testlib_api.SqlTestCase):
    """Shared fixture for the OFCManager unit tests.

    Wires an OFCManager to the stub OFC driver (with auto-checking enabled)
    and a mocked core plugin, on top of an in-memory SQL test case.
    """
    def setUp(self):
        super(OFCManagerTestBase, self).setUp()
        driver = "neutron.tests.unit.nec.stub_ofc_driver.StubOFCDriver"
        config.CONF.set_override('driver', driver, 'OFC')
        self.plugin = mock.Mock()
        self.plugin.get_packet_filters_for_port.return_value = None
        self.ofc = ofc_manager.OFCManager(self.plugin)
        # NOTE: enable_autocheck() is a feature of StubOFCDriver
        self.ofc.driver.enable_autocheck()
        self.ctx = context.get_admin_context()
    def get_random_params(self):
        """create random parameters for portinfo test."""
        tenant = uuidutils.generate_uuid()
        network = uuidutils.generate_uuid()
        port = uuidutils.generate_uuid()
        _filter = uuidutils.generate_uuid()
        none = uuidutils.generate_uuid()
        return tenant, network, port, _filter, none
class OFCManagerTest(OFCManagerTestBase):
    """CRUD round-trips for tenants, networks and ports through OFCManager."""
    def testa_create_ofc_tenant(self):
        """test create ofc_tenant."""
        t, n, p, f, none = self.get_random_params()
        self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_tenant', t))
        self.ofc.create_ofc_tenant(self.ctx, t)
        self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_tenant', t))
        tenant = ndb.get_ofc_item(self.ctx.session, 'ofc_tenant', t)
        self.assertEqual(tenant.ofc_id, "ofc-" + t[:-4])
    def testb_exists_ofc_tenant(self):
        """test exists_ofc_tenant."""
        t, n, p, f, none = self.get_random_params()
        self.assertFalse(self.ofc.exists_ofc_tenant(self.ctx, t))
        self.ofc.create_ofc_tenant(self.ctx, t)
        self.assertTrue(self.ofc.exists_ofc_tenant(self.ctx, t))
    def testc_delete_ofc_tenant(self):
        """test delete ofc_tenant."""
        t, n, p, f, none = self.get_random_params()
        self.ofc.create_ofc_tenant(self.ctx, t)
        self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_tenant', t))
        self.ofc.delete_ofc_tenant(self.ctx, t)
        self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_tenant', t))
    def testd_create_ofc_network(self):
        """test create ofc_network."""
        t, n, p, f, none = self.get_random_params()
        self.ofc.create_ofc_tenant(self.ctx, t)
        self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_network', n))
        self.ofc.create_ofc_network(self.ctx, t, n)
        self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_network', n))
        network = ndb.get_ofc_item(self.ctx.session, 'ofc_network', n)
        self.assertEqual(network.ofc_id, "ofc-" + n[:-4])
    def teste_exists_ofc_network(self):
        """test exists_ofc_network."""
        t, n, p, f, none = self.get_random_params()
        self.ofc.create_ofc_tenant(self.ctx, t)
        self.assertFalse(self.ofc.exists_ofc_network(self.ctx, n))
        self.ofc.create_ofc_network(self.ctx, t, n)
        self.assertTrue(self.ofc.exists_ofc_network(self.ctx, n))
    def testf_delete_ofc_network(self):
        """test delete ofc_network."""
        t, n, p, f, none = self.get_random_params()
        self.ofc.create_ofc_tenant(self.ctx, t)
        self.ofc.create_ofc_network(self.ctx, t, n)
        self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_network', n))
        self.ofc.delete_ofc_network(self.ctx, n, {'tenant_id': t})
        self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_network', n))
    def _mock_get_portinfo(self, port_id, datapath_id='0xabc', port_no=1):
        """Patch ndb.get_portinfo to return a FakePortInfo for port_id."""
        get_portinfo = mock.patch.object(ndb, 'get_portinfo').start()
        fake_portinfo = FakePortInfo(id=port_id, datapath_id=datapath_id,
                                     port_no=port_no)
        get_portinfo.return_value = fake_portinfo
        return get_portinfo
    def _test_create_ofc_port(self, with_filter=False):
        """Create a port via OFCManager, optionally with packet filters."""
        t, n, p, f, none = self.get_random_params()
        self.ofc.create_ofc_tenant(self.ctx, t)
        self.ofc.create_ofc_network(self.ctx, t, n)
        self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_port', p))
        get_portinfo = self._mock_get_portinfo(p)
        port = {'tenant_id': t, 'network_id': n}
        if with_filter:
            _filters = ['filter1', 'filter2']
            self.plugin.get_packet_filters_for_port.return_value = _filters
        self.ofc.create_ofc_port(self.ctx, p, port)
        self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_port', p))
        port = ndb.get_ofc_item(self.ctx.session, 'ofc_port', p)
        self.assertEqual(port.ofc_id, "ofc-" + p[:-4])
        get_portinfo.assert_called_once_with(mock.ANY, p)
        portval = self.ofc.driver.ofc_port_dict[port.ofc_id]
        if with_filter:
            self.assertEqual(_filters, portval['filters'])
        else:
            self.assertFalse('filters' in portval)
    def testg_create_ofc_port(self):
        """test create ofc_port."""
        self._test_create_ofc_port(with_filter=False)
    def testg_create_ofc_port_with_filters(self):
        """test create ofc_port."""
        self._test_create_ofc_port(with_filter=True)
    def testh_exists_ofc_port(self):
        """test exists_ofc_port."""
        t, n, p, f, none = self.get_random_params()
        self.ofc.create_ofc_tenant(self.ctx, t)
        self.ofc.create_ofc_network(self.ctx, t, n)
        self.assertFalse(self.ofc.exists_ofc_port(self.ctx, p))
        get_portinfo = self._mock_get_portinfo(p)
        port = {'tenant_id': t, 'network_id': n}
        self.ofc.create_ofc_port(self.ctx, p, port)
        self.assertTrue(self.ofc.exists_ofc_port(self.ctx, p))
        get_portinfo.assert_called_once_with(mock.ANY, p)
    def testi_delete_ofc_port(self):
        """test delete ofc_port."""
        t, n, p, f, none = self.get_random_params()
        self.ofc.create_ofc_tenant(self.ctx, t)
        self.ofc.create_ofc_network(self.ctx, t, n)
        get_portinfo = self._mock_get_portinfo(p)
        port = {'tenant_id': t, 'network_id': n}
        self.ofc.create_ofc_port(self.ctx, p, port)
        self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_port', p))
        self.ofc.delete_ofc_port(self.ctx, p, port)
        self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_port', p))
        get_portinfo.assert_called_once_with(mock.ANY, p)
class OFCManagerFilterTest(OFCManagerTestBase):
    """CRUD round-trips for packet filters through OFCManager."""
    def testj_create_ofc_packet_filter(self):
        """test create ofc_filter."""
        t, n, p, f, none = self.get_random_params()
        self.ofc.create_ofc_tenant(self.ctx, t)
        self.ofc.create_ofc_network(self.ctx, t, n)
        self.assertFalse(ndb.get_ofc_item(self.ctx.session,
                                          'ofc_packet_filter', f))
        pf = {'tenant_id': t, 'network_id': n}
        self.ofc.create_ofc_packet_filter(self.ctx, f, pf)
        self.assertTrue(ndb.get_ofc_item(self.ctx.session,
                                         'ofc_packet_filter', f))
        _filter = ndb.get_ofc_item(self.ctx.session, 'ofc_packet_filter', f)
        self.assertEqual(_filter.ofc_id, "ofc-" + f[:-4])
    def testk_exists_ofc_packet_filter(self):
        """test exists_ofc_packet_filter."""
        t, n, p, f, none = self.get_random_params()
        self.ofc.create_ofc_tenant(self.ctx, t)
        self.ofc.create_ofc_network(self.ctx, t, n)
        self.assertFalse(self.ofc.exists_ofc_packet_filter(self.ctx, f))
        pf = {'tenant_id': t, 'network_id': n}
        self.ofc.create_ofc_packet_filter(self.ctx, f, pf)
        self.assertTrue(self.ofc.exists_ofc_packet_filter(self.ctx, f))
    def testl_delete_ofc_packet_filter(self):
        """test delete ofc_filter."""
        t, n, p, f, none = self.get_random_params()
        self.ofc.create_ofc_tenant(self.ctx, t)
        self.ofc.create_ofc_network(self.ctx, t, n)
        pf = {'tenant_id': t, 'network_id': n}
        self.ofc.create_ofc_packet_filter(self.ctx, f, pf)
        self.assertTrue(ndb.get_ofc_item(self.ctx.session,
                                         'ofc_packet_filter', f))
        self.ofc.delete_ofc_packet_filter(self.ctx, f)
        self.assertFalse(ndb.get_ofc_item(self.ctx.session,
                                          'ofc_packet_filter', f))
class OFCManagerRouterTest(OFCManagerTestBase):
    """CRUD round-trips for routers, interfaces and routes through OFCManager.

    Bug fix: the post-delete assertions in test_delete_ofc_router and
    test_router_interface queried the 'ofc_network' table with a *router*
    id, which is vacuously absent, so the assertions could never fail.
    They now check the 'ofc_router' table.
    """
    def get_random_params(self):
        """Return random (tenant, router, network) uuids."""
        tenant = uuidutils.generate_uuid()
        router = uuidutils.generate_uuid()
        network = uuidutils.generate_uuid()
        return (tenant, router, network)
    def test_create_ofc_router(self):
        """test create ofc_router"""
        t, r, _n = self.get_random_params()
        self.ofc.create_ofc_tenant(self.ctx, t)
        self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_router', r))
        self.ofc.create_ofc_router(self.ctx, t, r, 'test router')
        self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_router', r))
        router = ndb.get_ofc_item(self.ctx.session, 'ofc_router', r)
        self.assertEqual(router.ofc_id, "ofc-" + r[:-4])
    def test_exists_ofc_router(self):
        """test exists_ofc_router"""
        t, r, _n = self.get_random_params()
        self.ofc.create_ofc_tenant(self.ctx, t)
        self.assertFalse(self.ofc.exists_ofc_router(self.ctx, r))
        self.ofc.create_ofc_router(self.ctx, t, r)
        self.assertTrue(self.ofc.exists_ofc_router(self.ctx, r))
    def test_delete_ofc_router(self):
        """test delete ofc_router"""
        t, r, _n = self.get_random_params()
        self.ofc.create_ofc_tenant(self.ctx, t)
        self.ofc.create_ofc_router(self.ctx, t, r)
        self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_router', r))
        self.ofc.delete_ofc_router(self.ctx, r, {'tenant_id': t})
        # Fixed: was 'ofc_network' (always absent for a router id).
        self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_router', r))
    def test_router_interface(self):
        """Add then remove a router interface; router deletion cleans up."""
        t, r, n = self.get_random_params()
        self.ofc.create_ofc_tenant(self.ctx, t)
        self.ofc.create_ofc_router(self.ctx, t, r)
        self.ofc.create_ofc_network(self.ctx, t, n)
        self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_router', r))
        self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_network', n))
        p = {'id': uuidutils.generate_uuid(),
             'network_id': n, 'ip_address': '10.1.1.1', 'cidr': '10.1.0.0/20',
             'mac_address': '11:22:33:44:55:66'}
        self.ofc.add_ofc_router_interface(self.ctx, r, p['id'], p)
        self.assertTrue(ndb.get_ofc_item(self.ctx.session,
                                         'ofc_port', p['id']))
        self.ofc.delete_ofc_router_interface(self.ctx, r, p['id'])
        self.assertFalse(ndb.get_ofc_item(self.ctx.session,
                                          'ofc_port', p['id']))
        self.ofc.delete_ofc_router(self.ctx, r, {'tenant_id': t})
        # Fixed: was 'ofc_network' (always absent for a router id).
        self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_router', r))
    def test_router_route(self):
        """Update router routes and verify the stub driver's route table."""
        t, r, _n = self.get_random_params()
        self.ofc.create_ofc_tenant(self.ctx, t)
        self.ofc.create_ofc_router(self.ctx, t, r)
        self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_router', r))
        routes = [{'destination': '2.2.2.0/24', 'nexthop': '1.1.1.10'}]
        self.ofc.update_ofc_router_route(self.ctx, r, routes)
        self.assertEqual(len(self.ofc.driver.ofc_router_route_dict), 1)
        routes = [{'destination': '3.3.3.0/24', 'nexthop': '1.1.1.11'},
                  {'destination': '4.4.4.0/24', 'nexthop': '1.1.1.11'}]
        self.ofc.update_ofc_router_route(self.ctx, r, routes)
        self.assertEqual(len(self.ofc.driver.ofc_router_route_dict), 2)
        routes = [{'destination': '2.2.2.0/24', 'nexthop': '1.1.1.10'}]
        self.ofc.update_ofc_router_route(self.ctx, r, routes)
        self.assertEqual(len(self.ofc.driver.ofc_router_route_dict), 1)
        routes = []
        self.ofc.update_ofc_router_route(self.ctx, r, routes)
        self.assertEqual(len(self.ofc.driver.ofc_router_route_dict), 0)
| apache-2.0 |
tabletcorry/dt_utils | dt_utils/monitor.py | 1 | 4080 | #!/usr/bin/env python2.7
import os
from time import sleep, time
import socket
from dt_utils.status import read_status
import redis
import cPickle as pickle
import struct
# Derive the short hostname (domain part stripped) for metric/redis keys.
hostname = socket.gethostname()
if '.' in hostname:
    hostname = hostname[:hostname.find('.')]
# Polling period (seconds) and the Graphite pickle-receiver endpoint.
INTERVAL = 10
GRAPHITE_HOST = 'graphite'
GRAPHITE_PORT = 2004
# Redis is <host> -> <service> -> <state>
# Where state is:
# <status>:<time>
# Where status is:
# up, down, flap
# Where time is:
# integer representing time in status
# NOTE(review): this socket connects at import time, before __main__ gets a
# chance to override GRAPHITE_HOST from argv -- confirm that is intended.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((GRAPHITE_HOST, GRAPHITE_PORT))
# Pending (metric_path, (timestamp, value)) datapoints awaiting a flush.
stats_buffer = []
metric_path = "hosts.{0}.DT.{1}.{2}"
def prepare_for_graphite(status, service, now):
    """Append uptime (and restart marker) datapoints for one service.

    Mutates the module-level ``stats_buffer``.  ``status.tai`` is treated as
    the service's start time in epoch seconds (presumably -- TODO confirm
    against dt_utils.status), so ``now - status.tai`` is the uptime.
    """
    run_time = now - status.tai
    # Started within the last polling interval => the service was restarted.
    restarted = run_time <= INTERVAL
    # Graphite metric path components must not contain dots.
    service = service.replace('.', '_')
    stats_buffer.append((metric_path.format(hostname, service, "uptime"),
                         (now, run_time)))
    if restarted:
        stats_buffer.append((metric_path.format(hostname, service, "restart"),
                             (now, 1)))
# Number of polling intervals elapsed since the last Graphite flush.
interval_total = 0
def send_to_graphite():
    """Flush the accumulated stats to Graphite once every 6 intervals.

    Called once per monitor loop iteration; sends the batch using Graphite's
    pickle protocol (4-byte big-endian length header + pickled payload).
    """
    global stats_buffer, interval_total
    if interval_total >= 6:
        interval_total = 0
        payload = pickle.dumps(stats_buffer)
        header = struct.pack("!L", len(payload))
        message = header + payload
        s.send(message)
        # Bug fix: the buffer was previously cleared on *every* call, which
        # silently discarded the datapoints gathered in the five intervals
        # between sends.  Only clear it after the batch has been sent.
        stats_buffer = []
    interval_total += 1
def monitor(root_path, host, port=6379, db=0):
    """Poll daemontools service status forever, publishing to redis/Graphite.

    root_path: directory containing one daemontools service dir per service.
    host/port/db: redis connection parameters.
    """
    services = os.listdir(root_path)
    r = redis.StrictRedis(host=host, port=port, db=db)
    host_storage_name = 'dt-monitor:host:' + hostname
    r.sadd('dt-monitor:hosts', host_storage_name)
    heartbeat_accum = 0
    # service -> start timestamp recorded when a recent restart was seen.
    flapping = {}
    # service -> last state string pushed to redis.
    states = {}
    while True:
        new_states = {}
        now = int(time())
        for service in services:
            status = read_status(os.path.join(root_path, service))
            up_string = 'up'
            if not status.up:
                up_string = 'down'
            # A start time within the last interval means a recent (re)start;
            # two in a row is classified as flapping.
            if now - status.tai <= INTERVAL:
                print status
                print flapping
                if service in flapping:
                    up_string = 'flap'
                else:
                    flapping[service] = status.tai
            else:
                if service in flapping:
                    del flapping[service]
            prepare_for_graphite(status, service, now)
            # Flap states keep the timestamp of the first restart observed.
            state_string = "{0}:{1}".format(up_string, status.tai if up_string != 'flap' else flapping[service])
            if service in states:
                if states[service] == state_string:
                    continue
            new_states[service] = state_string
        if len(new_states) > 0:
            r.hmset(host_storage_name, new_states)
        # Maintain the per-status membership sets for changed services.
        for service, state_string in new_states.items():
            states[service] = state_string
            if state_string.startswith('up:'):
                r.srem('dt-monitor:services:down', host_storage_name + ':' + service)
                r.srem('dt-monitor:services:flap', host_storage_name + ':' + service)
            elif state_string.startswith('down:'):
                r.sadd('dt-monitor:services:down', host_storage_name + ':' + service)
                r.srem('dt-monitor:services:flap', host_storage_name + ':' + service)
            elif state_string.startswith('flap:'):
                r.sadd('dt-monitor:services:flap', host_storage_name + ':' + service)
                r.srem('dt-monitor:services:down', host_storage_name + ':' + service)
        sleep(INTERVAL)
        # If our host entry vanished (e.g. redis flushed), re-register and
        # force a full state re-publish by clearing the local cache.
        if not r.sismember('dt-monitor:hosts', host_storage_name):
            r.sadd('dt-monitor:hosts', host_storage_name)
            states = {}
        heartbeat_accum += INTERVAL
        # NOTE(review): '% 60 > 0' is true on every pass (the accumulator is
        # always 10 here), so the heartbeat fires each interval and the
        # accumulator never grows; the intent was probably '>= 60' (one
        # heartbeat per minute).  Confirm before changing.
        if heartbeat_accum % 60 > 0:
            heartbeat_accum = 0
            r.hset('dt-monitor:heartbeat', host_storage_name, now)
        send_to_graphite()
if __name__ == "__main__":
    # Usage: monitor.py <service_root> <redis_host> <redis_port> <graphite_host>
    import sys
    # NOTE(review): the module-level socket already connected to the default
    # GRAPHITE_HOST at import time; this override cannot affect it.  Confirm.
    GRAPHITE_HOST = sys.argv[4]
    monitor(sys.argv[1], sys.argv[2], int(sys.argv[3]))
| mit |
BTA-BATA/electrum-bta-master | lib/wallet.py | 1 | 72921 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import os
import hashlib
import ast
import threading
import random
import time
import math
import json
import copy
from operator import itemgetter
from util import print_msg, print_error, NotEnoughFunds
from util import profiler
from bitcoin import *
from account import *
from version import *
from transaction import Transaction
from plugins import run_hook
import bitcoin
from synchronizer import WalletSynchronizer
from mnemonic import Mnemonic
# internal ID for imported account
IMPORTED_ACCOUNT = '/x'
class WalletStorage(object):
    """JSON-backed key/value store for a wallet file.

    All access is guarded by an RLock; every put() optionally rewrites the
    whole file atomically.
    """
    def __init__(self, storage path):
        self.lock = threading.RLock()
        self.data = {}
        self.path = path
        self.file_exists = False
        print_error( "wallet path", self.path )
        if self.path:
            self.read(self.path)
    def read(self, path):
        """Read the contents of the wallet file."""
        try:
            with open(self.path, "r") as f:
                data = f.read()
        except IOError:
            # Missing file: leave self.data empty, file_exists False.
            return
        try:
            self.data = json.loads(data)
        except:
            # Legacy wallets stored a Python-literal dict, not JSON.
            try:
                d = ast.literal_eval(data) #parse raw data from reading wallet file
            except Exception as e:
                raise IOError("Cannot read wallet file '%s'" % self.path)
            self.data = {}
            # In old versions of Electrum labels were latin1 encoded, this fixes breakage.
            labels = d.get('labels', {})
            for i, label in labels.items():
                try:
                    unicode(label)
                except UnicodeDecodeError:
                    d['labels'][i] = unicode(label.decode('latin1'))
            # Keep only entries that can round-trip through JSON.
            for key, value in d.items():
                try:
                    json.dumps(key)
                    json.dumps(value)
                except:
                    print_error('Failed to convert label to json format', key)
                    continue
                self.data[key] = value
        self.file_exists = True
    def get(self, key, default=None):
        """Thread-safe read; returns a deep copy so callers cannot mutate storage."""
        with self.lock:
            v = self.data.get(key)
            if v is None:
                v = default
            else:
                v = copy.deepcopy(v)
            return v
    def put(self, key, value, save = True):
        """Thread-safe write; value None deletes the key.  save flushes to disk."""
        try:
            json.dumps(key)
            json.dumps(value)
        except:
            print_error("json error: cannot save", key)
            return
        with self.lock:
            if value is not None:
                self.data[key] = copy.deepcopy(value)
            elif key in self.data:
                self.data.pop(key)
            if save:
                self.write()
    def write(self):
        """Atomically rewrite the wallet file (write temp file, then rename)."""
        assert not threading.currentThread().isDaemon()
        temp_path = "%s.tmp.%s" % (self.path, os.getpid())
        s = json.dumps(self.data, indent=4, sort_keys=True)
        with open(temp_path, "w") as f:
            f.write(s)
            f.flush()
            os.fsync(f.fileno())
        # perform atomic write on POSIX systems
        try:
            os.rename(temp_path, self.path)
        except:
            # e.g. Windows, where rename onto an existing file fails.
            os.remove(self.path)
            os.rename(temp_path, self.path)
        if 'ANDROID_DATA' not in os.environ:
            import stat
            os.chmod(self.path,stat.S_IREAD | stat.S_IWRITE)
class Abstract_Wallet(object):
"""
Wallet classes are created to handle various address generation methods.
Completion states (watching-only, single account, no seed, etc) are handled inside classes.
"""
    def __init__(self, storage):
        """Initialise wallet state from a WalletStorage instance."""
        self.storage = storage
        self.electrum_version = ELECTRUM_VERSION
        self.gap_limit_for_change = 6 # constant
        # saved fields
        self.seed_version = storage.get('seed_version', NEW_SEED_VERSION)
        self.use_change = storage.get('use_change',True)
        self.use_encryption = storage.get('use_encryption', False)
        self.seed = storage.get('seed', '') # encrypted
        self.labels = storage.get('labels', {})
        self.frozen_addresses = set(storage.get('frozen_addresses',[]))
        self.stored_height = storage.get('stored_height', 0) # last known height (for offline mode)
        self.history = storage.get('addr_history',{}) # address -> list(txid, height)
        self.fee_per_kb = int(storage.get('fee_per_kb', RECOMMENDED_FEE))
        # This attribute is set when wallet.start_threads is called.
        self.synchronizer = None
        # imported_keys is deprecated. The GUI should call convert_imported_keys
        self.imported_keys = self.storage.get('imported_keys',{})
        self.load_accounts()
        self.load_transactions()
        self.build_reverse_history()
        # load requests
        self.receive_requests = self.storage.get('payment_requests', {})
        # spv
        self.verifier = None
        # Transactions pending verification. Each value is the transaction height. Access with self.lock.
        self.unverified_tx = {}
        # Verified transactions. Each value is a (height, timestamp, block_pos) tuple. Access with self.lock.
        self.verified_tx = storage.get('verified_tx3',{})
        # there is a difference between wallet.up_to_date and interface.is_up_to_date()
        # interface.is_up_to_date() returns true when all requests have been answered and processed
        # wallet.up_to_date is true when the wallet is synchronized (stronger requirement)
        self.up_to_date = False
        self.lock = threading.Lock()
        self.transaction_lock = threading.Lock()
        self.tx_event = threading.Event()
        self.check_history()
        # save wallet type the first time
        if self.storage.get('wallet_type') is None:
            self.storage.put('wallet_type', self.wallet_type, True)
    @profiler
    def load_transactions(self):
        """Load tx input/output maps and raw transactions from storage,
        dropping transactions no longer referenced by any history entry."""
        self.txi = self.storage.get('txi', {})
        self.txo = self.storage.get('txo', {})
        self.pruned_txo = self.storage.get('pruned_txo', {})
        tx_list = self.storage.get('transactions', {})
        self.transactions = {}
        for tx_hash, raw in tx_list.items():
            tx = Transaction(raw)
            self.transactions[tx_hash] = tx
            if self.txi.get(tx_hash) is None and self.txo.get(tx_hash) is None and (tx_hash not in self.pruned_txo.values()):
                print_error("removing unreferenced tx", tx_hash)
                self.transactions.pop(tx_hash)
    @profiler
    def save_transactions(self):
        """Serialise the in-memory transaction maps back to storage."""
        with self.transaction_lock:
            tx = {}
            for k,v in self.transactions.items():
                tx[k] = str(v)
            # Flush storage only with the last put
            self.storage.put('transactions', tx, False)
            self.storage.put('txi', self.txi, False)
            self.storage.put('txo', self.txo, False)
            self.storage.put('pruned_txo', self.pruned_txo, True)
    def clear_history(self):
        """Wipe all transaction maps and address history, persisting the reset."""
        with self.transaction_lock:
            self.txi = {}
            self.txo = {}
            self.pruned_txo = {}
        self.save_transactions()
        with self.lock:
            self.history = {}
            self.tx_addr_hist = {}
        self.storage.put('addr_history', self.history, True)
@profiler
def build_reverse_history(self):
self.tx_addr_hist = {}
for addr, hist in self.history.items():
for tx_hash, h in hist:
s = self.tx_addr_hist.get(tx_hash, set())
s.add(addr)
self.tx_addr_hist[tx_hash] = s
    @profiler
    def check_history(self):
        """Sanity-check the loaded history: drop entries for foreign
        addresses and re-register transactions missing from the tx maps.

        NOTE: popping from self.history while iterating .items() is safe on
        Python 2 (items() returns a list); it would break on Python 3.
        """
        save = False
        for addr, hist in self.history.items():
            if not self.is_mine(addr):
                self.history.pop(addr)
                save = True
                continue
            for tx_hash, tx_height in hist:
                # Already indexed or deliberately pruned: nothing to do.
                if tx_hash in self.pruned_txo.values() or self.txi.get(tx_hash) or self.txo.get(tx_hash):
                    continue
                tx = self.transactions.get(tx_hash)
                if tx is not None:
                    tx.deserialize()
                    self.add_transaction(tx_hash, tx, tx_height)
        if save:
            self.storage.put('addr_history', self.history, True)
    # wizard action
    def get_action(self):
        """Hook for the install wizard; subclasses return the next action."""
        pass
def basename(self):
return os.path.basename(self.storage.path)
    def convert_imported_keys(self, password):
        """Migrate the deprecated imported_keys map into an ImportedAccount.

        Decrypts each stored key with ``password`` and re-imports it.
        Raises InvalidPassword if a decrypted key does not match its address.
        """
        for k, v in self.imported_keys.items():
            sec = pw_decode(v, password)
            pubkey = public_key_from_private_key(sec)
            address = public_key_to_bc_address(pubkey.decode('hex'))
            if address != k:
                raise InvalidPassword()
            self.import_key(sec, password)
            self.imported_keys.pop(k)
        self.storage.put('imported_keys', self.imported_keys)
def load_accounts(self):
    """Instantiate account objects from the serialized 'accounts' storage
    entry, dispatching on the stored dict's shape (old mpk / imported /
    xpub / pending)."""
    self.accounts = {}
    d = self.storage.get('accounts', {})
    for k, v in d.items():
        if self.wallet_type == 'old' and k in [0, '0']:
            v['mpk'] = self.storage.get('master_public_key')
            self.accounts['0'] = OldAccount(v)
        elif v.get('imported'):
            self.accounts[k] = ImportedAccount(v)
        elif v.get('xpub'):
            self.accounts[k] = BIP32_Account(v)
        elif v.get('pending'):
            # a malformed pending account is silently dropped
            try:
                self.accounts[k] = PendingAccount(v)
            except:
                pass
        else:
            print_error("cannot load account", v)
def synchronize(self):
    """Generate missing addresses; no-op here, overridden by deterministic wallets."""
    pass
def can_create_accounts(self):
    """Whether new accounts can be created; base class says no."""
    return False
def set_up_to_date(self,b):
    """Thread-safe setter for the synchronization flag."""
    with self.lock: self.up_to_date = b
def is_up_to_date(self):
    """Thread-safe getter for the synchronization flag."""
    with self.lock: return self.up_to_date
def update(self):
    """Block (busy-wait) until the synchronizer marks the wallet up to date."""
    self.up_to_date = False
    while not self.is_up_to_date():
        time.sleep(0.1)
def is_imported(self, addr):
    """Whether *addr* belongs to the imported-keys pseudo-account."""
    imported = self.accounts.get(IMPORTED_ACCOUNT)
    return bool(imported) and addr in imported.get_addresses(0)
def has_imported_keys(self):
    """True when an imported-keys pseudo-account exists in this wallet."""
    return self.accounts.get(IMPORTED_ACCOUNT) is not None
def import_key(self, sec, password):
    """Import private key *sec* (encrypted with *password*) into the
    imported-keys account and return its address.

    Raises Exception for an invalid key or an address already present.
    """
    assert self.can_import(), 'This wallet cannot import private keys'
    try:
        pubkey = public_key_from_private_key(sec)
        address = public_key_to_bc_address(pubkey.decode('hex'))
    except Exception:
        raise Exception('Invalid private key')
    if self.is_mine(address):
        raise Exception('Address already in wallet')
    if self.accounts.get(IMPORTED_ACCOUNT) is None:
        self.accounts[IMPORTED_ACCOUNT] = ImportedAccount({'imported':{}})
    self.accounts[IMPORTED_ACCOUNT].add(address, pubkey, sec, password)
    self.save_accounts()
    # force resynchronization, because we need to re-run add_transaction
    # BUG FIX: original referenced the undefined name `addr` here, which
    # raised NameError whenever the new address already had history.
    if address in self.history:
        self.history.pop(address)
    if self.synchronizer:
        self.synchronizer.add(address)
    return address
def delete_imported_key(self, addr):
    """Remove *addr* from the imported account; drop the account entirely
    when its last address is gone, then persist."""
    account = self.accounts[IMPORTED_ACCOUNT]
    account.remove(addr)
    if not account.get_addresses(0):
        self.accounts.pop(IMPORTED_ACCOUNT)
    self.save_accounts()
def set_label(self, name, text = None):
    """Set (or clear, when *text* is falsy) the label for *name* (an address,
    tx hash or account id); persists and fires the 'set_label' hook.

    Returns True when the stored label actually changed.
    """
    changed = False
    old_text = self.labels.get(name)
    if text:
        if old_text != text:
            self.labels[name] = text
            changed = True
    else:
        if old_text:
            self.labels.pop(name)
            changed = True
    if changed:
        self.storage.put('labels', self.labels, True)
    run_hook('set_label', name, text, changed)
    return changed
def addresses(self, include_change = True):
    """All addresses across every account; change addresses included by default."""
    return [addr
            for acc in self.accounts
            for addr in self.get_account_addresses(acc, include_change)]
def is_mine(self, address):
    """Whether *address* belongs to this wallet (any account, incl. change)."""
    return address in self.addresses(True)
def is_change(self, address):
    """True when *address* lies on an account's change branch (sequence[0] == 1)."""
    if not self.is_mine(address):
        return False
    _, sequence = self.get_address_index(address)
    return sequence is not None and sequence[0] == 1
def get_address_index(self, address):
    """Locate *address*: returns (account_id, (for_change, position)).

    Raises Exception when the address is in no account.
    """
    for acc_id, account in self.accounts.items():
        for branch in (0, 1):
            addr_list = account.get_addresses(branch)
            if address in addr_list:
                return acc_id, (branch, addr_list.index(address))
    raise Exception("Address not found", address)
def get_private_key(self, address, password):
    """Private key(s) for *address*; empty list for watching-only wallets."""
    if self.is_watching_only():
        return []
    account_id, sequence = self.get_address_index(address)
    return self.accounts[account_id].get_private_key(sequence, self, password)
def get_public_keys(self, address):
    """Public key(s) controlling *address*, resolved via its account."""
    account_id, sequence = self.get_address_index(address)
    return self.accounts[account_id].get_pubkeys(*sequence)
def sign_message(self, address, message, password):
    """Sign *message* with the single private key of *address*.

    Asserts the address is single-key (not multisig).
    """
    keys = self.get_private_key(address, password)
    assert len(keys) == 1
    sec = keys[0]
    key = regenerate_key(sec)
    compressed = is_compressed(sec)
    return key.sign_message(message, compressed, address)
def decrypt_message(self, pubkey, message, password):
    """Decrypt *message* using the private key matching hex *pubkey*."""
    address = public_key_to_bc_address(pubkey.decode('hex'))
    keys = self.get_private_key(address, password)
    secret = keys[0]
    ec = regenerate_key(secret)
    decrypted = ec.decrypt_message(message)
    return decrypted
def add_unverified_tx(self, tx_hash, tx_height):
    """Register a confirmed-but-unverified tx; unconfirmed (height <= 0) are ignored."""
    if tx_height > 0:
        with self.lock:
            self.unverified_tx[tx_hash] = tx_height
def add_verified_tx(self, tx_hash, info):
    """Record SPV verification info for a tx, persist, and notify the UI."""
    with self.lock:
        self.verified_tx[tx_hash] = info  # (tx_height, timestamp, pos)
    self.storage.put('verified_tx3', self.verified_tx, True)
    self.network.trigger_callback('updated')
def get_unverified_txs(self):
    """Return (tx_hash, height) pairs that are unverified and not beyond the
    locally known chain height."""
    with self.lock:
        return [(tx_hash, tx_height)
                for tx_hash, tx_height in self.unverified_tx.items()
                # do not request merkle branch before headers are available
                if tx_hash not in self.verified_tx
                and tx_height <= self.get_local_height()]
def undo_verifications(self, height):
    """Used by the verifier when a reorg has happened.

    Drops every verification record at or above *height* and returns the
    list of affected tx hashes.
    """
    txs = []
    with self.lock:
        # BUG FIX: iterating the dict directly yields only keys, so the
        # tuple unpacking raised at runtime; we need (hash, info) pairs.
        # Snapshot with list() so popping inside the loop is safe.
        for tx_hash, item in list(self.verified_tx.items()):
            tx_height, timestamp, pos = item
            if tx_height >= height:
                self.verified_tx.pop(tx_hash, None)
                txs.append(tx_hash)
    return txs
def get_local_height(self):
    """Last known blockchain height; falls back to the stored height when offline."""
    if self.network:
        return self.network.get_local_height()
    return self.stored_height
def get_confirmations(self, tx):
    """Return (confirmations, timestamp) for a monitored tx hash.

    conf > 0: verified; conf == -1: seen but unverified; conf == 0: unknown.
    timestamp is None unless the tx is verified with conf > 0.
    """
    with self.lock:
        if tx in self.verified_tx:
            height, timestamp, pos = self.verified_tx[tx]
            conf = (self.get_local_height() - height + 1)
            if conf <= 0: timestamp = None
        elif tx in self.unverified_tx:
            conf = -1
            timestamp = None
        else:
            conf = 0
            timestamp = None
    return conf, timestamp
def get_txpos(self, tx_hash):
    "return position, even if the tx is unverified"
    # Returns (height, pos-in-block); unverified txs get pos 0 and unknown
    # txs sort last via the 1e12 sentinel height.
    with self.lock:
        x = self.verified_tx.get(tx_hash)
        y = self.unverified_tx.get(tx_hash)
        if x:
            height, timestamp, pos = x
            return height, pos
        elif y:
            return y, 0
        else:
            return 1e12, 0
def is_found(self):
    """True if at least one address has non-empty history.

    NOTE(review): the comparison relies on Python 2's values() returning a
    list; on Python 3 a dict view never equals a list — confirm before porting.
    """
    return self.history.values() != [[]] * len(self.history)
def get_num_tx(self, address):
    """Number of history entries (transactions) involving *address*."""
    hist = self.history.get(address)
    return len(hist) if hist else 0
def get_tx_delta(self, tx_hash, address):
    """Net effect of tx *tx_hash* on *address*: received minus spent.

    Returns None for pruned transactions.
    """
    if tx_hash in self.pruned_txo.values():
        return None
    spent = sum(v for _n, v in self.txi.get(tx_hash, {}).get(address, []))
    received = sum(v for _n, v, _cb in self.txo.get(tx_hash, {}).get(address, []))
    return received - spent
def get_wallet_delta(self, tx):
    """ effect of tx on wallet """
    # Returns (is_relevant, is_send, value, fee):
    #   is_relevant - any input or output touches our addresses
    #   is_send     - at least one input is ours
    #   value       - net value change for the wallet
    #   fee         - None when inputs are pruned/partial, else v_out - v_in
    addresses = self.addresses(True)
    is_relevant = False
    is_send = False
    is_pruned = False
    is_partial = False
    v_in = v_out = v_out_mine = 0
    for item in tx.inputs:
        addr = item.get('address')
        if addr in addresses:
            is_send = True
            is_relevant = True
            # look up the value of the spent output in our txo index
            d = self.txo.get(item['prevout_hash'], {}).get(addr, [])
            for n, v, cb in d:
                if n == item['prevout_n']:
                    value = v
                    break
            else:
                value = None
            if value is None:
                is_pruned = True
            else:
                v_in += value
        else:
            is_partial = True
    if not is_send:
        is_partial = False
    for addr, value in tx.get_outputs():
        v_out += value
        if addr in addresses:
            v_out_mine += value
            is_relevant = True
    if is_pruned:
        # some inputs are mine:
        fee = None
        if is_send:
            v = v_out_mine - v_out
        else:
            # no input is mine
            v = v_out_mine
    else:
        v = v_out_mine - v_in
        if is_partial:
            # some inputs are mine, but not all
            fee = None
            is_send = v < 0
        else:
            # all inputs are mine
            fee = v_out - v_in
    return is_relevant, is_send, v, fee
def get_addr_io(self, address):
    """Collect the outputs received at and outpoints spent from *address*.

    Returns (received, sent): received maps "txhash:n" -> (height, value,
    is_coinbase); sent maps a spent outpoint string -> spending tx height.
    """
    received = {}
    sent = {}
    for tx_hash, height in self.history.get(address, []):
        for n, v, is_cb in self.txo.get(tx_hash, {}).get(address, []):
            received['%s:%d' % (tx_hash, n)] = (height, v, is_cb)
        for outpoint, v in self.txi.get(tx_hash, {}).get(address, []):
            sent[outpoint] = height
    return received, sent
def get_addr_utxo(self, address):
    """Unspent outputs of *address*: received outputs minus the spent ones."""
    coins, spent = self.get_addr_io(address)
    for outpoint in spent:
        del coins[outpoint]
    return coins
# return the total amount ever received by an address
def get_addr_received(self, address):
    """Total amount ever received by *address* (sum over all received outputs)."""
    received, _sent = self.get_addr_io(address)
    return sum(v for _height, v, _is_cb in received.values())
# return the balance of a bitcoin address: confirmed and matured, unconfirmed, unmatured
def get_addr_balance(self, address):
    """Balance of an address as (confirmed-and-matured, unconfirmed, unmatured).

    Immature coinbase outputs count as unmatured (x); spends are subtracted
    from confirmed or unconfirmed depending on the spending tx height.
    """
    received, sent = self.get_addr_io(address)
    c = u = x = 0
    for txo, (tx_height, v, is_cb) in received.items():
        if is_cb and tx_height + COINBASE_MATURITY > self.get_local_height():
            x += v
        elif tx_height > 0:
            c += v
        else:
            u += v
        if txo in sent:
            if sent[txo] > 0:
                c -= v
            else:
                u -= v
    return c, u, x
def get_spendable_coins(self, domain = None, exclude_frozen = True):
    """Spendable UTXOs as output dicts, skipping immature coinbase outputs
    and (optionally) frozen addresses; oldest coins are preferred by the
    final ordering."""
    coins = []
    if domain is None:
        domain = self.addresses(True)
    if exclude_frozen:
        domain = set(domain) - self.frozen_addresses
    for addr in domain:
        c = self.get_addr_utxo(addr)
        for txo, v in c.items():
            tx_height, value, is_cb = v
            # immature coinbase outputs are not spendable yet
            if is_cb and tx_height + COINBASE_MATURITY > self.get_local_height():
                continue
            prevout_hash, prevout_n = txo.split(':')
            output = {
                'address':addr,
                'value':value,
                'prevout_n':int(prevout_n),
                'prevout_hash':prevout_hash,
                'height':tx_height,
                'coinbase':is_cb
            }
            coins.append((tx_height, output))
            continue
    # sort by age
    if coins:
        coins = sorted(coins)
        # rotate unconfirmed (height 0) coins to the end of the list
        if coins[-1][0] != 0:
            while coins[0][0] == 0:
                coins = coins[1:] + [ coins[0] ]
    return [value for height, value in coins]
def get_account_name(self, k):
    """User label for account *k*, falling back to the account's default name."""
    fallback = self.accounts[k].get_name(k)
    return self.labels.get(k, fallback)
def get_account_names(self):
    """Mapping of account id -> display name for every account."""
    return dict((k, self.get_account_name(k)) for k in self.accounts.keys())
def get_account_addresses(self, acc_id, include_change=True):
    """Addresses of one account, or of all accounts when *acc_id* is None.

    NOTE(review): an acc_id that is neither None nor a known account leaves
    addr_list unbound and raises UnboundLocalError — confirm callers never
    pass one.
    """
    if acc_id is None:
        addr_list = self.addresses(include_change)
    elif acc_id in self.accounts:
        acc = self.accounts[acc_id]
        addr_list = acc.get_addresses(0)
        if include_change:
            addr_list += acc.get_addresses(1)
    return addr_list
def get_account_from_address(self, addr):
    """Return the id of the account containing *addr*, or None.

    Similar to get_address_index but without computing the position.
    """
    return next((acc_id for acc_id in self.accounts
                 if addr in self.get_account_addresses(acc_id)), None)
def get_account_balance(self, account):
    """Balance triple (confirmed, unconfirmed, unmatured) of one account."""
    return self.get_balance(self.get_account_addresses(account))
def get_frozen_balance(self):
    """Balance triple restricted to the frozen addresses."""
    return self.get_balance(self.frozen_addresses)
def get_balance(self, domain=None):
    """Sum per-address balances over *domain* (default: all addresses).

    Returns (confirmed, unconfirmed, unmatured).
    """
    if domain is None:
        domain = self.addresses(True)
    totals = [0, 0, 0]
    for addr in domain:
        for i, part in enumerate(self.get_addr_balance(addr)):
            totals[i] += part
    return tuple(totals)
def set_fee(self, fee, save = True):
    """Set the per-kB fee rate; persisted to storage (flushed when *save*)."""
    self.fee_per_kb = fee
    self.storage.put('fee_per_kb', self.fee_per_kb, save)
def get_address_history(self, address):
    """Thread-safe copy of the (tx_hash, height) history list of *address*."""
    with self.lock:
        return self.history.get(address, [])
def get_status(self, h):
    """Electrum-protocol status hash of a history list *h*: sha256 over the
    concatenated "txhash:height:" entries, hex encoded; None for empty
    history. (Python 2 idiom: str.encode('hex').)"""
    if not h:
        return None
    status = ''
    for tx_hash, height in h:
        status += tx_hash + ':%d:' % height
    return hashlib.sha256( status ).digest().encode('hex')
def find_pay_to_pubkey_address(self, prevout_hash, prevout_n):
    """Resolve the address of a pay-to-pubkey output we indexed in txo;
    returns None implicitly when the outpoint is unknown."""
    dd = self.txo.get(prevout_hash, {})
    for addr, l in dd.items():
        for n, v, is_cb in l:
            if n == prevout_n:
                print_error("found pay-to-pubkey address:", addr)
                return addr
def add_transaction(self, tx_hash, tx, tx_height):
    """Index a transaction: record which of our outputs it spends (txi),
    which of its outputs pay us (txo), resolve previously pruned spends,
    and store the tx itself."""
    is_coinbase = tx.inputs[0].get('is_coinbase') == True
    with self.transaction_lock:
        # add inputs
        self.txi[tx_hash] = d = {}
        for txi in tx.inputs:
            addr = txi.get('address')
            if not txi.get('is_coinbase'):
                prevout_hash = txi['prevout_hash']
                prevout_n = txi['prevout_n']
                ser = prevout_hash + ':%d'%prevout_n
            if addr == "(pubkey)":
                addr = self.find_pay_to_pubkey_address(prevout_hash, prevout_n)
            # find value from prev output
            if addr and self.is_mine(addr):
                dd = self.txo.get(prevout_hash, {})
                for n, v, is_cb in dd.get(addr, []):
                    if n == prevout_n:
                        if d.get(addr) is None:
                            d[addr] = []
                        d[addr].append((ser, v))
                        break
                else:
                    # spent output not indexed yet: remember it as pruned
                    self.pruned_txo[ser] = tx_hash
        # add outputs
        self.txo[tx_hash] = d = {}
        for n, txo in enumerate(tx.outputs):
            ser = tx_hash + ':%d'%n
            _type, x, v = txo
            if _type == 'address':
                addr = x
            elif _type == 'pubkey':
                addr = public_key_to_bc_address(x.decode('hex'))
            else:
                addr = None
            if addr and self.is_mine(addr):
                if d.get(addr) is None:
                    d[addr] = []
                d[addr].append((n, v, is_coinbase))
            # give v to txi that spends me
            next_tx = self.pruned_txo.get(ser)
            if next_tx is not None:
                self.pruned_txo.pop(ser)
                dd = self.txi.get(next_tx, {})
                if dd.get(addr) is None:
                    dd[addr] = []
                dd[addr].append((ser, v))
        # save
        self.transactions[tx_hash] = tx
def remove_transaction(self, tx_hash, tx_height):
    """Remove a tx from the txi/txo indexes, re-marking spends of its
    outputs as pruned so they can be re-resolved later."""
    with self.transaction_lock:
        print_error("removing tx from history", tx_hash)
        #tx = self.transactions.pop(tx_hash)
        # drop pruned entries created BY this tx
        # NOTE(review): popping while looping relies on Python 2 items()
        # returning a list snapshot.
        for ser, hh in self.pruned_txo.items():
            if hh == tx_hash:
                self.pruned_txo.pop(ser)
        # add tx to pruned_txo, and undo the txi addition
        for next_tx, dd in self.txi.items():
            for addr, l in dd.items():
                ll = l[:]
                for item in ll:
                    ser, v = item
                    prev_hash, prev_n = ser.split(':')
                    if prev_hash == tx_hash:
                        l.remove(item)
                        self.pruned_txo[ser] = next_tx
                if l == []:
                    dd.pop(addr)
                else:
                    dd[addr] = l
        self.txi.pop(tx_hash)
        self.txo.pop(tx_hash)
def receive_tx_callback(self, tx_hash, tx, tx_height):
    """Network callback: index a newly received tx and queue it for verification."""
    self.add_transaction(tx_hash, tx, tx_height)
    #self.network.pending_transactions_for_notifications.append(tx)
    self.add_unverified_tx(tx_hash, tx_height)
def receive_history_callback(self, addr, hist):
    """Network callback: replace *addr*'s history with *hist*, dropping txs
    no longer referenced anywhere and (re)indexing the ones that are."""
    with self.lock:
        old_hist = self.history.get(addr, [])
        for tx_hash, height in old_hist:
            if (tx_hash, height) not in hist:
                # remove tx if it's not referenced in histories
                self.tx_addr_hist[tx_hash].remove(addr)
                if not self.tx_addr_hist[tx_hash]:
                    self.remove_transaction(tx_hash, height)
        self.history[addr] = hist
        self.storage.put('addr_history', self.history, True)
    for tx_hash, tx_height in hist:
        # add it in case it was previously unconfirmed
        self.add_unverified_tx(tx_hash, tx_height)
        # add reference in tx_addr_hist
        s = self.tx_addr_hist.get(tx_hash, set())
        s.add(addr)
        self.tx_addr_hist[tx_hash] = s
        # if addr is new, we have to recompute txi and txo
        tx = self.transactions.get(tx_hash)
        if tx is not None and self.txi.get(tx_hash, {}).get(addr) is None and self.txo.get(tx_hash, {}).get(addr) is None:
            tx.deserialize()
            self.add_transaction(tx_hash, tx, tx_height)
def get_history(self, domain=None):
    """Build the wallet history over *domain* (default: all addresses) as a
    list of (tx_hash, conf, delta, timestamp, balance) tuples, newest-first
    by tx position, with a running balance column."""
    from collections import defaultdict
    # get domain
    if domain is None:
        domain = self.get_account_addresses(None)
    # 1. Get the history of each address in the domain, maintain the
    #    delta of a tx as the sum of its deltas on domain addresses
    tx_deltas = defaultdict(int)
    for addr in domain:
        h = self.get_address_history(addr)
        for tx_hash, height in h:
            delta = self.get_tx_delta(tx_hash, addr)
            # a pruned (None) delta poisons the whole tx's delta
            if delta is None or tx_deltas[tx_hash] is None:
                tx_deltas[tx_hash] = None
            else:
                tx_deltas[tx_hash] += delta
    # 2. create sorted history
    history = []
    for tx_hash, delta in tx_deltas.items():
        conf, timestamp = self.get_confirmations(tx_hash)
        history.append((tx_hash, conf, delta, timestamp))
    history.sort(key = lambda x: self.get_txpos(x[0]))
    history.reverse()
    # 3. add balance
    c, u, x = self.get_balance(domain)
    balance = c + u + x
    h2 = []
    for item in history:
        tx_hash, conf, delta, timestamp = item
        h2.append((tx_hash, conf, delta, timestamp, balance))
        if balance is None or delta is None:
            balance = None
        else:
            balance -= delta
    h2.reverse()
    # fixme: this may happen if history is incomplete
    if balance not in [None, 0]:
        print_error("Error: history not synchronized")
        return []
    return h2
def get_label(self, tx_hash):
    """Return (label, is_default) for a tx; falls back to the computed
    default label when no user label is set."""
    label = self.labels.get(tx_hash)
    is_default = label is None or label == ''
    if is_default:
        label = self.get_default_label(tx_hash)
    return label, is_default
def get_default_label(self, tx_hash):
    """Default label for a tx with no inputs of ours (an incoming tx):
    the comma-joined labels of its destination addresses; '' otherwise."""
    if self.txi.get(tx_hash) != {}:
        return ''
    labels = []
    for addr in self.txo.get(tx_hash, {}).keys():
        label = self.labels.get(addr)
        if label:
            labels.append(label)
    return ', '.join(labels)
def get_tx_fee(self, tx):
    # this method can be overloaded
    """Fee of *tx*, delegated to the transaction object."""
    return tx.get_fee()
def estimated_fee(self, tx):
    """Fee estimate from the serialized size at self.fee_per_kb, never below
    the relay minimum nor the transaction's own required fee."""
    estimated_size = len(tx.serialize(-1))/2
    size_fee = int(self.fee_per_kb*estimated_size/1000.)
    return max(size_fee, MIN_RELAY_TX_FEE, tx.required_fee(self))
def make_unsigned_transaction(self, coins, outputs, fixed_fee=None, change_addr=None):
    """Build an unsigned tx paying *outputs* from *coins*: performs coin
    selection, fee estimation, change-address choice and dust handling.

    Raises NotEnoughFunds when the coins cannot cover amount + fee.
    """
    # check outputs
    for type, data, value in outputs:
        if type == 'address':
            assert is_address(data), "Address " + data + " is invalid!"
    amount = sum(map(lambda x:x[2], outputs))
    total = fee = 0
    inputs = []
    tx = Transaction.from_io(inputs, outputs)
    # add old inputs first
    for item in coins:
        v = item.get('value')
        total += v
        self.add_input_info(item)
        tx.add_input(item)
        # no need to estimate fee until we have reached desired amount
        if total < amount:
            continue
        fee = fixed_fee if fixed_fee is not None else self.estimated_fee(tx)
        if total >= amount + fee:
            break
    else:
        raise NotEnoughFunds()
    # remove unneeded inputs
    for item in sorted(tx.inputs, key=itemgetter('value')):
        v = item.get('value')
        if total - v >= amount + fee:
            tx.inputs.remove(item)
            total -= v
            fee = fixed_fee if fixed_fee is not None else self.estimated_fee(tx)
        else:
            break
    print_error("using %d inputs"%len(tx.inputs))
    # change address
    if not change_addr:
        # send change to one of the accounts involved in the tx
        # (inputs aliases tx.inputs, so it is non-empty here)
        address = inputs[0].get('address')
        account, _ = self.get_address_index(address)
        if self.use_change and self.accounts[account].has_change():
            # New change addresses are created only after a few confirmations.
            # Choose an unused change address if any, otherwise take one at random
            change_addrs = self.accounts[account].get_addresses(1)[-self.gap_limit_for_change:]
            for change_addr in change_addrs:
                if self.get_num_tx(change_addr) == 0:
                    break
            else:
                change_addr = random.choice(change_addrs)
        else:
            change_addr = address
    # if change is above dust threshold, add a change output.
    change_amount = total - ( amount + fee )
    if fixed_fee is not None and change_amount > 0:
        tx.outputs.append(('address', change_addr, change_amount))
    elif change_amount > DUST_THRESHOLD:
        tx.outputs.append(('address', change_addr, change_amount))
        # recompute fee including change output
        fee = self.estimated_fee(tx)
        # remove change output
        tx.outputs.pop()
        # if change is still above dust threshold, re-add change output.
        change_amount = total - ( amount + fee )
        if change_amount > DUST_THRESHOLD:
            tx.outputs.append(('address', change_addr, change_amount))
            print_error('change', change_amount)
        else:
            print_error('not keeping dust', change_amount)
    else:
        print_error('not keeping dust', change_amount)
    # Sort the inputs and outputs deterministically
    tx.BIP_LI01_sort()
    run_hook('make_unsigned_transaction', tx)
    return tx
def mktx(self, outputs, password, fee=None, change_addr=None, domain=None):
    """Convenience wrapper: select coins, build an unsigned tx and sign it."""
    coins = self.get_spendable_coins(domain)
    tx = self.make_unsigned_transaction(coins, outputs, fee, change_addr)
    self.sign_transaction(tx, password)
    return tx
def add_input_info(self, txin):
    """Annotate *txin* in place with the signing metadata its address needs:
    pubkeys, extended pubkeys, empty signature slots and either a redeem
    script (multisig) or a redeem pubkey (single-sig)."""
    address = txin['address']
    account_id, sequence = self.get_address_index(address)
    account = self.accounts[account_id]
    redeemScript = account.redeem_script(*sequence)
    pubkeys = account.get_pubkeys(*sequence)
    x_pubkeys = account.get_xpubkeys(*sequence)
    # sort pubkeys and x_pubkeys, using the order of pubkeys
    pubkeys, x_pubkeys = zip( *sorted(zip(pubkeys, x_pubkeys)))
    txin['pubkeys'] = list(pubkeys)
    txin['x_pubkeys'] = list(x_pubkeys)
    txin['signatures'] = [None] * len(pubkeys)
    if redeemScript:
        txin['redeemScript'] = redeemScript
        txin['num_sig'] = account.m
    else:
        txin['redeemPubkey'] = account.get_pubkey(*sequence)
        txin['num_sig'] = 1
def sign_transaction(self, tx, password):
    """Sign every input of *tx* whose key we hold; no-op for watching-only.

    Raises (via check_password) when *password* is wrong.
    """
    if self.is_watching_only():
        return
    # check that the password is correct. This will raise if it's not.
    self.check_password(password)
    keypairs = {}
    x_pubkeys = tx.inputs_to_sign()
    for x in x_pubkeys:
        sec = self.get_private_key_from_xpubkey(x, password)
        if sec:
            keypairs[ x ] = sec
    if keypairs:
        tx.sign(keypairs)
    run_hook('sign_transaction', tx, password)
def sendtx(self, tx):
    # synchronous
    """Broadcast *tx* and block until the network reports a result."""
    h = self.send_tx(tx)
    self.tx_event.wait()
    return self.receive_tx(h, tx)
def send_tx(self, tx):
    # asynchronous
    """Queue *tx* for broadcast; result arrives via on_broadcast. Returns tx hash."""
    self.tx_event.clear()
    self.network.send([('blockchain.transaction.broadcast', [str(tx)])], self.on_broadcast)
    return tx.hash()
def on_broadcast(self, r):
    """Network callback: stash the broadcast result and wake sendtx()."""
    self.tx_result = r.get('result')
    self.tx_event.set()
def receive_tx(self, tx_hash, tx):
    """Interpret the broadcast result: (True, tx_hash) on success, else
    (False, error message) when the server did not echo our hash."""
    out = self.tx_result
    if out != tx_hash:
        return False, "error: " + out
    run_hook('receive_tx', tx, self)
    return True, out
def update_password(self, old_password, new_password):
    """Re-encrypt seed, imported keys and master private keys under
    *new_password* (empty string means no encryption) and persist the
    use_encryption flag."""
    if new_password == '':
        new_password = None
    if self.has_seed():
        decoded = self.get_seed(old_password)
        self.seed = pw_encode( decoded, new_password)
        self.storage.put('seed', self.seed, True)
    imported_account = self.accounts.get(IMPORTED_ACCOUNT)
    if imported_account:
        imported_account.update_password(old_password, new_password)
        self.save_accounts()
    if hasattr(self, 'master_private_keys'):
        for k, v in self.master_private_keys.items():
            b = pw_decode(v, old_password)
            c = pw_encode(b, new_password)
            self.master_private_keys[k] = c
        self.storage.put('master_private_keys', self.master_private_keys, True)
    self.use_encryption = (new_password != None)
    self.storage.put('use_encryption', self.use_encryption,True)
def is_frozen(self, addr):
    """Whether *addr* is currently in the frozen-address set."""
    return addr in self.frozen_addresses
def set_frozen_state(self, addrs, freeze):
    """Freeze (or unfreeze) the given addresses and persist the set.

    Returns True on success, False if any address is not ours.
    """
    if not all(self.is_mine(addr) for addr in addrs):
        return False
    addr_set = set(addrs)
    if freeze:
        self.frozen_addresses |= addr_set
    else:
        self.frozen_addresses -= addr_set
    self.storage.put('frozen_addresses', list(self.frozen_addresses), True)
    return True
def set_verifier(self, verifier):
    """Attach the SPV verifier: enqueue all historic txs for verification
    and drop cached txs referenced by neither verified nor unverified sets
    (pruning-server case)."""
    self.verifier = verifier
    # review transactions that are in the history
    for addr, hist in self.history.items():
        for tx_hash, tx_height in hist:
            # add it in case it was previously unconfirmed
            self.add_unverified_tx (tx_hash, tx_height)
    # if we are on a pruning server, remove unverified transactions
    with self.lock:
        # NOTE(review): list concatenation of keys() is Python 2 only;
        # on Python 3 keys() returns views that don't support '+'.
        vr = self.verified_tx.keys() + self.unverified_tx.keys()
    for tx_hash in self.transactions.keys():
        if tx_hash not in vr:
            print_error("removing transaction", tx_hash)
            self.transactions.pop(tx_hash)
def start_threads(self, network):
    """Wire the wallet to *network*: start the SPV verifier and register
    the synchronizer job; offline mode leaves both unset."""
    from verifier import SPV
    self.network = network
    if self.network is not None:
        self.verifier = SPV(self.network, self)
        self.verifier.start()
        self.set_verifier(self.verifier)
        self.synchronizer = WalletSynchronizer(self, network)
        network.jobs.append(self.synchronizer.main_loop)
    else:
        self.verifier = None
        self.synchronizer = None
def stop_threads(self):
    """Tear down verifier/synchronizer and remember the chain height for
    offline restarts."""
    if self.network:
        self.verifier.stop()
        self.network.jobs.remove(self.synchronizer.main_loop)
        self.synchronizer = None
        self.storage.put('stored_height', self.get_local_height(), True)
def restore(self, cb):
    """Restore from seed/keys; no-op here, overridden by subclasses."""
    pass
def get_accounts(self):
    """The mapping of account id -> account object."""
    return self.accounts
def add_account(self, account_id, account):
    """Register *account* under *account_id* and persist all accounts."""
    self.accounts[account_id] = account
    self.save_accounts()
def save_accounts(self):
    """Serialize every account via its dump() and persist to storage."""
    serialized = dict((k, v.dump()) for k, v in self.accounts.items())
    self.storage.put('accounts', serialized, True)
def can_import(self):
    """Private keys may be imported unless the wallet is watching-only."""
    return not self.is_watching_only()
def can_export(self):
    """Private keys may be exported unless the wallet is watching-only."""
    return not self.is_watching_only()
def is_used(self, address):
    """Return (num_history_entries, fully_spent): the second flag is True
    when the address has history but a zero total balance."""
    h = self.history.get(address,[])
    c, u, x = self.get_addr_balance(address)
    return len(h), len(h) > 0 and c + u + x == 0
def is_empty(self, address):
    """True when the address balance (confirmed+unconfirmed+unmatured) is zero."""
    return sum(self.get_addr_balance(address)) == 0
def address_is_old(self, address, age_limit=2):
    """True when the oldest confirmed tx touching *address* has more than
    *age_limit* confirmations; unconfirmed txs count as age 0."""
    age = -1
    for tx_hash, tx_height in self.history.get(address, []):
        tx_age = 0 if tx_height == 0 else self.get_local_height() - tx_height + 1
        age = max(age, tx_age)
    return age > age_limit
def can_sign(self, tx):
    """Whether this wallet can add at least one signature to *tx*."""
    if self.is_watching_only() or tx.is_complete():
        return False
    return any(self.can_sign_xpubkey(x) for x in tx.inputs_to_sign())
def get_private_key_from_xpubkey(self, x_pubkey, password):
    """Resolve an extended-pubkey string to a private key, dispatching on
    its 2-char type prefix: 02/03/04 plain pubkey, ff BIP32, fe old-style
    mpk, fd raw address; raises on unknown prefixes."""
    if x_pubkey[0:2] in ['02','03','04']:
        addr = bitcoin.public_key_to_bc_address(x_pubkey.decode('hex'))
        if self.is_mine(addr):
            return self.get_private_key(addr, password)[0]
    elif x_pubkey[0:2] == 'ff':
        xpub, sequence = BIP32_Account.parse_xpubkey(x_pubkey)
        for k, v in self.master_public_keys.items():
            if v == xpub:
                xprv = self.get_master_private_key(k, password)
                if xprv:
                    _, _, _, c, k = deserialize_xkey(xprv)
                    return bip32_private_key(sequence, k, c)
    elif x_pubkey[0:2] == 'fe':
        xpub, sequence = OldAccount.parse_xpubkey(x_pubkey)
        for k, account in self.accounts.items():
            if xpub in account.get_master_pubkeys():
                pk = account.get_private_key(sequence, self, password)
                return pk[0]
    elif x_pubkey[0:2] == 'fd':
        addrtype = ord(x_pubkey[2:4].decode('hex'))
        addr = hash_160_to_bc_address(x_pubkey[4:].decode('hex'), addrtype)
        if self.is_mine(addr):
            return self.get_private_key(addr, password)[0]
    else:
        raise BaseException("z")
def can_sign_xpubkey(self, x_pubkey):
    """Whether we hold key material for *x_pubkey*; same prefix dispatch
    as get_private_key_from_xpubkey but without touching passwords."""
    if x_pubkey[0:2] in ['02','03','04']:
        addr = bitcoin.public_key_to_bc_address(x_pubkey.decode('hex'))
        return self.is_mine(addr)
    elif x_pubkey[0:2] == 'ff':
        if not isinstance(self, BIP32_Wallet): return False
        xpub, sequence = BIP32_Account.parse_xpubkey(x_pubkey)
        return xpub in [ self.master_public_keys[k] for k in self.master_private_keys.keys() ]
    elif x_pubkey[0:2] == 'fe':
        if not isinstance(self, OldWallet): return False
        xpub, sequence = OldAccount.parse_xpubkey(x_pubkey)
        return xpub == self.get_master_public_key()
    elif x_pubkey[0:2] == 'fd':
        addrtype = ord(x_pubkey[2:4].decode('hex'))
        addr = hash_160_to_bc_address(x_pubkey[4:].decode('hex'), addrtype)
        return self.is_mine(addr)
    else:
        raise BaseException("z")
def is_watching_only(self):
    """Default: a generic wallet is not watching-only (subclasses override).

    BUG FIX: the original body was the bare expression `False`, so the
    method returned None. Truthiness-based callers happened to work, but
    identity checks (`is False`) and serialization did not.
    """
    return False
def can_change_password(self):
    """Password changes make sense only when we hold private keys."""
    return not self.is_watching_only()
def get_unused_address(self, account):
    # fixme: use slots from expired requests
    """First receiving address with no history and no pending payment
    request; None when every address is taken."""
    domain = self.get_account_addresses(account, include_change=False)
    candidates = (addr for addr in domain
                  if not self.history.get(addr) and addr not in self.receive_requests)
    return next(candidates, None)
def get_payment_request(self, addr, config):
    """Return a copy of the stored payment request for *addr*, augmented
    with a payment URI, current status and (when a BIP70 file exists under
    config['requests_dir']) request/index URLs; None when no request."""
    import util
    r = self.receive_requests.get(addr)
    if not r:
        return
    out = copy.copy(r)
    out['URI'] = 'batacoin:' + addr + '?amount=' + util.format_satoshis(out.get('amount'))
    out['status'] = self.get_request_status(addr)
    # check if bip70 file exists
    rdir = config.get('requests_dir')
    if rdir:
        key = out.get('id', addr)
        path = os.path.join(rdir, key + '.bip70')
        if os.path.exists(path):
            baseurl = 'file://' + rdir
            rewrite = config.get('url_rewrite')
            if rewrite:
                baseurl = baseurl.replace(*rewrite)
            out['request_url'] = os.path.join(baseurl, key + '.bip70')
            out['URI'] += '&r=' + out['request_url']
            out['index_url'] = os.path.join(baseurl, 'index.html') + '?id=' + key
    return out
def get_request_status(self, key):
    """Status of payment request *key*: PAID/UNPAID/EXPIRED when an amount
    is set and the wallet is synced, otherwise UNKNOWN."""
    from paymentrequest import PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
    r = self.receive_requests[key]
    address = r['address']
    amount = r.get('amount')
    timestamp = r.get('timestamp', 0)
    expiration = r.get('expiration')
    if amount:
        if self.up_to_date:
            paid = amount <= self.get_addr_received(address)
            status = PR_PAID if paid else PR_UNPAID
            if status == PR_UNPAID and expiration is not None and time.time() > timestamp + expiration:
                status = PR_EXPIRED
        else:
            status = PR_UNKNOWN
    else:
        status = PR_UNKNOWN
    return status
def add_payment_request(self, addr, amount, message, expiration, config):
    """Create and persist a payment request for *addr*; when a requests
    directory is configured, also write BIP70 (.bip70) and JSON files plus
    an index.html. Returns the enriched request dict."""
    import paymentrequest, shutil, os
    timestamp = int(time.time())
    _id = Hash(addr + "%d"%timestamp).encode('hex')[0:10]
    r = {'timestamp':timestamp, 'amount':amount, 'expiration':expiration, 'address':addr, 'memo':message, 'id':_id}
    self.receive_requests[addr] = r
    self.storage.put('payment_requests', self.receive_requests)
    self.set_label(addr, message) # should be a default label
    rdir = config.get('requests_dir')
    req = self.get_payment_request(addr, config)
    if rdir and amount is not None:
        if not os.path.exists(rdir):
            os.mkdir(rdir)
        index = os.path.join(rdir, 'index.html')
        if not os.path.exists(index):
            src = os.path.join(os.path.dirname(__file__), 'www', 'index.html')
            shutil.copy(src, index)
        key = req.get('id', addr)
        pr = paymentrequest.make_request(config, req)
        path = os.path.join(rdir, key + '.bip70')
        with open(path, 'w') as f:
            f.write(pr)
        # reload so the request picks up the freshly written file's URLs
        req = self.get_payment_request(addr, config)
        with open(os.path.join(rdir, key + '.json'), 'w') as f:
            f.write(json.dumps(req))
    return req
def remove_payment_request(self, addr, config):
    """Delete the payment request for *addr* (and its on-disk .json/.bip70
    files, if any); returns False when no such request exists."""
    if addr not in self.receive_requests:
        return False
    req = self.receive_requests.pop(addr)
    rdir = config.get('requests_dir')
    if rdir:
        key = req.get('id', addr)
        for ext in ('.json', '.bip70'):
            target = os.path.join(rdir, key + ext)
            if os.path.exists(target):
                os.unlink(target)
    self.storage.put('payment_requests', self.receive_requests)
    return True
def get_sorted_requests(self, config):
    """All payment requests (enriched via get_payment_request), oldest first."""
    return sorted(map(lambda x: self.get_payment_request(x, config), self.receive_requests.keys()), key=itemgetter('timestamp'))
class Imported_Wallet(Abstract_Wallet):
    """Non-deterministic wallet holding only individually imported keys or
    addresses; everything lives in the special imported pseudo-account."""
    wallet_type = 'imported'

    def __init__(self, storage):
        Abstract_Wallet.__init__(self, storage)
        a = self.accounts.get(IMPORTED_ACCOUNT)
        if not a:
            self.accounts[IMPORTED_ACCOUNT] = ImportedAccount({'imported':{}})

    def is_watching_only(self):
        """Watching-only iff every imported entry carries no key material."""
        acc = self.accounts[IMPORTED_ACCOUNT]
        n = acc.keypairs.values()
        return len(n) > 0 and n == [[None, None]] * len(n)

    def has_seed(self):
        """Imported wallets never have a seed."""
        return False

    def is_deterministic(self):
        return False

    def check_password(self, password):
        """Raises (via the account) when *password* cannot decrypt the first key."""
        self.accounts[IMPORTED_ACCOUNT].get_private_key((0,0), self, password)

    def is_used(self, address):
        # balance is not tracked per-address here, so the spent flag is always False
        h = self.history.get(address,[])
        return len(h), False

    def get_master_public_keys(self):
        return {}

    def is_beyond_limit(self, address, account, is_change):
        # no gap limit for imported addresses
        return False
class Deterministic_Wallet(Abstract_Wallet):
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
def has_seed(self):
return self.seed != ''
def is_deterministic(self):
return True
def is_watching_only(self):
return not self.has_seed()
def add_seed(self, seed, password):
if self.seed:
raise Exception("a seed exists")
self.seed_version, self.seed = self.format_seed(seed)
if password:
self.seed = pw_encode( self.seed, password)
self.use_encryption = True
else:
self.use_encryption = False
self.storage.put('seed', self.seed, False)
self.storage.put('seed_version', self.seed_version, False)
self.storage.put('use_encryption', self.use_encryption,True)
def get_seed(self, password):
return pw_decode(self.seed, password)
def get_mnemonic(self, password):
return self.get_seed(password)
def change_gap_limit(self, value):
assert isinstance(value, int), 'gap limit must be of type int, not of %s'%type(value)
if value >= self.gap_limit:
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit, True)
return True
elif value >= self.min_acceptable_gap():
for key, account in self.accounts.items():
addresses = account.get_addresses(False)
k = self.num_unused_trailing_addresses(addresses)
n = len(addresses) - k + value
account.receiving_pubkeys = account.receiving_pubkeys[0:n]
account.receiving_addresses = account.receiving_addresses[0:n]
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit, True)
self.save_accounts()
return True
else:
return False
def num_unused_trailing_addresses(self, addresses):
k = 0
for a in addresses[::-1]:
if self.history.get(a):break
k = k + 1
return k
def min_acceptable_gap(self):
# fixme: this assumes wallet is synchronized
n = 0
nmax = 0
for account in self.accounts.values():
addresses = account.get_addresses(0)
k = self.num_unused_trailing_addresses(addresses)
for a in addresses[0:-k]:
if self.history.get(a):
n = 0
else:
n += 1
if n > nmax: nmax = n
return nmax + 1
def default_account(self):
return self.accounts['0']
def create_new_address(self, account=None, for_change=0):
if account is None:
account = self.default_account()
address = account.create_new_address(for_change)
self.add_address(address)
return address
def add_address(self, address):
if address not in self.history:
self.history[address] = []
if self.synchronizer:
self.synchronizer.add(address)
self.save_accounts()
def synchronize(self):
with self.lock:
for account in self.accounts.values():
account.synchronize(self)
def restore(self, callback):
from i18n import _
def wait_for_wallet():
self.set_up_to_date(False)
while not self.is_up_to_date():
msg = "%s\n%s %d"%(
_("Please wait..."),
_("Addresses generated:"),
len(self.addresses(True)))
apply(callback, (msg,))
time.sleep(0.1)
def wait_for_network():
while not self.network.is_connected():
msg = "%s \n" % (_("Connecting..."))
apply(callback, (msg,))
time.sleep(0.1)
# wait until we are connected, because the user might have selected another server
if self.network:
wait_for_network()
wait_for_wallet()
else:
self.synchronize()
    def is_beyond_limit(self, address, account, is_change):
        """Return True when *address* lies past the gap limit, i.e. the
        `limit` addresses immediately preceding it in the account all have
        empty history.  Imported accounts have no gap limit."""
        if type(account) == ImportedAccount:
            return False
        addr_list = account.get_addresses(is_change)
        i = addr_list.index(address)
        # all addresses derived before this one
        prev_addresses = addr_list[:max(0, i)]
        limit = self.gap_limit_for_change if is_change else self.gap_limit
        if len(prev_addresses) < limit:
            return False
        # keep only the `limit` addresses immediately before this one
        prev_addresses = prev_addresses[max(0, i - limit):]
        for addr in prev_addresses:
            if self.history.get(addr):
                return False
        return True
    def get_action(self):
        """Return the next setup step ('create_seed' or 'create_accounts'),
        or None (implicitly) when the wallet is fully initialized."""
        if not self.get_master_public_key():
            return 'create_seed'
        if not self.accounts:
            return 'create_accounts'
def get_master_public_keys(self):
out = {}
for k, account in self.accounts.items():
if type(account) == ImportedAccount:
continue
name = self.get_account_name(k)
mpk_text = '\n\n'.join(account.get_master_pubkeys())
out[name] = mpk_text
return out
class BIP32_Wallet(Deterministic_Wallet):
    # abstract class, bip32 logic
    # Subclasses define root_name / root_derivation to fix the key hierarchy.
    root_name = 'x/'
    def __init__(self, storage):
        Deterministic_Wallet.__init__(self, storage)
        # maps of root name (e.g. 'x/', 'x1/') -> serialized xpub / encrypted xprv
        self.master_public_keys = storage.get('master_public_keys', {})
        self.master_private_keys = storage.get('master_private_keys', {})
        self.gap_limit = storage.get('gap_limit', 20)
    def is_watching_only(self):
        """A wallet with no master private keys cannot sign transactions."""
        return not bool(self.master_private_keys)
    def can_import(self):
        return False
    def get_master_public_key(self):
        """Return the xpub stored under this wallet's root name, if any."""
        return self.master_public_keys.get(self.root_name)
    def get_master_private_key(self, account, password):
        """Decrypt and return the xprv stored for *account*, or None if absent.

        Raises InvalidPassword when the decrypted data is not a valid xkey
        (i.e. the password was wrong).
        """
        k = self.master_private_keys.get(account)
        if not k: return
        xprv = pw_decode(k, password)
        try:
            deserialize_xkey(xprv)
        except:
            raise InvalidPassword()
        return xprv
    def check_password(self, password):
        """Raise InvalidPassword unless the decrypted root xprv matches the
        stored root xpub.

        NOTE(review): for a watching-only wallet get_master_private_key
        returns None and deserialize_xkey(None) will fail — confirm callers
        never reach this path without a private key.
        """
        xpriv = self.get_master_private_key(self.root_name, password)
        xpub = self.master_public_keys[self.root_name]
        if deserialize_xkey(xpriv)[3] != deserialize_xkey(xpub)[3]:
            raise InvalidPassword()
    def add_master_public_key(self, name, xpub):
        """Store *xpub* under *name*; rejects duplicates across all names."""
        if xpub in self.master_public_keys.values():
            raise BaseException('Duplicate master public key')
        self.master_public_keys[name] = xpub
        self.storage.put('master_public_keys', self.master_public_keys, True)
    def add_master_private_key(self, name, xpriv, password):
        # private keys are stored encrypted with the wallet password
        self.master_private_keys[name] = pw_encode(xpriv, password)
        self.storage.put('master_private_keys', self.master_private_keys, True)
    def derive_xkeys(self, root, derivation, password):
        """Derive (xpub, xprv) for *derivation* from the root private key."""
        x = self.master_private_keys[root]
        root_xprv = pw_decode(x, password)
        xprv, xpub = bip32_private_derivation(root_xprv, root, derivation)
        return xpub, xprv
    def create_master_keys(self, password):
        """Create and store this wallet's master keys from its own seed."""
        seed = self.get_seed(password)
        self.add_cosigner_seed(seed, self.root_name, password)
    def add_cosigner_seed(self, seed, name, password, passphrase=''):
        # we don't store the seed, only the master xpriv
        xprv, xpub = bip32_root(self.mnemonic_to_seed(seed, passphrase))
        xprv, xpub = bip32_private_derivation(xprv, "m/", self.root_derivation)
        self.add_master_public_key(name, xpub)
        self.add_master_private_key(name, xprv, password)
    def add_cosigner_xpub(self, seed, name):
        # store only master xpub
        xprv, xpub = bip32_root(self.mnemonic_to_seed(seed,''))
        xprv, xpub = bip32_private_derivation(xprv, "m/", self.root_derivation)
        self.add_master_public_key(name, xpub)
    def mnemonic_to_seed(self, seed, password):
        """Convert a mnemonic phrase to binary seed material."""
        return Mnemonic.mnemonic_to_seed(seed, password)
    def make_seed(self, lang=None):
        """Generate a fresh mnemonic seed in the requested language."""
        return Mnemonic(lang).make_seed()
    def format_seed(self, seed):
        # normalize internal whitespace to single spaces
        return NEW_SEED_VERSION, ' '.join(seed.split())
class BIP32_Simple_Wallet(BIP32_Wallet):
    # Wallet with a single BIP32 account, no seed
    # gap limit 20
    wallet_type = 'xpub'
    def create_xprv_wallet(self, xprv, password):
        """Initialize a spending wallet from a master private key (no seed).

        Stores the encrypted xprv and derived xpub, creates account '0',
        and records whether encryption is in use.
        """
        xpub = bitcoin.xpub_from_xprv(xprv)
        account = BIP32_Account({'xpub':xpub})
        self.storage.put('seed_version', self.seed_version, True)
        self.add_master_private_key(self.root_name, xprv, password)
        self.add_master_public_key(self.root_name, xpub)
        self.add_account('0', account)
        # idiom fix: identity comparison with None instead of !=
        self.use_encryption = (password is not None)
        self.storage.put('use_encryption', self.use_encryption, True)
    def create_xpub_wallet(self, xpub):
        """Initialize a watching-only wallet from a master public key."""
        account = BIP32_Account({'xpub':xpub})
        self.storage.put('seed_version', self.seed_version, True)
        self.add_master_public_key(self.root_name, xpub)
        self.add_account('0', account)
class BIP32_HD_Wallet(BIP32_Wallet):
    # wallet that can create accounts
    def __init__(self, storage):
        # next_account: (account_id, xpub, pubkey, address) of the pre-derived
        # not-yet-activated account, or None
        self.next_account = storage.get('next_account2', None)
        BIP32_Wallet.__init__(self, storage)
    def can_create_accounts(self):
        """Account creation needs the root private key."""
        return self.root_name in self.master_private_keys.keys()
    def addresses(self, b=True):
        """All wallet addresses, including the pending next-account address."""
        l = BIP32_Wallet.addresses(self, b)
        if self.next_account:
            _, _, _, next_address = self.next_account
            if next_address not in l:
                l.append(next_address)
        return l
    def get_address_index(self, address):
        """Resolve *address* to (account_id, (for_change, index)); the pending
        next-account address maps to position (0,0) of its future account."""
        if self.next_account:
            next_id, next_xpub, next_pubkey, next_address = self.next_account
            if address == next_address:
                return next_id, (0,0)
        return BIP32_Wallet.get_address_index(self, address)
    def num_accounts(self):
        """Return the count of consecutively numbered BIP32 accounts ('0','1',...)."""
        keys = []
        for k, v in self.accounts.items():
            if type(v) != BIP32_Account:
                continue
            keys.append(k)
        i = 0
        while True:
            account_id = '%d'%i
            if account_id not in keys:
                break
            i += 1
        return i
    def get_next_account(self, password):
        """Derive the next numbered account's keys and first address.

        Returns (account_id, xpub, pubkey, address).  Also stores the derived
        master keys and registers the first address with the wallet.
        """
        account_id = '%d'%self.num_accounts()
        derivation = self.root_name + "%d'"%int(account_id)
        xpub, xprv = self.derive_xkeys(self.root_name, derivation, password)
        self.add_master_public_key(derivation, xpub)
        if xprv:
            self.add_master_private_key(derivation, xprv, password)
        account = BIP32_Account({'xpub':xpub})
        addr, pubkey = account.first_address()
        self.add_address(addr)
        return account_id, xpub, pubkey, addr
    def create_main_account(self, password):
        # First check the password is valid (this raises if it isn't).
        self.check_password(password)
        assert self.num_accounts() == 0
        self.create_account('Main account', password)
    def create_account(self, name, password):
        """Activate the next numbered account under *name* and pre-derive the
        one after it."""
        account_id, xpub, _, _ = self.get_next_account(password)
        account = BIP32_Account({'xpub':xpub})
        self.add_account(account_id, account)
        self.set_label(account_id, name)
        # add address of the next account
        self.next_account = self.get_next_account(password)
        self.storage.put('next_account2', self.next_account)
    def account_is_pending(self, k):
        return type(self.accounts.get(k)) == PendingAccount
    def delete_pending_account(self, k):
        assert type(self.accounts.get(k)) == PendingAccount
        self.accounts.pop(k)
        self.save_accounts()
    def create_pending_account(self, name, password):
        """Expose the pre-derived next account as a PendingAccount so its
        first address can receive funds before activation."""
        if self.next_account is None:
            self.next_account = self.get_next_account(password)
            self.storage.put('next_account2', self.next_account)
        next_id, next_xpub, next_pubkey, next_address = self.next_account
        if name:
            self.set_label(next_id, name)
        self.accounts[next_id] = PendingAccount({'pending':True, 'address':next_address, 'pubkey':next_pubkey})
        self.save_accounts()
    def synchronize(self):
        # synchronize existing accounts
        BIP32_Wallet.synchronize(self)
        # pre-derive the next account when no password is needed
        if self.next_account is None and not self.use_encryption:
            try:
                self.next_account = self.get_next_account(None)
                self.storage.put('next_account2', self.next_account)
            except:
                # best-effort: derivation may legitimately fail here
                print_error('cannot get next account')
        # check pending account
        if self.next_account is not None:
            next_id, next_xpub, next_pubkey, next_address = self.next_account
            if self.address_is_old(next_address):
                # address has enough history: promote to a full account
                print_error("creating account", next_id)
                self.add_account(next_id, BIP32_Account({'xpub':next_xpub}))
                # here the user should get a notification
                self.next_account = None
                self.storage.put('next_account2', self.next_account)
            elif self.history.get(next_address, []):
                # address was used at least once: surface it as pending
                if next_id not in self.accounts:
                    print_error("create pending account", next_id)
                    self.accounts[next_id] = PendingAccount({'pending':True, 'address':next_address, 'pubkey':next_pubkey})
                    self.save_accounts()
class NewWallet(BIP32_Wallet, Mnemonic):
    # Standard wallet: one BIP32 account derived from the 'x/' root key.
    root_derivation = "m/"
    wallet_type = 'standard'
    def create_main_account(self, password):
        """Create account '0' from the stored root master public key."""
        root_xpub = self.master_public_keys.get("x/")
        self.add_account('0', BIP32_Account({'xpub':root_xpub}))
class Multisig_Wallet(BIP32_Wallet, Mnemonic):
    # generic m of n
    root_name = "x1/"
    root_derivation = "m/"
    def __init__(self, storage):
        BIP32_Wallet.__init__(self, storage)
        # wallet_type encodes the scheme, e.g. '2of3'
        self.wallet_type = storage.get('wallet_type')
        # raw string so \d is a regex escape, not a (deprecated) string escape
        m = re.match(r'(\d+)of(\d+)', self.wallet_type)
        self.m = int(m.group(1))
        self.n = int(m.group(2))
    def load_accounts(self):
        """Load account '0', upgrading legacy xpub/xpub2/xpub3 fields to the
        current 'xpubs' list format."""
        self.accounts = {}
        d = self.storage.get('accounts', {})
        v = d.get('0')
        if v:
            if v.get('xpub3'):
                v['xpubs'] = [v['xpub'], v['xpub2'], v['xpub3']]
            elif v.get('xpub2'):
                v['xpubs'] = [v['xpub'], v['xpub2']]
            self.accounts = {'0': Multisig_Account(v)}
    def create_main_account(self, password):
        """Create the single m-of-n account from all cosigner xpubs."""
        account = Multisig_Account({'xpubs': self.master_public_keys.values(), 'm': self.m})
        self.add_account('0', account)
    def get_master_public_keys(self):
        return self.master_public_keys
    def get_action(self):
        """Return the next setup step: seed for cosigner 1, then remaining
        cosigner keys, then account creation; None when complete."""
        for i in range(self.n):
            if self.master_public_keys.get("x%d/"%(i+1)) is None:
                return 'create_seed' if i == 0 else 'add_cosigners'
        if not self.accounts:
            return 'create_accounts'
class OldWallet(Deterministic_Wallet):
    # Legacy (pre-BIP32) Electrum deterministic wallet.
    wallet_type = 'old'
    def __init__(self, storage):
        Deterministic_Wallet.__init__(self, storage)
        # legacy wallets default to a smaller gap limit than BIP32 wallets
        self.gap_limit = storage.get('gap_limit', 5)
    def make_seed(self):
        """Generate a new 128-bit seed, rendered as legacy mnemonic words."""
        import old_mnemonic
        seed = random_seed(128)
        return ' '.join(old_mnemonic.mn_encode(seed))
    def format_seed(self, seed):
        """Normalize user seed input (hex string or mnemonic words) to
        (OLD_SEED_VERSION, hex_seed).  Raises on undecodable input."""
        import old_mnemonic
        # see if seed was entered as hex
        seed = seed.strip()
        try:
            assert seed
            # Python 2 str.decode('hex'): raises if not valid hex
            seed.decode('hex')
            return OLD_SEED_VERSION, str(seed)
        except Exception:
            pass
        # not hex: treat as mnemonic words
        words = seed.split()
        seed = old_mnemonic.mn_decode(words)
        if not seed:
            raise Exception("Invalid seed")
        return OLD_SEED_VERSION, seed
    def create_master_keys(self, password):
        """Derive and persist the master public key from the seed."""
        seed = self.get_seed(password)
        mpk = OldAccount.mpk_from_seed(seed)
        self.storage.put('master_public_key', mpk, True)
    def get_master_public_key(self):
        return self.storage.get("master_public_key")
    def get_master_public_keys(self):
        return {'Main Account':self.get_master_public_key()}
    def create_main_account(self, password):
        mpk = self.storage.get("master_public_key")
        self.create_account(mpk)
    def create_account(self, mpk):
        # keys 0 and 1 hold receiving / change address lists
        self.accounts['0'] = OldAccount({'mpk':mpk, 0:[], 1:[]})
        self.save_accounts()
    def create_watching_only_wallet(self, mpk):
        """Initialize a watching-only wallet directly from a master public key."""
        self.seed_version = OLD_SEED_VERSION
        self.storage.put('seed_version', self.seed_version, False)
        self.storage.put('master_public_key', mpk, True)
        self.create_account(mpk)
    def get_seed(self, password):
        """Decrypt and return the stored seed as a UTF-8 byte string."""
        seed = pw_decode(self.seed, password).encode('utf8')
        return seed
    def check_password(self, password):
        """Raise unless *password* decrypts a seed matching account '0'."""
        seed = self.get_seed(password)
        self.accounts['0'].check_seed(seed)
    def get_mnemonic(self, password):
        """Return the seed rendered as legacy mnemonic words."""
        import old_mnemonic
        s = self.get_seed(password)
        return ' '.join(old_mnemonic.mn_encode(s))
# Registry of supported wallet types, consumed by the Wallet factory below:
# (category, type tag stored in the wallet file, display name, constructor).
wallet_types = [
    # category type description constructor
    ('standard', 'old', ("Old wallet"), OldWallet),
    ('standard', 'xpub', ("BIP32 Import"), BIP32_Simple_Wallet),
    ('standard', 'standard', ("Standard wallet"), NewWallet),
    ('standard', 'imported', ("Imported wallet"), Imported_Wallet),
    ('multisig', '2of2', ("Multisig wallet (2 of 2)"), Multisig_Wallet),
    ('multisig', '2of3', ("Multisig wallet (2 of 3)"), Multisig_Wallet)
]
# former WalletFactory
class Wallet(object):
    """The main wallet "entry point".
    This class is actually a factory that will return a wallet of the correct
    type when passed a WalletStorage instance."""
    def __new__(self, storage):
        """Instantiate the concrete wallet class matching *storage*.

        Chooses the class from the stored 'wallet_type' (via the
        wallet_types registry or the NofM pattern), falling back to the
        seed version.  Raises BaseException for unsupported seed versions
        or unknown wallet types.
        """
        seed_version = storage.get('seed_version')
        if not seed_version:
            # legacy files have a 128-char hex mpk and no seed_version field
            seed_version = OLD_SEED_VERSION if len(storage.get('master_public_key','')) == 128 else NEW_SEED_VERSION
        if seed_version not in [OLD_SEED_VERSION, NEW_SEED_VERSION]:
            msg = "Your wallet has an unsupported seed version."
            msg += '\n\nWallet file: %s' % os.path.abspath(storage.path)
            if seed_version in [5, 7, 8, 9, 10]:
                msg += "\n\nTo open this wallet, try 'git checkout seed_v%d'"%seed_version
            if seed_version == 6:
                # version 1.9.8 created v6 wallets when an incorrect seed was entered in the restore dialog
                msg += '\n\nThis file was created because of a bug in version 1.9.8.'
                if storage.get('master_public_keys') is None and storage.get('master_private_keys') is None and storage.get('imported_keys') is None:
                    # pbkdf2 was not included with the binaries, and wallet creation aborted.
                    msg += "\nIt does not contain any keys, and can safely be removed."
                else:
                    # creation was complete if electrum was run from source
                    msg += "\nPlease open this file with Electrum 1.9.8, and move your coins to a new wallet."
            raise BaseException(msg)
        wallet_type = storage.get('wallet_type')
        if wallet_type:
            for cat, t, name, c in wallet_types:
                if t == wallet_type:
                    WalletClass = c
                    break
            else:
                # not in the registry: 'NofM' strings map to Multisig_Wallet
                if re.match('(\d+)of(\d+)', wallet_type):
                    WalletClass = Multisig_Wallet
                else:
                    raise BaseException('unknown wallet type', wallet_type)
        else:
            if seed_version == OLD_SEED_VERSION:
                WalletClass = OldWallet
            else:
                WalletClass = NewWallet
        return WalletClass(storage)
    @classmethod
    def is_seed(self, seed):
        """True when *seed* is a valid old- or new-style mnemonic seed."""
        if not seed:
            return False
        elif is_old_seed(seed):
            return True
        elif is_new_seed(seed):
            return True
        else:
            return False
    @classmethod
    def is_old_mpk(self, mpk):
        """True when *mpk* is a 128-char hex legacy master public key."""
        try:
            int(mpk, 16)
            assert len(mpk) == 128
            return True
        except:
            return False
    @classmethod
    def is_xpub(self, text):
        """True when *text* deserializes as an extended public key."""
        try:
            assert text[0:4] in ('xpub', 'Ltub')
            deserialize_xkey(text)
            return True
        except:
            return False
    @classmethod
    def is_xprv(self, text):
        """True when *text* deserializes as an extended private key."""
        try:
            assert text[0:4] in ('xprv', 'Ltpv')
            deserialize_xkey(text)
            return True
        except:
            return False
    @classmethod
    def is_address(self, text):
        """True when every whitespace-separated token is a valid address."""
        if not text:
            return False
        for x in text.split():
            if not bitcoin.is_address(x):
                return False
        return True
    @classmethod
    def is_private_key(self, text):
        """True when every whitespace-separated token is a valid private key."""
        if not text:
            return False
        for x in text.split():
            if not bitcoin.is_private_key(x):
                return False
        return True
    @classmethod
    def from_seed(self, seed, password, storage):
        """Build a fully initialized wallet (keys + main account) from a seed.

        NOTE(review): if *seed* is neither old- nor new-style, klass is never
        bound and the next line raises — callers should validate with
        is_seed() first.
        """
        if is_old_seed(seed):
            klass = OldWallet
        elif is_new_seed(seed):
            klass = NewWallet
        w = klass(storage)
        w.add_seed(seed, password)
        w.create_master_keys(password)
        w.create_main_account(password)
        return w
    @classmethod
    def from_address(self, text, storage):
        """Build a watching-only imported wallet from address text."""
        w = Imported_Wallet(storage)
        for x in text.split():
            w.accounts[IMPORTED_ACCOUNT].add(x, None, None, None)
        w.save_accounts()
        return w
    @classmethod
    def from_private_key(self, text, password, storage):
        """Build an imported wallet from whitespace-separated private keys."""
        w = Imported_Wallet(storage)
        w.update_password(None, password)
        for x in text.split():
            w.import_key(x, password)
        return w
    @classmethod
    def from_old_mpk(self, mpk, storage):
        """Build a legacy watching-only wallet from a master public key."""
        w = OldWallet(storage)
        w.seed = ''
        w.create_watching_only_wallet(mpk)
        return w
    @classmethod
    def from_xpub(self, xpub, storage):
        """Build a watching-only BIP32 wallet from an xpub."""
        w = BIP32_Simple_Wallet(storage)
        w.create_xpub_wallet(xpub)
        return w
    @classmethod
    def from_xprv(self, xprv, password, storage):
        """Build a spending BIP32 wallet from an xprv."""
        w = BIP32_Simple_Wallet(storage)
        w.create_xprv_wallet(xprv, password)
        return w
    @classmethod
    def from_multisig(klass, key_list, password, storage, wallet_type):
        """Build an m-of-n multisig wallet from a mix of seeds/xprvs/xpubs.

        key_list entries are sorted so seeds/xprvs come before xpubs; the
        first slot ('x1/') keeps the wallet's own seed when one is given.
        """
        storage.put('wallet_type', wallet_type, True)
        self = Multisig_Wallet(storage)
        key_list = sorted(key_list, key = lambda x: klass.is_xpub(x))
        for i, text in enumerate(key_list):
            assert klass.is_seed(text) or klass.is_xprv(text) or klass.is_xpub(text)
            name = "x%d/"%(i+1)
            if klass.is_xprv(text):
                xpub = bitcoin.xpub_from_xprv(text)
                self.add_master_public_key(name, xpub)
                self.add_master_private_key(name, text, password)
            elif klass.is_xpub(text):
                self.add_master_public_key(name, text)
            elif klass.is_seed(text):
                if name == 'x1/':
                    self.add_seed(text, password)
                    self.create_master_keys(password)
                else:
                    self.add_cosigner_seed(text, name, password)
        self.use_encryption = (password != None)
        self.storage.put('use_encryption', self.use_encryption, True)
        self.create_main_account(password)
        return self
| gpl-3.0 |
OriHoch/knesset-data-pipelines | datapackage_pipelines_knesset/common/influxdb.py | 4 | 1779 | import os, requests, logging
def send_metric(measurement, tags, values, num_retries=0, must_succeed=False):
    """Write one measurement to InfluxDB using the line protocol.

    Connection settings come from the DPP_INFLUXDB_URL / DPP_INFLUXDB_DB
    environment variables.  When they (or *tags*) are missing the call is a
    silent no-op unless *must_succeed* is set, in which case an Exception is
    raised.  A missing database is created once and the write retried.
    Returns True on a successful write.
    """
    url = os.environ.get("DPP_INFLUXDB_URL")
    db = os.environ.get("DPP_INFLUXDB_DB")
    if tags and url and db:
        line_tags = ",".join(["{}={}".format(k, v) for k, v in tags.items()])
        line_values = ",".join(["{}={}".format(k, v) for k, v in values.items()])
        old_logging_level = logging.getLogger().level
        # silence request logging, but always restore the previous level
        # (the original left the root logger at ERROR if the post raised)
        logging.getLogger().setLevel(logging.ERROR)
        try:
            res = requests.post("{url}/write?db={db}".format(url=url, db=db),
                                '{measurement},{tags} {values}'.format(measurement=measurement,
                                                                       tags=line_tags, values=line_values))
        finally:
            logging.getLogger().setLevel(old_logging_level)
        if res.status_code == 404 and res.json()["error"].startswith("database not found:"):
            if num_retries > 0:
                raise Exception("Failed to create InfluxDB database")
            logging.getLogger().setLevel(logging.ERROR)
            try:
                res = requests.post("{url}/query".format(url=url), {"q": "CREATE DATABASE {db}".format(db=db)})
            finally:
                logging.getLogger().setLevel(old_logging_level)
            res.raise_for_status()
            # retry the write once now that the database exists
            return send_metric(measurement, tags, values, num_retries+1)
        elif res.status_code == 200:
            return True
        else:
            res.raise_for_status()
    elif must_succeed:
        raise Exception("missing required environment variables")
def send_metric_parameters(measurement, tags, values, parameters):
    """Merge any 'metric-tags' found in *parameters* into *tags*, then
    forward everything to send_metric."""
    extra_tags = parameters.get("metric-tags", {})
    if extra_tags:
        tags.update(extra_tags)
    return send_metric(measurement, tags, values)
| mit |
TheDSCPL/SSRE_2017-2018_group8 | Projeto/Python/cryptopy/crypto/cipher/ccm_test.py | 1 | 29917 | # -*- coding: utf-8 -*-
#! /usr/bin/env python
""" crypto.cipher.ccm_test
Tests for CCM encryption, uses AES for base algorithm
Copyright © (c) 2002 by Paul A. Lambert
Read LICENSE.txt for license information.
July 24, 2002
"""
import unittest
from crypto.cipher.ccm import CCM
from crypto.cipher.aes import AES
from crypto.cipher.base import noPadding
from crypto.common import xor
from binascii_plus import a2b_p, b2a_p
class CCM_AES128_TestVectors(unittest.TestCase):
""" Test CCM with AES128 algorithm using know values """
def testKnowValues(self):
""" Test using vectors from..."""
def CCMtestVector(testCase,macSize,key,nonce,addAuth,pt,kct):
""" CCM test vectors using AES algorithm """
print '%s %s %s'%('='*((54-len(testCase))/2),testCase,'='*((54-len(testCase))/2))
key,nonce,pt,addAuth,kct = a2b_p(key),a2b_p(nonce),a2b_p(pt),a2b_p(addAuth),a2b_p(kct)
alg = CCM(AES(key,keySize=len(key)),macSize=macSize, nonceSize=len(nonce))
print 'alg=%s%skeySize=%3d blockSize=%3d M=%2d L=%2d'%(alg.baseCipher.name,
' '*(10-len(alg.baseCipher.name)),
alg.keySize, alg.blockSize, alg.M, alg.L)
print 'key: %s'%b2a_p(key)[9:]
print 'nonce: %s'%b2a_p(nonce)[9:]
print 'addAuth:%s'%b2a_p(addAuth)[9:]
print 'pt: %s'%b2a_p(pt)[9:]
ct = alg.encrypt(pt,nonce=nonce,addAuthData=addAuth)
print 'ct: %s'%b2a_p(ct)[9:]
print 'kct: %s'%b2a_p(kct)[9:]
print '========================================================'
self.assertEqual( ct, kct )
dct = alg.decrypt(ct,nonce=nonce,addAuthData=addAuth)
self.assertEqual( dct, pt )
CCMtestVector(
testCase = "CCM Packet Vector #1 (from D.W.)",
macSize = 8,
key = "C0 C1 C2 C3 C4 C5 C6 C7 C8 C9 CA CB CC CD CE CF",
nonce = "00 00 00 03 02 01 00 A0 A1 A2 A3 A4 A5",
addAuth = "00 01 02 03 04 05 06 07",
pt = """ 08 09 0A 0B 0C 0D 0E 0F
10 11 12 13 14 15 16 17 18 19 1A 1B 1C 1D 1E """,
kct = """ 58 8C 97 9A 61 C6 63 D2
F0 66 D0 C2 C0 F9 89 80 6D 5F 6B 61 DA C3 84 17
E8 D1 2C FD F9 26 E0 """)
CCMtestVector(
testCase = "IEEE 802.11 Data Packet, no A4 and no QC",
macSize = 8,
key = "c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 ca cb cc cd ce cf",
nonce = "00 00 02 2d 49 97 b4 06 05 04 03 02 01",
addAuth = """08 41 00 06 25 a7 c4 36 00 02 2d 49 97 b4 00 06
25 a7 c4 36 e0 00""",
pt = """aa aa 03 00 00 00 88 8e 01 01 00 00 00 00 00 00
00""",
kct = """1e e5 2d 13 b1 be 3f 20 42 5b 3f de dd d4 55 2b
98 71 d8 7b 65 8c fd 57 f7 """)
CCMtestVector(
testCase = "IEEE 802.11 Data Packet, no A4 and no QC, retry",
macSize = 8,
key = "c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 ca cb cc cd ce cf",
nonce = "00 00 02 2d 49 97 b4 06 05 04 03 02 01",
addAuth = """08 41 00 06 25 a7 c4 36 00 02 2d 49 97 b4 00 06
25 a7 c4 36 e0 00 """,
pt = """aa aa 03 00 00 00 88 8e 01 01 00 00 00 00 00 00
00""",
kct = """1e e5 2d 13 b1 be 3f 20 42 5b 3f de dd d4 55 2b
98 71 d8 7b 65 8c fd 57 f7 """)
CCMtestVector(
testCase = "IEEE 802.11 Data Packet,A4 with no QC ",
macSize = 8,
key = "c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 ca cb cc cd ce cf",
nonce = "00 00 02 2d 49 97 b4 00 00 00 00 00 01",
addAuth = """08 43 00 06 25 a7 c4 36 00 02 2d 49 97 b4 00 06
25 a7 c4 36 e0 00 41 42 43 44 45 46 """,
pt = """aa aa 03 00 00 00 88 8e 01 01 00 00 00 00 00 00
00""",
kct = """3b e9 b2 46 c6 fc 7a 51 55 1e 14 c6 a8 85 28 bc
06 56 67 c8 ef 30 b3 12 69 """)
CCMtestVector(
testCase = "IEEE 802.11 Data Packet,A4 and QC ",
macSize = 8,
key = "c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 ca cb cc cd ce cf",
nonce = "04 00 02 2d 49 97 b4 00 00 00 00 00 01",
addAuth = """88 43 00 06 25 a7 c4 36 00 02 2d 49 97 b4 00 06
25 a7 c4 36 e0 00 41 42 43 44 45 46 04 00""",
pt = """aa aa 03 00 00 00 88 8e 01 01 00 00 00 00 00 00
00""",
kct = """46 72 f2 9e 41 54 e9 11 58 47 c2 a9 ae dc 10 0c
e8 82 53 bd a2 04 ae 1d 33 """)
CCMtestVector(
testCase = "IEEE 802.11 Data Packet,QC no A4",
macSize = 8,
key = "c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 ca cb cc cd ce cf",
nonce = "04 00 02 2d 49 97 b4 00 00 00 00 00 01",
addAuth = """88 41 00 06 25 a7 c4 36 00 02 2d 49 97 b4 00 06
25 a7 c4 36 e0 00 04 00 """,
pt = """aa aa 03 00 00 00 88 8e 01 01 00 00 00 00 00 00
00""",
kct = """46 72 f2 9e 41 54 e9 11 58 47 c2 a9 ae dc 10 0c
e8 dc 91 98 bf 6a 52 c8 03 """)
CCMtestVector(
testCase = "IEEE 802.11 Data Packet,QC no A4",
macSize = 8,
key = "c0 c1 c2 c3 c4 c5 c6 c7 c8 c9 ca cb cc cd ce cf",
nonce = "04 00 02 2d 49 97 b4 00 00 00 00 00 01",
addAuth = """88 41 00 06 25 a7 c4 36 00 02 2d 49 97 b4 00 06
25 a7 c4 36 e0 00 04 00 """,
pt = """aa aa 03 00 00 00 88 8e 01 01 00 00 00 00 00 00
00""",
kct = """46 72 f2 9e 41 54 e9 11 58 47 c2 a9 ae dc 10 0c
e8 dc 91 98 bf 6a 52 c8 03 """)
CCMtestVector(
testCase = "IEEE 802.11 Data Packet, no A4, No QC, WEP preset",
macSize = 8,
key = "00 01 02 03 04 05 06 07 08 c9 0a 0b 0c 0d 0e 0f",
nonce = "00 00 02 2d 49 97 b4 06 05 04 03 02 01",
addAuth = """08 41 00 06 25 a7 c4 36 00 02 2d 49 97 b4 00 06
25 a7 c4 36 e0 00 """,
pt = """aa aa 03 00 00 00 88 8e 01 01 00 00 00 00 00 00
00""",
kct = """de bf 2c c9 94 e6 5a 70 2c ee e3 19 84 21 39 c3
f2 9a 2e 12 63 11 74 5f 3c """)
CCMtestVector(
testCase = "KAT# 1 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "04 e5 1c f0 20 2d 4c 59 0f d2 e1 28 a5 7c 50 30",
nonce = "f1 84 44 08 ab ae a5 b8 fc ba 33 2e 78",
addAuth = """ """,
pt = """ """,
kct = """6f b0 8f 1f a0 ec e1 f0 """)
CCMtestVector(
testCase = "KAT# 2 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "c4 85 98 ee 34 6c 62 1e c9 7c 1f 67 ce 37 11 85",
nonce = "51 4a 8a 19 f2 bd d5 2f 3a b5 03 97 76",
addAuth = """0c """,
pt = """e7 """,
kct = """13 6d 5e af 39 d5 d3 6f 27 """)
CCMtestVector(
testCase = "KAT# 3 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "f8 ba 1a 55 d0 2f 85 ae 96 7b b6 2f b6 cd a8 eb",
nonce = "7e 78 a0 50 68 dd e8 3a 11 40 85 a2 ea",
addAuth = """10 """,
pt = """ """,
kct = """b8 01 6f 2e fc 56 b2 31 """)
CCMtestVector(
testCase = "KAT# 4 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "0c 84 68 50 ee c1 76 2c 88 de af 2e e9 f4 6a 07",
nonce = "cc ee 9b fb 82 2d 5d 12 fe 9e 30 8f 7a",
addAuth = """ """,
pt = """05 """,
kct = """7d d0 b5 77 e9 0c 1c de b5 """)
CCMtestVector(
testCase = "KAT# 5 - AES_CCM 128 M= 4 L= 2",
macSize = 4,
key = "77 a5 59 75 29 27 20 97 a6 03 d5 91 31 f3 cb ba",
nonce = "97 ea 83 a0 63 4b 5e d7 62 7e b9 df 22",
addAuth = """17 fc 89 c1 fc 0d 63 98 c3 d9 57 7d f7 63 c8 b6
a8 8a df 36 91 """,
pt = """5e 05 74 03 42 de 19 41 """,
kct = """0c 5f 95 1b 27 29 6a 16 a8 2a 32 d5 """)
CCMtestVector(
testCase = "KAT# 6 - AES_CCM 128 M= 6 L= 2",
macSize = 6,
key = "8b ca 94 dd 82 f4 ea 74 bf a2 1f 09 1e 67 85 40",
nonce = "cf b7 a6 2e 88 01 3b d6 d3 af fc c1 91",
addAuth = """ca 30 a0 e7 50 07 97 22 71 """,
pt = """04 1e bc 2f dc a0 f3 a5 ae 2c 1b d0 36 83 1c 95
49 6c 5f 4d bf 3d 55 9e 72 de 80 2a 18 """,
kct = """ad 81 34 7b 1f 61 6e b5 34 c0 e9 a0 7b ed 92 57
11 cf 4a 4b 2c 3f 9b 01 25 7a 9a e2 76 e6 c1 83
f0 2f ad """)
CCMtestVector(
testCase = "KAT# 7 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "df 11 db 8e f8 22 73 47 01 59 14 0d d6 46 a2 2f",
nonce = "c5 d6 81 5d 5a 6d 72 40 ee a5 8c 89 a2",
addAuth = """70 7f cf 24 b3 2d 38 33 0c f6 70 a5 5a 0f e3 4f
ad 2b 1c 29 """,
pt = """eb c9 6c 76 02 """,
kct = """55 17 b7 c5 78 27 3c dd 6a 15 43 9c a0 """)
CCMtestVector(
testCase = "KAT# 8 - AES_CCM 128 M=10 L= 2",
macSize = 10,
key = "eb d8 72 fb c3 f3 a0 74 89 8f 8b 2f bb 90 96 66",
nonce = "d6 c6 38 d6 82 45 de c6 9a 74 80 f3 51",
addAuth = """c9 6b e2 76 fb e6 c1 27 f2 8a 8c 8e 58 32 f8 b3
41 a2 19 a5 74 """,
pt = """94 6b """,
kct = """55 cd b0 f0 72 a0 b4 31 37 85 31 55 """)
CCMtestVector(
testCase = "KAT# 9 - AES_CCM 128 M=12 L= 2",
macSize = 12,
key = "3b b2 5e fd de ff 30 12 2f df d0 66 9d a7 ff e0",
nonce = "3c 0e 37 28 96 9b 95 4f 26 3a 80 18 a9",
addAuth = """f9 a6 """,
pt = """ef 70 a8 b0 51 46 24 81 92 2e 93 fa 94 71 ac 65
77 3f 5a f2 84 30 fd ab bf f9 43 b9 """,
kct = """aa 27 4b a3 37 2e f5 d6 cc ae fe 16 8f de 14 63
37 83 e7 d3 0b cc 4a 8f dc f0 18 c9 c6 79 e8 b9
10 95 43 3b bf f2 89 f0 """)
CCMtestVector(
testCase = "KAT# 10 - AES_CCM 128 M=14 L= 2",
macSize = 14,
key = "98 c7 fe 73 71 62 4c 9f fd 3c b3 d9 fb 77 6a f7",
nonce = "1e ea 4e 1f 58 80 4b 97 17 23 0a d0 61",
addAuth = """c2 fc a1 """,
pt = """46 41 5c 6b 81 ec a4 89 89 ab fd a2 2d 3a 0b fc
9c c1 fc 07 93 63 """,
kct = """64 e2 0b 0c ef d8 2a 00 27 ed 0f f2 90 1b d3 b7
b0 67 6c 1c 4a 4c b7 5c 40 0f db 9c 87 9e 99 c5
77 1a 9a 52 """)
CCMtestVector(
testCase = "KAT# 11 - AES_CCM 128 M=16 L= 2",
macSize = 16,
key = "eb 1d 3c 1d b4 a4 c5 e2 66 8d 9b 50 f4 fd 56 f6",
nonce = "ef ec 95 20 16 91 83 57 0c 4c cd ee a0",
addAuth = """48 1b db 34 98 0e 03 81 24 a1 db 1a 89 2b ec 36
6a ce 5e ec 40 73 e7 23 98 be ca 86 f4 b3 """,
pt = """50 a4 20 b9 3c ef f4 e7 62 """,
kct = """6f 55 64 96 29 95 40 49 34 84 5e 64 dc 68 3c 5f
40 dc ec 0a 30 17 e5 df ee """)
CCMtestVector(
testCase = "KAT# 12 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "0c fd f2 47 24 c5 8e b8 35 66 53 39 e8 1c 37 c0",
nonce = "95 68 e2 e4 55 2d 5f 72 bb 70 ca 3f 3a",
addAuth = """5e 55 6e ac 1b f5 4b d5 4e db 23 21 75 43 03 02
4c 71 b0 ce fd """,
pt = """ae 60 c4 8b a9 b5 f8 2c 2f eb 07 e2 9d 82 6b 95
a7 64 """,
kct = """42 eb e1 dc be 9b 60 7b f8 d4 cb 21 6a 7b 5a 57
c7 1e 55 97 96 a1 a6 bf 33 2a """)
CCMtestVector(
testCase = "KAT# 13 - AES_CCM 128 M= 8 L= 3",
macSize = 8,
key = "cc dd 57 cb 5c 0e 5f cd 88 5e 9a 42 39 e9 b9 ca",
nonce = "d6 0d 64 37 59 79 c2 fc 9a c5 f0 99",
addAuth = """7d 86 5c 44 c0 6f 28 a6 46 b3 80 49 4b 50 """,
pt = """f6 85 9a fb 79 8b 8a 4b a4 ad 6d 31 99 85 bc 42
9e 8f 0a fa """,
kct = """fd f2 3f 1f c5 27 9e ec 06 b5 29 e7 69 96 da 50
f9 b3 16 5b 6b 27 2c c7 df 89 06 05 """)
CCMtestVector(
testCase = "KAT# 14 - AES_CCM 128 M= 8 L= 4",
macSize = 8,
key = "46 75 97 1a 48 d0 8c 5b c3 53 cb cd ba 82 e9 34",
nonce = "37 b3 25 a9 8f 9c 1b d9 c9 3c f3",
addAuth = """15 2d 76 """,
pt = """83 ab 9d 98 """,
kct = """db 12 ef 44 3e f0 a6 aa d4 2f 35 28 """)
CCMtestVector(
testCase = "KAT# 15 - AES_CCM 128 M= 8 L= 5",
macSize = 8,
key = "32 c6 33 dd 03 9e 4d 75 20 c7 40 ec 29 fa 75 9b",
nonce = "53 f8 69 fe 27 9a f0 f9 f8 a6",
addAuth = """2e e1 a3 04 cf 1d 3e 75 fe """,
pt = """54 16 e3 52 bf d2 70 3d 24 2f 66 c1 ef 48 9e 49
bc 3c fe 3f ce 38 95 82 0e 87 """,
kct = """11 62 22 64 5e 6c a0 d1 c9 95 3a 1b 00 04 59 4e
3c 90 f0 56 c6 04 f5 37 7e 5a d3 c0 50 0b 33 3a
4c 19 """)
CCMtestVector(
testCase = "KAT# 16 - AES_CCM 128 M= 8 L= 6",
macSize = 8,
key = "91 f2 47 2d 7a 12 1c 9c dd 4b 6c 90 80 67 5a 10",
nonce = "20 aa 00 eb 1f ed cb c9 33",
addAuth = """9d 52 4a e1 96 d8 ec 48 62 02 be 5c 45 45 67 2a """,
pt = """e8 a8 29 8c 0b aa 91 90 34 7c eb 9a ab ff d8 3d
48 86 e5 c2 53 e2 """,
kct = """86 09 aa 4b 03 c5 67 99 a9 84 4d 4d 62 75 c0 bd
09 43 f2 69 12 46 88 fa fd ae 6e 06 6a 73 """)
CCMtestVector(
testCase = "KAT# 17 - AES_CCM 128 M= 8 L= 7",
macSize = 8,
key = "e3 14 d7 0f 1f 9e 85 e7 d2 d6 59 6e 55 d4 f9 a8",
nonce = "12 e4 a2 8a f7 f3 71 4d",
addAuth = """f6 62 2e 59 32 f2 18 45 09 23 76 d4 a0 62 a1 5e
4f aa 28 8b 84 35 bc d8 ac 5a 7e c4 44 e8 """,
pt = """4b """,
kct = """5a be a2 22 f4 13 94 50 27 """)
CCMtestVector(
testCase = "KAT# 18 - AES_CCM 128 M= 8 L= 8",
macSize = 8,
key = "50 53 8b 62 e8 14 02 c2 ee 11 8a 66 62 b4 77 07",
nonce = "7e d7 94 53 e4 a1 8d",
addAuth = """48 d5 42 89 16 be 95 29 35 37 b9 aa 08 """,
pt = """60 43 8c c6 48 4d 6e d0 50 b0 1e 77 fd 8e 43 19
81 a2 33 6d 02 f8 cb 84 """,
kct = """bf fa bd 07 33 ed 9f 6c 90 7c b6 32 0a bf 32 7e
c3 a5 78 85 5b f2 e2 56 72 c9 3c cc a4 a3 f2 9c """)
CCMtestVector(
testCase = "KAT# 19 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "98 8a bd c2 3a 65 bb 5d cd 99 f9 42 67 d3 0b 45",
nonce = "c7 8e 7d fa 21 24 5a 43 90 8f 80 b3 8b",
addAuth = """ """,
pt = """0a 33 d2 12 79 8c f1 32 c5 51 db fd 53 27 7e b4
c9 e5 cc 07 e3 c2 e8 1c 58 2e 7d a6 c4 b1 34 5a
74 """,
kct = """f3 1f 8e fa 43 b4 cf 36 1d 20 34 62 05 b0 cc fd
c1 81 79 17 b4 99 c5 84 3e b6 6e c0 b9 6d 27 e5
85 9a a9 bd ae a8 00 d1 7a """)
CCMtestVector(
testCase = "KAT# 20 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "e0 d7 27 f8 10 5b 00 6d 88 22 96 89 5f 74 dc 0c",
nonce = "e5 99 95 c7 ed 2a b0 35 7b 0e 7b fd b6",
addAuth = """c5 """,
pt = """d0 44 95 d5 24 bb a6 5e 87 f8 5b 00 d0 48 56 4f
a2 04 df a9 9d 79 94 55 32 67 23 cd 7c 2f 7a 36
95 """,
kct = """ee b1 9c c2 e1 a3 71 3a a0 eb 2f da 57 f3 d3 d8
e2 c8 2d e1 2f 39 49 5a ce 8e b0 5d 14 07 9a a2
04 e6 29 62 3b a3 a3 0b ea """)
CCMtestVector(
testCase = "KAT# 21 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "df 19 a2 25 47 cd 66 d8 75 16 de 8a 18 22 54 26",
nonce = "c0 8c 9b 85 9a a1 86 50 89 59 2f 7c be",
addAuth = """9e 27 """,
pt = """16 5c 95 80 5b d4 ac a2 9d 4e 62 a2 84 31 1b 6f
5f a9 b8 2d 27 23 88 f2 92 2d 9b 7e """,
kct = """62 b3 51 dd bc e7 cd f5 80 e8 c8 fd 2b 79 e4 8e
42 31 11 32 52 b8 6e 7e bd 7a 73 3f 0c 85 7f a6
5d 2e 14 4a """)
CCMtestVector(
testCase = "KAT# 22 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "b1 20 0c 9a 41 66 58 4d 08 1b 9d ee 30 9b 9e e7",
nonce = "a7 89 88 53 2f 4c 75 0b 02 63 3f 1b d9",
addAuth = """86 52 43 02 de 79 1d 5c 3e 3b 3f 93 b5 2c 75 """,
pt = """92 0a a8 c6 d5 4e a8 d7 e6 c3 fb 9d 6d 9c 9f 8d
bb 1c ab bb 41 59 d8 93 80 f5 53 40 89 """,
kct = """2f 8c ac 1a b1 20 0c 0e 41 6c 95 91 b2 6e 89 07
75 9b 57 5a eb 90 76 14 f4 fe 64 bb 3c 45 ad 77
38 90 37 33 97 """)
CCMtestVector(
testCase = "KAT# 23 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "c1 56 c1 1d 02 ff 67 d7 72 bd d2 1f 33 59 12 be",
nonce = "5b 1f e2 48 8e 6c fe 20 23 77 61 d9 d0",
addAuth = """d9 da 29 4c d5 20 30 26 2e a0 10 25 e8 e4 20 1e """,
pt = """ """,
kct = """f1 22 23 f0 46 71 13 f1 """)
CCMtestVector(
testCase = "KAT# 24 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "1b 13 51 12 e0 2e 26 14 5a e5 55 0d 79 b1 5a fc",
nonce = "70 00 f2 88 ef 21 d3 28 61 7a de da 5b",
addAuth = """6b 27 87 7e cd 15 af 07 ea f3 06 4d c1 35 cd b9
64 """,
pt = """2e 01 11 9a a1 e1 b6 95 cd 74 22 96 84 8d 0e f2
40 ba 3d 29 56 75 7b 43 """,
kct = """b1 0d 5b a6 c6 9e bf 40 52 cc bf 5e 51 65 8c 95
3c 99 f3 9d 18 d8 34 f4 ed 7b b4 c9 7e 3a 6b 39 """)
CCMtestVector(
testCase = "KAT# 25 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "99 1c d1 06 71 c6 58 db fd 3e 86 b3 c2 51 ac 0f",
nonce = "d8 ea cc b4 56 b7 ff 99 98 b4 59 bc b3",
addAuth = """ed b9 79 05 b3 09 98 54 8f e1 05 d2 26 16 86 2d
1d 2d dc c7 33 cd 71 fe b5 a7 53 ae ba eb b1 7d """,
pt = """37 e6 72 ae 6a da 05 dd 88 9d """,
kct = """db f0 80 9a 74 d3 c6 62 0b f4 c5 1c 91 6c 93 16
01 f2 """)
CCMtestVector(
testCase = "KAT# 26 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "5f c7 0b 97 11 75 ff bc 69 da e3 e8 bf 08 73 bd",
nonce = "f8 bb 2c a4 db a6 59 98 d4 2a 28 56 c3",
addAuth = """71 04 9a 00 2a 1b e2 5c 7b f2 85 8c 31 18 0d ce
94 f1 8d 20 79 82 """,
pt = """ """,
kct = """0d 25 b4 0f 5a be 36 19 """)
CCMtestVector(
testCase = "KAT# 27 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "17 40 8b dc 9c 5e 13 94 29 35 dd 2e 7d bd 54 37",
nonce = "14 7a 47 0d ff ab 27 4c ab a4 38 5d f2",
addAuth = """ """,
pt = """df """,
kct = """dc 49 af 7a 17 61 ce e6 c7 """)
CCMtestVector(
testCase = "KAT# 28 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "96 c6 9e f0 99 fc e0 3d 12 fe 0d a0 67 71 6f 0c",
nonce = "ed 65 37 be f8 08 79 83 78 53 5d 4a 4c",
addAuth = """3a f1 fa c1 76 5a 19 29 cf 5c 5f 21 94 ac eb 3a
6d 7e 07 ca 76 fd d7 2b 6f e4 51 f8 c9 b4 b4 c4 """,
pt = """e0 35 """,
kct = """d9 b4 34 ee cd 33 3a e8 6c 24 """)
CCMtestVector(
testCase = "KAT# 29 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "ca f7 8d 13 4b e2 09 8f 32 62 af 07 32 7c 9f c0",
nonce = "88 f1 c3 89 76 70 b9 22 72 a1 ae 92 13",
addAuth = """38 fa 4b bd ca 0b 8f bb 94 1d 23 a1 84 40 """,
pt = """e2 16 5a """,
kct = """36 59 64 75 bc a9 1f 8e a1 54 81 """)
CCMtestVector(
testCase = "KAT# 30 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "ea 76 e2 95 20 f6 cd 6a 7e 43 b1 5f e4 71 df 47",
nonce = "68 1f 2c 11 7d 97 10 34 76 3c 3e c5 9b",
addAuth = """d2 30 20 84 10 67 54 a5 82 32 75 """,
pt = """4a 27 7e 05 """,
kct = """88 cc 3d 7d 34 da 0b 2e ff 30 97 e5 """)
CCMtestVector(
testCase = "KAT# 31 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "f6 84 fd e0 d5 87 c9 24 66 e0 d3 d6 d6 05 7c ed",
nonce = "aa 81 33 0b c8 f8 24 82 df 99 d7 57 6b",
addAuth = """47 99 ee 5f 0c 60 6a 8a d5 1c 04 16 ce 19 63 """,
pt = """49 7d 2b 08 01 """,
kct = """c0 2c e4 d5 88 3e e5 36 4a d9 fc c0 ac """)
CCMtestVector(
testCase = "KAT# 32 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "75 29 fb 1c db d2 72 2f 14 89 d7 c8 48 29 72 d7",
nonce = "17 35 b5 aa d2 90 97 28 2f e3 fa 11 37",
addAuth = """7d 13 21 e4 5c 3c 79 a4 29 78 4c 5c 1f 8c c0 """,
pt = """d8 94 34 c2 73 b7 """,
kct = """09 b0 a7 5e 52 66 8c d3 6d 29 97 59 c7 ea """)
CCMtestVector(
testCase = "KAT# 33 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "63 9d ac 12 4a 9a 47 03 9c 2b 63 8f 66 48 0b b8",
nonce = "2f 2d 59 97 8d 0e ef 44 3e 5a ce 50 9b",
addAuth = """c4 35 4b f6 ca f3 48 4e 6f 2a f3 6f ed ff 1f dc
0b """,
pt = """0b 22 97 03 7c 02 9e """,
kct = """b5 c3 f8 0f 56 b1 9d d8 2c f0 f0 cf dd 7a 22 """)
CCMtestVector(
testCase = "KAT# 34 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "25 69 2d 57 e1 de d4 f5 08 34 40 98 c8 fc 70 1e",
nonce = "a2 db d6 96 04 25 2c 2f d6 3e e7 a9 6b",
addAuth = """d4 fd 14 f8 18 57 """,
pt = """69 12 08 9c 94 60 c1 25 """,
kct = """81 58 32 b4 97 2d 35 e8 0e 9c 10 c0 e0 6b 58 64 """)
CCMtestVector(
testCase = "KAT# 35 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "f6 92 a4 95 3e b4 97 c1 cc f1 a1 47 ad 12 59 f1",
nonce = "e6 cd 88 fd 72 96 90 68 02 24 9d 5c b8",
addAuth = """db 1b f5 a4 56 93 74 fd cf 34 eb """,
pt = """fc dc 43 ed 68 17 37 ac 8d """,
kct = """ba d6 85 af 4c 35 15 03 26 6f 97 69 4f 54 62 7f
ed """)
CCMtestVector(
testCase = "KAT# 36 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "94 c6 17 0a 9b d0 c6 dc e5 66 7b be 9f e9 4e 5d",
nonce = "b2 bb 6a aa c5 88 ce fb 4e fc c6 2b 61",
addAuth = """a4 d9 a5 be 4f ee d2 bc eb 0d 9e 59 75 19 72 98
f3 be 45 6a 23 ef a9 c7 ed 56 14 """,
pt = """aa d9 7b 99 47 22 07 9a 25 30 """,
kct = """11 d7 a8 6e 94 9c 06 d2 48 15 60 2d ca a1 a1 8c
be 48 """)
CCMtestVector(
testCase = "KAT# 37 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "13 28 d8 ea cf 6f 77 da 12 24 72 5b bb 07 43 d2",
nonce = "21 43 19 d3 f8 67 20 f6 53 3a f5 f1 6c",
addAuth = """60 1b 7c aa d5 54 1e 9a 7e ea fa """,
pt = """d5 87 65 96 de 32 a1 e7 85 83 78 """,
kct = """22 1c f0 92 45 71 38 e7 00 21 af 45 d3 31 28 01
69 3a 47 """)
CCMtestVector(
testCase = "KAT# 38 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "54 7b 02 1b ef 8c 1c 0f f1 04 ba 1d bf 0e 2c 0b",
nonce = "f7 a8 59 5b d6 5d 23 e9 cb 17 b1 e1 92",
addAuth = """ """,
pt = """64 8c ec 53 c4 79 fe 41 53 17 ba 8e """,
kct = """6f 52 93 85 89 87 15 21 29 d5 dd 85 0d dc 3d 58
60 fb 8a b2 """)
CCMtestVector(
testCase = "KAT# 39 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "31 4b 5f 29 01 80 0f 0a 6a 4f ad 8d d1 9b b1 13",
nonce = "2a a9 31 4e c4 ef 5b 71 b5 3b 2c da 17",
addAuth = """92 52 e0 44 24 d5 29 f0 00 96 6b a8 87 90 0c 07
eb c1 a8 51 02 f0 d0 07 80 20 3d """,
pt = """40 8d 60 be f0 3c b1 8a 1f 4f 40 5d 9f """,
kct = """99 a7 5d db a3 72 9f c7 41 22 ba e0 25 4b 7f ba
05 a1 4b cf 09 """)
CCMtestVector(
testCase = "KAT# 40 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "b7 32 3f 57 b7 e5 3d b7 4e bd e0 88 d2 d3 e1 61",
nonce = "4e 78 a2 6b 45 93 d0 96 9c 8f 9f 63 c1",
addAuth = """f6 fb 75 4a eb 40 7a 91 6a d8 2c 27 13 01 97 9f
85 ff 01 80 8e 51 67 29 15 e5 72 """,
pt = """46 64 14 a0 82 4e 25 9d ef 30 9d 9a 1b 54 """,
kct = """f9 a1 27 c7 67 4b 39 ea 50 30 c3 eb 45 b1 ff b8
b4 5c 86 72 b2 7b """)
CCMtestVector(
testCase = "KAT# 41 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "48 28 39 bc 82 e9 4a d1 55 8f b6 79 0b 3e 36 6f",
nonce = "b8 ed 08 17 e6 f6 df 07 5e f7 87 d2 ef",
addAuth = """3c 16 89 0f 70 b2 1c ab ba 2b a7 84 35 b0 66 2a
b6 1c db 78 42 """,
pt = """52 13 fe 0f 90 8c c5 69 a1 6e 48 c8 c5 d3 92 """,
kct = """c0 64 0e a7 0a d5 46 f7 3e 7e 44 5f 96 78 f5 57
36 22 d9 77 74 93 a4 """)
CCMtestVector(
testCase = "KAT# 42 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "73 62 b6 9c e4 dd 9f 02 75 36 9a 60 24 e5 96 69",
nonce = "c0 a7 67 e3 ba 5c 5d 86 10 6e eb f4 9f",
addAuth = """2e 37 47 00 22 cc 59 91 a4 a4 13 a0 d8 5a c1 ef
eb 9a 4a """,
pt = """16 6a 40 a6 70 60 b2 a6 58 af 1a d5 73 8f 7d 12 """,
kct = """90 42 65 e5 35 01 bf dd 93 71 87 bd 4c 6c 29 bf
85 b1 c2 7a 92 70 bb c1 """)
CCMtestVector(
testCase = "KAT# 43 - AES_CCM 128 M= 8 L= 2",
macSize = 8,
key = "63 de fa 62 5f 45 09 34 78 8f b4 1b 32 69 cc 94",
nonce = "7f 9d 39 9d 87 26 be f8 10 71 92 90 30",
addAuth = """ee 42 eb """,
pt = """0f 3c 27 63 46 fe 7a 72 ad 46 6b 39 a5 62 d5 52
5a """,
kct = """68 72 5d 02 05 49 78 34 6a 7b f5 4e df c6 e6 d8
e6 6c 5c 7e e0 3d f8 0a b0 """)
# Make this test module runnable from the command prompt:
# executing the file directly runs the collected CCM KAT cases via unittest.
if __name__ == "__main__":
    unittest.main()
| mit |
lovelysystems/pyjamas | pygtkweb/030-aspectframe.py | 13 | 1230 | #!/usr/bin/env python
# example aspectframe.py
import pygtk
pygtk.require('2.0')
import gtk
class AspectFrameExample:
    """Demo window showing a gtk.AspectFrame that forces a 2:1 aspect ratio
    on its child widget regardless of the child's own size request."""

    def __init__(self):
        # Top-level window hosting the demo.
        top = gtk.Window(gtk.WINDOW_TOPLEVEL)
        top.set_title("Aspect Frame")
        top.connect("destroy", lambda widget: gtk.main_quit())
        top.set_border_width(10)

        # Aspect frame: label "2x1", child centered at (0.5, 0.5),
        # xsize/ysize forced to 2, child's own aspect ratio ignored (False).
        frame = gtk.AspectFrame("2x1", 0.5, 0.5, 2, False)
        top.add(frame)
        frame.show()

        # The drawing area asks for 200x200, but the frame grants 200x100
        # because of the enforced 2:1 ratio.
        canvas = gtk.DrawingArea()
        canvas.set_size_request(200, 200)
        frame.add(canvas)
        canvas.show()

        top.show()
def main():
    """Enter the GTK main loop; returns 0 once the loop exits."""
    gtk.main()
    return 0
# When run as a script: build the demo window, then block in the GTK
# event loop until the window is destroyed.
if __name__ == "__main__":
    AspectFrameExample()
    main()
| apache-2.0 |
vmuriart/sqldef | src/parsers/sql2003.py | 1 | 322321 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# CAVEAT UTILITOR
#
# This file was automatically generated by Grako.
#
# https://pypi.python.org/pypi/grako/
#
# Any changes you make to it will be overwritten the next time
# the file is generated.
from __future__ import print_function, division, absolute_import, unicode_literals
from grako.parsing import graken, Parser
from grako.util import re, RE_FLAGS, generic_main # noqa
# Presumably the Grako generation timestamp (Y, M, D, h, m, s, µs) — TODO confirm
# against the generator's output convention.
__version__ = (2016, 8, 1, 1, 11, 52, 0)

# Public API of this generated module.
__all__ = [
    'SqlParser',
    'SqlSemantics',
    'main'
]

# Reserved words excluded from regular identifiers; empty for this grammar.
KEYWORDS = set([])
class SqlParser(Parser):
    def __init__(self,
                 whitespace='\\s+',
                 nameguard=None,
                 comments_re='/\\*[\\s\\S]*?\\*/',
                 eol_comments_re='--.*?$',
                 ignorecase=True,
                 left_recursion=True,
                 keywords=KEYWORDS,
                 namechars='',
                 **kwargs):
        """Configure the generated SQL parser and delegate to grako's Parser.

        Defaults encode SQL lexing conventions: runs of whitespace are
        skipped, C-style ``/* ... */`` block comments and ``--`` end-of-line
        comments are ignored, matching is case-insensitive, and left
        recursion support is enabled.  ``keywords`` is the module-level
        KEYWORDS set (empty for this grammar).  Extra ``**kwargs`` pass
        through to the base class unchanged.
        """
        super(SqlParser, self).__init__(
            whitespace=whitespace,
            nameguard=nameguard,
            comments_re=comments_re,
            eol_comments_re=eol_comments_re,
            ignorecase=ignorecase,
            left_recursion=left_recursion,
            keywords=keywords,
            namechars=namechars,
            **kwargs
        )
@graken()
def _digit_(self):
self._pattern(r'\d+')
@graken()
def _double_quote_(self):
self._token('"')
@graken()
def _quote_(self):
self._token("'")
@graken()
def _left_paren_(self):
self._token('(')
@graken()
def _right_paren_(self):
self._token(')')
@graken()
def _asterisk_(self):
self._token('*')
@graken()
def _plus_sign_(self):
self._token('+')
@graken()
def _comma_(self):
self._token(',')
@graken()
def _minus_sign_(self):
self._token('-')
@graken()
def _period_(self):
self._token('.')
@graken()
def _solidus_(self):
self._token('/')
@graken()
def _colon_(self):
self._token(':')
@graken()
def _semicolon_(self):
self._token(';')
@graken()
def _less_than_operator_(self):
self._token('<')
@graken()
def _equals_operator_(self):
self._token('=')
@graken()
def _greater_than_operator_(self):
self._token('>')
@graken()
def _question_mark_(self):
self._token('?')
@graken()
def _left_bracket_or_trigraph_(self):
with self._choice():
with self._option():
self._left_bracket_()
with self._option():
self._left_bracket_trigraph_()
self._error('no available options')
@graken()
def _right_bracket_or_trigraph_(self):
with self._choice():
with self._option():
self._right_bracket_()
with self._option():
self._right_bracket_trigraph_()
self._error('no available options')
@graken()
def _left_bracket_(self):
self._token('[')
@graken()
def _left_bracket_trigraph_(self):
self._token('??(')
@graken()
def _right_bracket_(self):
self._token(']')
@graken()
def _right_bracket_trigraph_(self):
self._token('??)')
@graken()
def _underscore_(self):
self._token('_')
    @graken()
    def _regular_identifier_(self):
        # A letter followed by word characters (matched case-insensitively,
        # since the parser is constructed with ignorecase=True), then
        # rejected if it collides with a declared keyword.
        self._pattern(r'[a-z]\w*')
        self._check_name()
@graken()
def _large_object_length_token_(self):
self._digit_()
self._multiplier_()
@graken()
def _multiplier_(self):
with self._choice():
with self._option():
self._token('K')
with self._option():
self._token('M')
with self._option():
self._token('G')
self._error('expecting one of: G K M')
    @graken()
    def _delimited_identifier_(self):
        # An identifier wrapped in double quotes; the body rule allows any
        # character except a bare double quote or newline ("" escapes a quote).
        self._double_quote_()
        self._delimited_identifier_body_()
        self._double_quote_()
@graken()
def _delimited_identifier_body_(self):
self._pattern(r'(""|[^"\n])+')
@graken()
def _unicode_escape_value_(self):
with self._choice():
with self._option():
self._unicode_4_digit_escape_value_()
with self._option():
self._unicode_6_digit_escape_value_()
with self._option():
self._unicode_character_escape_value_()
self._error('no available options')
@graken()
def _unicode_4_digit_escape_value_(self):
self._unicode_escape_character_()
self._byte_()
self._byte_()
@graken()
def _unicode_6_digit_escape_value_(self):
self._unicode_escape_character_()
self._plus_sign_()
self._byte_()
self._byte_()
self._byte_()
@graken()
def _unicode_character_escape_value_(self):
self._unicode_escape_character_()
self._unicode_escape_character_()
@graken()
def _unicode_escape_character_(self):
self._token('\\U')
@graken()
def _not_equals_operator_(self):
self._token('<>')
@graken()
def _greater_than_or_equals_operator_(self):
self._token('>=')
@graken()
def _less_than_or_equals_operator_(self):
self._token('<=')
@graken()
def _concatenation_operator_(self):
self._token('||')
@graken()
def _right_arrow_(self):
self._token('->')
@graken()
def _double_colon_(self):
self._token('::')
@graken()
def _literal_(self):
with self._choice():
with self._option():
self._signed_numeric_literal_()
with self._option():
self._general_literal_()
self._error('no available options')
@graken()
def _unsigned_literal_(self):
with self._choice():
with self._option():
self._unsigned_numeric_literal_()
with self._option():
self._general_literal_()
self._error('no available options')
@graken()
def _general_literal_(self):
with self._choice():
with self._option():
self._character_string_literal_()
with self._option():
self._national_character_string_literal_()
with self._option():
self._unicode_character_string_literal_()
with self._option():
self._binary_string_literal_()
with self._option():
self._datetime_literal_()
with self._option():
self._interval_literal_()
with self._option():
self._boolean_literal_()
self._error('no available options')
@graken()
def _character_string_literal_(self):
with self._optional():
self._underscore_()
self._character_set_name_()
def block0():
self._quote_()
self._character_representation_()
self._quote_()
self._positive_closure(block0)
@graken()
def _character_representation_(self):
self._pattern(r"(''|[^'\n])*")
@graken()
def _national_character_string_literal_(self):
self._token('N')
def block0():
self._quote_()
self._character_representation_()
self._quote_()
self._positive_closure(block0)
@graken()
def _unicode_character_string_literal_(self):
with self._optional():
self._underscore_()
self._character_set_name_()
self._token('U&')
def block0():
self._quote_()
with self._optional():
def block1():
self._unicode_representation_()
self._positive_closure(block1)
self._quote_()
self._positive_closure(block0)
with self._optional():
self._token('ESCAPE')
self._escape_character_()
@graken()
def _unicode_representation_(self):
with self._choice():
with self._option():
self._character_representation_()
with self._option():
self._unicode_escape_value_()
self._error('no available options')
@graken()
def _binary_string_literal_(self):
self._token('X')
def block0():
self._quote_()
with self._optional():
def block1():
self._byte_()
self._positive_closure(block1)
self._quote_()
self._positive_closure(block0)
with self._optional():
self._token('ESCAPE')
self._escape_character_()
@graken()
def _hexit_(self):
self._pattern(r'[a-f\d]')
@graken()
def _byte_(self):
self._hexit_()
self._hexit_()
@graken()
def _signed_numeric_literal_(self):
with self._optional():
self._sign_()
self._unsigned_numeric_literal_()
@graken()
def _unsigned_numeric_literal_(self):
with self._choice():
with self._option():
self._exact_numeric_literal_()
with self._option():
self._approximate_numeric_literal_()
self._error('no available options')
@graken()
def _exact_numeric_literal_(self):
with self._choice():
with self._option():
self._unsigned_integer_()
with self._optional():
self._period_()
with self._optional():
self._unsigned_integer_()
with self._option():
self._period_()
self._unsigned_integer_()
self._error('no available options')
@graken()
def _sign_(self):
with self._choice():
with self._option():
self._plus_sign_()
with self._option():
self._minus_sign_()
self._error('no available options')
@graken()
def _approximate_numeric_literal_(self):
self._exact_numeric_literal_()
self._token('E')
self._signed_integer_()
@graken()
def _signed_integer_(self):
with self._optional():
self._sign_()
self._unsigned_integer_()
@graken()
def _unsigned_integer_(self):
self._digit_()
@graken()
def _datetime_literal_(self):
with self._choice():
with self._option():
self._date_literal_()
with self._option():
self._time_literal_()
with self._option():
self._timestamp_literal_()
self._error('no available options')
@graken()
def _date_literal_(self):
self._token('DATE')
self._date_string_()
@graken()
def _time_literal_(self):
self._token('TIME')
self._time_string_()
@graken()
def _timestamp_literal_(self):
self._token('TIMESTAMP')
self._timestamp_string_()
@graken()
def _date_string_(self):
self._quote_()
self._unquoted_date_string_()
self._quote_()
@graken()
def _time_string_(self):
self._quote_()
self._unquoted_time_string_()
self._quote_()
@graken()
def _timestamp_string_(self):
self._quote_()
self._unquoted_timestamp_string_()
self._quote_()
@graken()
def _time_zone_interval_(self):
self._sign_()
self._hours_value_()
self._colon_()
self._minutes_value_()
@graken()
def _date_value_(self):
self._years_value_()
self._minus_sign_()
self._months_value_()
self._minus_sign_()
self._days_value_()
@graken()
def _time_value_(self):
self._hours_value_()
self._colon_()
self._minutes_value_()
self._colon_()
self._seconds_value_()
@graken()
def _interval_literal_(self):
self._token('INTERVAL')
with self._optional():
self._sign_()
self._interval_string_()
self._interval_qualifier_()
@graken()
def _interval_string_(self):
self._quote_()
self._unquoted_interval_string_()
self._quote_()
@graken()
def _unquoted_date_string_(self):
self._date_value_()
@graken()
def _unquoted_time_string_(self):
self._time_value_()
with self._optional():
self._time_zone_interval_()
@graken()
def _unquoted_timestamp_string_(self):
self._unquoted_date_string_()
self._unquoted_time_string_()
@graken()
def _unquoted_interval_string_(self):
with self._optional():
self._sign_()
with self._group():
with self._choice():
with self._option():
self._year_month_literal_()
with self._option():
self._day_time_literal_()
self._error('no available options')
@graken()
def _year_month_literal_(self):
with self._choice():
with self._option():
self._years_value_()
with self._option():
with self._optional():
self._years_value_()
self._minus_sign_()
self._months_value_()
self._error('no available options')
@graken()
def _day_time_literal_(self):
with self._choice():
with self._option():
self._day_time_interval_()
with self._option():
self._time_interval_()
self._error('no available options')
@graken()
def _day_time_interval_(self):
self._days_value_()
with self._optional():
self._hours_value_()
with self._optional():
self._colon_()
self._minutes_value_()
with self._optional():
self._colon_()
self._seconds_value_()
@graken()
def _time_interval_(self):
with self._choice():
with self._option():
self._hours_value_()
with self._optional():
self._colon_()
self._minutes_value_()
with self._optional():
self._colon_()
self._seconds_value_()
with self._option():
self._minutes_value_()
with self._optional():
self._colon_()
self._seconds_value_()
with self._option():
self._seconds_value_()
self._error('no available options')
@graken()
def _years_value_(self):
self._datetime_value_()
@graken()
def _months_value_(self):
self._datetime_value_()
@graken()
def _days_value_(self):
self._datetime_value_()
@graken()
def _hours_value_(self):
self._datetime_value_()
@graken()
def _minutes_value_(self):
self._datetime_value_()
@graken()
def _seconds_value_(self):
self._unsigned_integer_()
with self._optional():
self._period_()
with self._optional():
self._unsigned_integer_()
@graken()
def _datetime_value_(self):
self._unsigned_integer_()
@graken()
def _boolean_literal_(self):
with self._choice():
with self._option():
self._token('TRUE')
with self._option():
self._token('FALSE')
with self._option():
self._token('UNKNOWN')
self._error('expecting one of: FALSE TRUE UNKNOWN')
@graken()
def _identifier_(self):
self._actual_identifier_()
@graken()
def _identifier_list_(self):
def sep0():
self._token(',')
def block0():
self._identifier_()
self._positive_closure(block0, prefix=sep0)
@graken()
def _actual_identifier_(self):
with self._choice():
with self._option():
self._regular_identifier_()
with self._option():
self._delimited_identifier_()
self._error('no available options')
@graken()
def _table_name_(self):
self._local_or_schema_qualified_name_()
@graken()
def _schema_name_(self):
with self._optional():
self._identifier_()
self._period_()
self._identifier_()
@graken()
def _schema_qualified_name_(self):
with self._optional():
self._schema_name_()
self._period_()
self._identifier_()
@graken()
def _local_or_schema_qualified_name_(self):
with self._optional():
self._local_or_schema_qualifier_()
self._period_()
self._identifier_()
@graken()
def _local_or_schema_qualifier_(self):
with self._choice():
with self._option():
self._schema_name_()
with self._option():
self._token('MODULE')
self._error('expecting one of: MODULE')
@graken()
def _cursor_name_(self):
self._local_qualified_name_()
@graken()
def _local_qualified_name_(self):
with self._optional():
self._token('MODULE')
self._period_()
self._identifier_()
@graken()
def _host_parameter_name_(self):
self._colon_()
self._identifier_()
@graken()
def _external_routine_name_(self):
with self._choice():
with self._option():
self._identifier_()
with self._option():
self._character_string_literal_()
self._error('no available options')
@graken()
def _character_set_name_(self):
with self._optional():
self._schema_name_()
self._period_()
self._regular_identifier_()
@graken()
def _connection_name_(self):
self._simple_value_specification_()
@graken()
def _sql_statement_name_(self):
with self._choice():
with self._option():
self._identifier_()
with self._option():
self._extended_statement_name_()
self._error('no available options')
@graken()
def _extended_statement_name_(self):
with self._optional():
self._scope_option_()
self._simple_value_specification_()
@graken()
def _dynamic_cursor_name_(self):
with self._choice():
with self._option():
self._cursor_name_()
with self._option():
self._extended_cursor_name_()
self._error('no available options')
@graken()
def _extended_cursor_name_(self):
with self._optional():
self._scope_option_()
self._simple_value_specification_()
@graken()
def _descriptor_name_(self):
with self._optional():
self._scope_option_()
self._simple_value_specification_()
@graken()
def _scope_option_(self):
with self._choice():
with self._option():
self._token('GLOBAL')
with self._option():
self._token('LOCAL')
self._error('expecting one of: GLOBAL LOCAL')
@graken()
def _data_type_(self):
with self._choice():
with self._option():
self._predefined_type_()
with self._option():
self._row_type_()
with self._option():
self._schema_qualified_name_()
with self._option():
self._reference_type_()
with self._option():
self._collection_type_()
self._error('no available options')
@graken()
def _predefined_type_(self):
with self._choice():
with self._option():
self._character_string_type_()
with self._optional():
self._token('CHARACTER')
self._token('SET')
self._character_set_name_()
with self._optional():
self._collate_clause_()
with self._option():
self._national_character_string_type_()
with self._optional():
self._collate_clause_()
with self._option():
self._binary_large_object_string_type_()
with self._option():
self._numeric_type_()
with self._option():
self._token('BOOLEAN')
with self._option():
self._datetime_type_()
with self._option():
self._interval_type_()
self._error('expecting one of: BOOLEAN')
@graken()
def _character_string_type_(self):
with self._choice():
with self._option():
self._token('CHARACTER')
with self._optional():
self._left_paren_()
self._length_()
self._right_paren_()
with self._option():
self._token('CHAR')
with self._optional():
self._left_paren_()
self._length_()
self._right_paren_()
with self._option():
self._token('CHARACTER')
self._token('VARYING')
self._left_paren_()
self._length_()
self._right_paren_()
with self._option():
self._token('CHAR')
self._token('VARYING')
self._left_paren_()
self._length_()
self._right_paren_()
with self._option():
self._token('VARCHAR')
self._left_paren_()
self._length_()
self._right_paren_()
with self._option():
self._character_large_object_type_()
self._error('expecting one of: CHAR CHARACTER')
@graken()
def _character_large_object_type_(self):
with self._choice():
with self._option():
self._token('CHARACTER')
self._token('LARGE')
self._token('OBJECT')
with self._optional():
self._left_paren_()
self._large_object_length_()
self._right_paren_()
with self._option():
self._token('CHAR')
self._token('LARGE')
self._token('OBJECT')
with self._optional():
self._left_paren_()
self._large_object_length_()
self._right_paren_()
with self._option():
self._token('CLOB')
with self._optional():
self._left_paren_()
self._large_object_length_()
self._right_paren_()
self._error('expecting one of: CHAR CHARACTER CLOB')
@graken()
def _national_character_string_type_(self):
with self._choice():
with self._option():
self._token('NATIONAL')
self._token('CHARACTER')
with self._optional():
self._left_paren_()
self._length_()
self._right_paren_()
with self._option():
self._token('NATIONAL')
self._token('CHAR')
with self._optional():
self._left_paren_()
self._length_()
self._right_paren_()
with self._option():
self._token('NCHAR')
with self._optional():
self._left_paren_()
self._length_()
self._right_paren_()
with self._option():
self._token('NATIONAL')
self._token('CHARACTER')
self._token('VARYING')
self._left_paren_()
self._length_()
self._right_paren_()
with self._option():
self._token('NATIONAL')
self._token('CHAR')
self._token('VARYING')
self._left_paren_()
self._length_()
self._right_paren_()
with self._option():
self._token('NCHAR')
self._token('VARYING')
self._left_paren_()
self._length_()
self._right_paren_()
with self._option():
self._national_character_large_object_type_()
self._error('expecting one of: NATIONAL NCHAR')
@graken()
def _national_character_large_object_type_(self):
with self._choice():
with self._option():
self._token('NATIONAL')
self._token('CHARACTER')
self._token('LARGE')
self._token('OBJECT')
with self._optional():
self._left_paren_()
self._large_object_length_()
self._right_paren_()
with self._option():
self._token('NCHAR')
self._token('LARGE')
self._token('OBJECT')
with self._optional():
self._left_paren_()
self._large_object_length_()
self._right_paren_()
with self._option():
self._token('NCLOB')
with self._optional():
self._left_paren_()
self._large_object_length_()
self._right_paren_()
self._error('expecting one of: NATIONAL NCHAR NCLOB')
@graken()
def _binary_large_object_string_type_(self):
with self._choice():
with self._option():
self._token('BINARY')
self._token('LARGE')
self._token('OBJECT')
with self._optional():
self._left_paren_()
self._large_object_length_()
self._right_paren_()
with self._option():
self._token('BLOB')
with self._optional():
self._left_paren_()
self._large_object_length_()
self._right_paren_()
self._error('expecting one of: BINARY BLOB')
@graken()
def _numeric_type_(self):
with self._choice():
with self._option():
self._exact_numeric_type_()
with self._option():
self._approximate_numeric_type_()
self._error('no available options')
@graken()
def _exact_numeric_type_(self):
with self._choice():
with self._option():
self._token('NUMERIC')
with self._optional():
self._left_paren_()
self._precision_()
with self._optional():
self._comma_()
self._scale_()
self._right_paren_()
with self._option():
self._token('DECIMAL')
with self._optional():
self._left_paren_()
self._precision_()
with self._optional():
self._comma_()
self._scale_()
self._right_paren_()
with self._option():
self._token('DEC')
with self._optional():
self._left_paren_()
self._precision_()
with self._optional():
self._comma_()
self._scale_()
self._right_paren_()
with self._option():
self._token('SMALLINT')
with self._option():
self._token('INTEGER')
with self._option():
self._token('INT')
with self._option():
self._token('BIGINT')
self._error('expecting one of: BIGINT DEC DECIMAL INT INTEGER NUMERIC SMALLINT')
@graken()
def _approximate_numeric_type_(self):
with self._choice():
with self._option():
self._token('FLOAT')
with self._optional():
self._left_paren_()
self._precision_()
self._right_paren_()
with self._option():
self._token('REAL')
with self._option():
self._token('DOUBLE')
self._token('PRECISION')
self._error('expecting one of: DOUBLE FLOAT REAL')
@graken()
def _length_(self):
self._unsigned_integer_()
@graken()
def _large_object_length_(self):
with self._choice():
with self._option():
self._unsigned_integer_()
with self._optional():
self._multiplier_()
with self._optional():
self._char_length_units_()
with self._option():
self._large_object_length_token_()
with self._optional():
self._char_length_units_()
self._error('no available options')
@graken()
def _char_length_units_(self):
with self._choice():
with self._option():
self._token('CHARACTERS')
with self._option():
self._token('CODE_UNITS')
with self._option():
self._token('OCTETS')
self._error('expecting one of: CHARACTERS CODE_UNITS OCTETS')
@graken()
def _precision_(self):
self._unsigned_integer_()
@graken()
def _scale_(self):
self._unsigned_integer_()
@graken()
def _datetime_type_(self):
with self._choice():
with self._option():
self._token('DATE')
with self._option():
self._token('TIME')
with self._optional():
self._left_paren_()
self._precision_()
self._right_paren_()
with self._optional():
self._with_or_without_time_zone_()
with self._option():
self._token('TIMESTAMP')
with self._optional():
self._left_paren_()
self._precision_()
self._right_paren_()
with self._optional():
self._with_or_without_time_zone_()
self._error('expecting one of: DATE TIME TIMESTAMP')
@graken()
def _with_or_without_time_zone_(self):
with self._choice():
with self._option():
self._token('WITH')
self._token('TIME')
self._token('ZONE')
with self._option():
self._token('WITHOUT')
self._token('TIME')
self._token('ZONE')
self._error('expecting one of: WITH WITHOUT')
@graken()
def _interval_type_(self):
self._token('INTERVAL')
self._interval_qualifier_()
@graken()
def _row_type_(self):
self._token('ROW')
self._row_type_body_()
@graken()
def _row_type_body_(self):
self._left_paren_()
def sep0():
self._token(',')
def block0():
self._field_definition_()
self._positive_closure(block0, prefix=sep0)
self._right_paren_()
@graken()
def _reference_type_(self):
self._token('REF')
self._left_paren_()
self._schema_qualified_name_()
self._right_paren_()
with self._optional():
self._scope_clause_()
@graken()
def _scope_clause_(self):
self._token('SCOPE')
self._table_name_()
@graken()
def _collection_type_(self):
with self._choice():
with self._option():
self._array_type_()
with self._option():
self._multiset_type_()
self._error('no available options')
@graken()
def _array_type_(self):
self._data_type_()
self._token('ARRAY')
with self._optional():
self._left_bracket_or_trigraph_()
self._unsigned_integer_()
self._right_bracket_or_trigraph_()
@graken()
def _multiset_type_(self):
self._data_type_()
self._token('MULTISET')
@graken()
def _field_definition_(self):
self._identifier_()
self._data_type_()
with self._optional():
self._reference_scope_check_()
@graken()
def _value_expression_primary_(self):
with self._choice():
with self._option():
self._parenthesized_value_expression_()
with self._option():
self._nonparenthesized_value_expression_primary_()
self._error('no available options')
@graken()
def _parenthesized_value_expression_(self):
self._left_paren_()
self._value_expression_()
self._right_paren_()
@graken()
def _nonparenthesized_value_expression_primary_(self):
with self._choice():
with self._option():
self._unsigned_value_specification_()
with self._option():
self._column_reference_()
with self._option():
self._set_function_specification_()
with self._option():
self._window_function_()
with self._option():
self._subquery_()
with self._option():
self._case_expression_()
with self._option():
self._cast_specification_()
with self._option():
self._field_reference_()
with self._option():
self._subtype_treatment_()
with self._option():
self._method_invocation_()
with self._option():
self._static_method_invocation_()
with self._option():
self._new_specification_()
with self._option():
self._attribute_or_method_reference_()
with self._option():
self._reference_resolution_()
with self._option():
self._collection_value_constructor_()
with self._option():
self._array_element_reference_()
with self._option():
self._multiset_element_reference_()
with self._option():
self._routine_invocation_()
with self._option():
self._next_value_expression_()
self._error('no available options')
@graken()
def _collection_value_constructor_(self):
with self._choice():
with self._option():
self._array_value_constructor_()
with self._option():
self._multiset_value_constructor_()
self._error('no available options')
# --- SQL <value specification> / <target specification> productions ---
@graken()
def _value_specification_(self):
    with self._choice():
        with self._option():
            self._literal_()
        with self._option():
            self._general_value_specification_()
        self._error('no available options')

@graken()
def _unsigned_value_specification_(self):
    with self._choice():
        with self._option():
            self._unsigned_literal_()
        with self._option():
            self._general_value_specification_()
        self._error('no available options')

@graken()
def _general_value_specification_(self):
    with self._choice():
        with self._option():
            self._host_parameter_specification_()
        with self._option():
            self._sql_parameter_reference_()
        with self._option():
            self._dynamic_parameter_specification_()
        with self._option():
            self._current_collation_specification_()
        with self._option():
            self._token('CURRENT_DEFAULT_TRANSFORM_GROUP')
        with self._option():
            self._token('CURRENT_PATH')
        with self._option():
            self._token('CURRENT_ROLE')
        with self._option():
            # CURRENT_TRANSFORM_GROUP_FOR_TYPE <path-resolved user-defined type name>
            self._token('CURRENT_TRANSFORM_GROUP_FOR_TYPE')
            self._schema_qualified_name_()
        with self._option():
            self._token('CURRENT_USER')
        with self._option():
            self._token('SESSION_USER')
        with self._option():
            self._token('SYSTEM_USER')
        with self._option():
            self._token('USER')
        with self._option():
            self._token('VALUE')
        self._error('expecting one of: CURRENT_DEFAULT_TRANSFORM_GROUP CURRENT_PATH CURRENT_ROLE CURRENT_USER SESSION_USER SYSTEM_USER USER VALUE')

@graken()
def _simple_value_specification_(self):
    with self._choice():
        with self._option():
            self._literal_()
        with self._option():
            self._host_parameter_name_()
        with self._option():
            self._sql_parameter_reference_()
        self._error('no available options')

@graken()
def _target_specification_(self):
    with self._choice():
        with self._option():
            self._host_parameter_specification_()
        with self._option():
            self._sql_parameter_reference_()
        with self._option():
            self._column_reference_()
        with self._option():
            self._target_array_element_specification_()
        with self._option():
            self._dynamic_parameter_specification_()
        self._error('no available options')

@graken()
def _simple_target_specification_(self):
    with self._choice():
        with self._option():
            self._host_parameter_specification_()
        with self._option():
            self._sql_parameter_reference_()
        with self._option():
            self._column_reference_()
        self._error('no available options')

@graken()
def _host_parameter_specification_(self):
    # <host parameter name> [ <indicator parameter> ]
    self._host_parameter_name_()
    with self._optional():
        self._indicator_parameter_()

@graken()
def _dynamic_parameter_specification_(self):
    # '?' placeholder.
    self._question_mark_()

@graken()
def _indicator_parameter_(self):
    # [ INDICATOR ] <host parameter name>
    with self._optional():
        self._token('INDICATOR')
    self._host_parameter_name_()

@graken()
def _target_array_element_specification_(self):
    # <target array reference> '[' <simple value specification> ']'
    self._target_array_reference_()
    self._left_bracket_or_trigraph_()
    self._simple_value_specification_()
    self._right_bracket_or_trigraph_()

@graken()
def _target_array_reference_(self):
    with self._choice():
        with self._option():
            self._sql_parameter_reference_()
        with self._option():
            self._column_reference_()
        self._error('no available options')
# --- collation / contextually typed / empty specifications ---
@graken()
def _current_collation_specification_(self):
    # CURRENT_COLLATION '(' <string value expression> ')'
    self._token('CURRENT_COLLATION')
    self._left_paren_()
    self._string_value_expression_()
    self._right_paren_()

@graken()
def _contextually_typed_value_specification_(self):
    with self._choice():
        with self._option():
            self._implicitly_typed_value_specification_()
        with self._option():
            self._token('DEFAULT')
        self._error('expecting one of: DEFAULT')

@graken()
def _implicitly_typed_value_specification_(self):
    with self._choice():
        with self._option():
            self._token('NULL')
        with self._option():
            self._empty_specification_()
        self._error('expecting one of: NULL')

@graken()
def _empty_specification_(self):
    # ARRAY '[' ']'  |  MULTISET '[' ']'
    with self._choice():
        with self._option():
            self._token('ARRAY')
            self._left_bracket_or_trigraph_()
            self._right_bracket_or_trigraph_()
        with self._option():
            self._token('MULTISET')
            self._left_bracket_or_trigraph_()
            self._right_bracket_or_trigraph_()
        self._error('no available options')
# --- identifier chains, column references, set/window functions ---
@graken()
def _identifier_chain_(self):
    # One or more identifiers separated by '.'
    def sep0():
        self._token('.')
    def block0():
        self._identifier_()
    self._positive_closure(block0, prefix=sep0)

@graken()
def _column_reference_(self):
    with self._choice():
        with self._option():
            self._identifier_chain_()
        with self._option():
            # MODULE '.' <qualifying identifier> '.' <column name>
            self._token('MODULE')
            self._period_()
            self._identifier_()
            self._period_()
            self._identifier_()
        self._error('no available options')

@graken()
def _sql_parameter_reference_(self):
    self._identifier_chain_()

@graken()
def _set_function_specification_(self):
    with self._choice():
        with self._option():
            self._aggregate_function_()
        with self._option():
            self._grouping_operation_()
        self._error('no available options')

@graken()
def _grouping_operation_(self):
    # GROUPING '(' <column reference> [ { ',' <column reference> }... ] ')'
    self._token('GROUPING')
    self._left_paren_()
    def sep0():
        self._token(',')
    def block0():
        self._column_reference_()
    self._positive_closure(block0, prefix=sep0)
    self._right_paren_()

@graken()
def _window_function_(self):
    # <window function type> OVER <window name or specification>
    self._window_function_type_()
    self._token('OVER')
    self._window_name_or_specification_()

@graken()
def _window_function_type_(self):
    with self._choice():
        with self._option():
            self._rank_function_type_()
            self._left_paren_()
            self._right_paren_()
        with self._option():
            self._token('ROW_NUMBER')
            self._left_paren_()
            self._right_paren_()
        with self._option():
            self._aggregate_function_()
        self._error('no available options')

@graken()
def _rank_function_type_(self):
    with self._choice():
        with self._option():
            self._token('RANK')
        with self._option():
            self._token('DENSE_RANK')
        with self._option():
            self._token('PERCENT_RANK')
        with self._option():
            self._token('CUME_DIST')
        self._error('expecting one of: CUME_DIST DENSE_RANK PERCENT_RANK RANK')

@graken()
def _window_name_or_specification_(self):
    with self._choice():
        with self._option():
            self._identifier_()
        with self._option():
            self._window_specification_()
        self._error('no available options')
# --- CASE expression productions ---
@graken()
def _case_expression_(self):
    with self._choice():
        with self._option():
            self._case_abbreviation_()
        with self._option():
            self._case_specification_()
        self._error('no available options')

@graken()
def _case_abbreviation_(self):
    # NULLIF(a, b)  |  COALESCE(a, b [, ...])
    with self._choice():
        with self._option():
            self._token('NULLIF')
            self._left_paren_()
            self._value_expression_()
            self._comma_()
            self._value_expression_()
            self._right_paren_()
        with self._option():
            self._token('COALESCE')
            self._left_paren_()
            self._value_expression_()
            # COALESCE requires at least two arguments: one-or-more ', expr' tails.
            def block0():
                self._comma_()
                self._value_expression_()
            self._positive_closure(block0)
            self._right_paren_()
        self._error('no available options')

@graken()
def _case_specification_(self):
    with self._choice():
        with self._option():
            self._simple_case_()
        with self._option():
            self._searched_case_()
        self._error('no available options')

@graken()
def _simple_case_(self):
    # CASE <operand> WHEN ... THEN ... [ELSE ...] END
    self._token('CASE')
    self._case_operand_()
    def block0():
        self._simple_when_clause_()
    self._positive_closure(block0)
    with self._optional():
        self._else_clause_()
    self._token('END')

@graken()
def _searched_case_(self):
    # CASE WHEN <condition> THEN ... [ELSE ...] END
    self._token('CASE')
    def block0():
        self._searched_when_clause_()
    self._positive_closure(block0)
    with self._optional():
        self._else_clause_()
    self._token('END')

@graken()
def _simple_when_clause_(self):
    self._token('WHEN')
    self._when_operand_()
    self._token('THEN')
    self._result_()

@graken()
def _searched_when_clause_(self):
    self._token('WHEN')
    self._search_condition_()
    self._token('THEN')
    self._result_()

@graken()
def _else_clause_(self):
    self._token('ELSE')
    self._result_()

@graken()
def _case_operand_(self):
    with self._choice():
        with self._option():
            self._row_value_predicand_()
        with self._option():
            self._overlaps_predicate_part_1_()
        self._error('no available options')

@graken()
def _when_operand_(self):
    # Either a plain operand or the right-hand part of any predicate form.
    with self._choice():
        with self._option():
            self._row_value_predicand_()
        with self._option():
            self._comparison_predicate_part_2_()
        with self._option():
            self._between_predicate_part_2_()
        with self._option():
            self._in_predicate_part_2_()
        with self._option():
            self._character_like_predicate_part_2_()
        with self._option():
            self._octet_like_predicate_part_2_()
        with self._option():
            self._similar_predicate_part_2_()
        with self._option():
            self._null_predicate_part_2_()
        with self._option():
            self._quantified_comparison_predicate_part_2_()
        with self._option():
            self._match_predicate_part_2_()
        with self._option():
            self._overlaps_predicate_part_2_()
        with self._option():
            self._distinct_predicate_part_2_()
        with self._option():
            self._member_predicate_part_2_()
        with self._option():
            self._submultiset_predicate_part_2_()
        with self._option():
            self._set_predicate_part_2_()
        with self._option():
            self._type_predicate_part_2_()
        self._error('no available options')

@graken()
def _result_(self):
    with self._choice():
        with self._option():
            self._value_expression_()
        with self._option():
            self._token('NULL')
        self._error('expecting one of: NULL')
# --- CAST, references, method invocations, element references ---
@graken()
def _cast_specification_(self):
    # CAST '(' <operand> AS <target> ')'
    self._token('CAST')
    self._left_paren_()
    self._cast_operand_()
    self._token('AS')
    self._cast_target_()
    self._right_paren_()

@graken()
def _cast_operand_(self):
    with self._choice():
        with self._option():
            self._value_expression_()
        with self._option():
            self._implicitly_typed_value_specification_()
        self._error('no available options')

@graken()
def _cast_target_(self):
    # Either a domain/type name or an inline data type.
    with self._choice():
        with self._option():
            self._schema_qualified_name_()
        with self._option():
            self._data_type_()
        self._error('no available options')

@graken()
def _next_value_expression_(self):
    # NEXT VALUE FOR <sequence generator name>
    self._token('NEXT')
    self._token('VALUE')
    self._token('FOR')
    self._schema_qualified_name_()

@graken()
def _field_reference_(self):
    # <value expression primary> '.' <field name>
    self._value_expression_primary_()
    self._period_()
    self._identifier_()

@graken()
def _subtype_treatment_(self):
    # TREAT '(' <value expression> AS <target subtype> ')'
    self._token('TREAT')
    self._left_paren_()
    self._value_expression_()
    self._token('AS')
    self._target_subtype_()
    self._right_paren_()

@graken()
def _target_subtype_(self):
    with self._choice():
        with self._option():
            self._schema_qualified_name_()
        with self._option():
            self._reference_type_()
        self._error('no available options')

@graken()
def _method_invocation_(self):
    with self._choice():
        with self._option():
            self._direct_invocation_()
        with self._option():
            self._generalized_invocation_()
        self._error('no available options')

@graken()
def _direct_invocation_(self):
    # <value expression primary> '.' <method name> [ <sql argument list> ]
    self._value_expression_primary_()
    self._period_()
    self._identifier_()
    with self._optional():
        self._sql_argument_list_()

@graken()
def _generalized_invocation_(self):
    # '(' <value expression primary> AS <data type> ')' '.' <method name> [args]
    self._left_paren_()
    self._value_expression_primary_()
    self._token('AS')
    self._data_type_()
    self._right_paren_()
    self._period_()
    self._identifier_()
    with self._optional():
        self._sql_argument_list_()

@graken()
def _static_method_invocation_(self):
    # <type name> '::' <method name> [ <sql argument list> ]
    self._schema_qualified_name_()
    self._double_colon_()
    self._identifier_()
    with self._optional():
        self._sql_argument_list_()

@graken()
def _new_specification_(self):
    # NEW <routine invocation>
    self._token('NEW')
    self._routine_invocation_()

@graken()
def _attribute_or_method_reference_(self):
    # <value expression primary> '->' <identifier> [ <sql argument list> ]
    self._value_expression_primary_()
    self._right_arrow_()
    self._identifier_()
    with self._optional():
        self._sql_argument_list_()

@graken()
def _reference_resolution_(self):
    # DEREF '(' <reference value expression> ')'
    self._token('DEREF')
    self._left_paren_()
    self._reference_value_expression_()
    self._right_paren_()

@graken()
def _array_element_reference_(self):
    # <array value expression> '[' <numeric value expression> ']'
    self._array_value_expression_()
    self._left_bracket_or_trigraph_()
    self._numeric_value_expression_()
    self._right_bracket_or_trigraph_()

@graken()
def _multiset_element_reference_(self):
    # ELEMENT '(' <multiset value expression> ')'
    self._token('ELEMENT')
    self._left_paren_()
    self._multiset_value_expression_()
    self._right_paren_()
# --- top-level <value expression> dispatch ---
@graken()
def _value_expression_(self):
    with self._choice():
        with self._option():
            self._common_value_expression_()
        with self._option():
            self._boolean_value_expression_()
        with self._option():
            self._row_value_expression_()
        self._error('no available options')

@graken()
def _common_value_expression_(self):
    with self._choice():
        with self._option():
            self._numeric_value_expression_()
        with self._option():
            self._string_value_expression_()
        with self._option():
            self._datetime_value_expression_()
        with self._option():
            self._interval_value_expression_()
        with self._option():
            self._user_defined_type_value_expression_()
        with self._option():
            self._reference_value_expression_()
        with self._option():
            self._collection_value_expression_()
        self._error('no available options')

@graken()
def _user_defined_type_value_expression_(self):
    # Alias production: just a <value expression primary>.
    self._value_expression_primary_()

@graken()
def _reference_value_expression_(self):
    # Alias production: just a <value expression primary>.
    self._value_expression_primary_()

@graken()
def _collection_value_expression_(self):
    with self._choice():
        with self._option():
            self._array_value_expression_()
        with self._option():
            self._multiset_value_expression_()
        self._error('no available options')
# --- numeric expression productions ---
@graken()
def _numeric_value_expression_(self):
    # term | expr '+' term | expr '-' term
    # NOTE: alternatives 2 and 3 call this rule again (left recursion);
    # relies on the parser runtime's left-recursion handling.
    with self._choice():
        with self._option():
            self._term_()
        with self._option():
            self._numeric_value_expression_()
            self._plus_sign_()
            self._term_()
        with self._option():
            self._numeric_value_expression_()
            self._minus_sign_()
            self._term_()
        self._error('no available options')

@graken()
def _term_(self):
    # factor | term '*' factor | term '/' factor (left-recursive as above)
    with self._choice():
        with self._option():
            self._factor_()
        with self._option():
            self._term_()
            self._asterisk_()
            self._factor_()
        with self._option():
            self._term_()
            self._solidus_()
            self._factor_()
        self._error('no available options')

@graken()
def _factor_(self):
    # [ <sign> ] <numeric primary>
    with self._optional():
        self._sign_()
    self._numeric_primary_()

@graken()
def _numeric_primary_(self):
    with self._choice():
        with self._option():
            self._value_expression_primary_()
        with self._option():
            self._numeric_value_function_()
        self._error('no available options')

@graken()
def _numeric_value_function_(self):
    with self._choice():
        with self._option():
            self._position_expression_()
        with self._option():
            self._extract_expression_()
        with self._option():
            self._length_expression_()
        with self._option():
            self._cardinality_expression_()
        with self._option():
            self._absolute_value_expression_()
        with self._option():
            self._modulus_expression_()
        with self._option():
            self._natural_logarithm_()
        with self._option():
            self._exponential_function_()
        with self._option():
            self._power_function_()
        with self._option():
            self._square_root_()
        with self._option():
            self._floor_function_()
        with self._option():
            self._ceiling_function_()
        with self._option():
            self._width_bucket_function_()
        self._error('no available options')
# --- POSITION / LENGTH / EXTRACT / CARDINALITY / ABS / MOD ---
@graken()
def _position_expression_(self):
    with self._choice():
        with self._option():
            self._string_position_expression_()
        with self._option():
            self._blob_position_expression_()
        self._error('no available options')

@graken()
def _string_position_expression_(self):
    # POSITION '(' s1 IN s2 [ USING <char length units> ] ')'
    self._token('POSITION')
    self._left_paren_()
    self._string_value_expression_()
    self._token('IN')
    self._string_value_expression_()
    with self._optional():
        self._token('USING')
        self._char_length_units_()
    self._right_paren_()

@graken()
def _blob_position_expression_(self):
    # POSITION '(' b1 IN b2 ')'
    self._token('POSITION')
    self._left_paren_()
    self._blob_value_expression_()
    self._token('IN')
    self._blob_value_expression_()
    self._right_paren_()

@graken()
def _length_expression_(self):
    with self._choice():
        with self._option():
            self._char_length_expression_()
        with self._option():
            self._octet_length_expression_()
        self._error('no available options')

@graken()
def _char_length_expression_(self):
    # { CHAR_LENGTH | CHARACTER_LENGTH } '(' s [ USING units ] ')'
    with self._group():
        with self._choice():
            with self._option():
                self._token('CHAR_LENGTH')
            with self._option():
                self._token('CHARACTER_LENGTH')
            self._error('expecting one of: CHARACTER_LENGTH CHAR_LENGTH')
    self._left_paren_()
    self._string_value_expression_()
    with self._optional():
        self._token('USING')
        self._char_length_units_()
    self._right_paren_()

@graken()
def _octet_length_expression_(self):
    self._token('OCTET_LENGTH')
    self._left_paren_()
    self._string_value_expression_()
    self._right_paren_()

@graken()
def _extract_expression_(self):
    # EXTRACT '(' <field> FROM <source> ')'
    self._token('EXTRACT')
    self._left_paren_()
    self._extract_field_()
    self._token('FROM')
    self._extract_source_()
    self._right_paren_()

@graken()
def _extract_field_(self):
    with self._choice():
        with self._option():
            self._primary_datetime_field_()
        with self._option():
            self._time_zone_field_()
        self._error('no available options')

@graken()
def _time_zone_field_(self):
    with self._choice():
        with self._option():
            self._token('TIMEZONE_HOUR')
        with self._option():
            self._token('TIMEZONE_MINUTE')
        self._error('expecting one of: TIMEZONE_HOUR TIMEZONE_MINUTE')

@graken()
def _extract_source_(self):
    with self._choice():
        with self._option():
            self._datetime_value_expression_()
        with self._option():
            self._interval_value_expression_()
        self._error('no available options')

@graken()
def _cardinality_expression_(self):
    self._token('CARDINALITY')
    self._left_paren_()
    self._collection_value_expression_()
    self._right_paren_()

@graken()
def _absolute_value_expression_(self):
    self._token('ABS')
    self._left_paren_()
    self._numeric_value_expression_()
    self._right_paren_()

@graken()
def _modulus_expression_(self):
    # MOD '(' dividend ',' divisor ')'
    self._token('MOD')
    self._left_paren_()
    self._numeric_value_expression_()
    self._comma_()
    self._numeric_value_expression_()
    self._right_paren_()
# --- numeric builtin functions / string expression dispatch ---
@graken()
def _natural_logarithm_(self):
    self._token('LN')
    self._left_paren_()
    self._numeric_value_expression_()
    self._right_paren_()

@graken()
def _exponential_function_(self):
    self._token('EXP')
    self._left_paren_()
    self._numeric_value_expression_()
    self._right_paren_()

@graken()
def _power_function_(self):
    # POWER '(' base ',' exponent ')'
    self._token('POWER')
    self._left_paren_()
    self._numeric_value_expression_()
    self._comma_()
    self._numeric_value_expression_()
    self._right_paren_()

@graken()
def _square_root_(self):
    self._token('SQRT')
    self._left_paren_()
    self._numeric_value_expression_()
    self._right_paren_()

@graken()
def _floor_function_(self):
    self._token('FLOOR')
    self._left_paren_()
    self._numeric_value_expression_()
    self._right_paren_()

@graken()
def _ceiling_function_(self):
    # { CEIL | CEILING } '(' expr ')'
    with self._group():
        with self._choice():
            with self._option():
                self._token('CEIL')
            with self._option():
                self._token('CEILING')
            self._error('expecting one of: CEIL CEILING')
    self._left_paren_()
    self._numeric_value_expression_()
    self._right_paren_()

@graken()
def _width_bucket_function_(self):
    # WIDTH_BUCKET '(' operand ',' bound1 ',' bound2 ',' count ')'
    self._token('WIDTH_BUCKET')
    self._left_paren_()
    self._numeric_value_expression_()
    self._comma_()
    self._numeric_value_expression_()
    self._comma_()
    self._numeric_value_expression_()
    self._comma_()
    self._numeric_value_expression_()
    self._right_paren_()

@graken()
def _string_value_expression_(self):
    with self._choice():
        with self._option():
            self._character_value_expression_()
        with self._option():
            self._blob_value_expression_()
        self._error('no available options')
# --- character / BLOB value expression forms ---
@graken()
def _character_value_expression_(self):
    with self._choice():
        with self._option():
            self._concatenation_()
        with self._option():
            self._character_factor_()
        self._error('no available options')

@graken()
def _concatenation_(self):
    # <character value expression> '||' <character factor>  (left-recursive)
    self._character_value_expression_()
    self._concatenation_operator_()
    self._character_factor_()

@graken()
def _character_factor_(self):
    # <character primary> [ <collate clause> ]
    self._character_primary_()
    with self._optional():
        self._collate_clause_()

@graken()
def _character_primary_(self):
    with self._choice():
        with self._option():
            self._value_expression_primary_()
        with self._option():
            self._string_value_function_()
        self._error('no available options')

@graken()
def _blob_value_expression_(self):
    with self._choice():
        with self._option():
            self._blob_concatenation_()
        with self._option():
            self._blob_factor_()
        self._error('no available options')

@graken()
def _blob_factor_(self):
    self._blob_primary_()

@graken()
def _blob_primary_(self):
    with self._choice():
        with self._option():
            self._value_expression_primary_()
        with self._option():
            self._string_value_function_()
        self._error('no available options')

@graken()
def _blob_concatenation_(self):
    # <blob value expression> '||' <blob factor>  (left-recursive)
    self._blob_value_expression_()
    self._concatenation_operator_()
    self._blob_factor_()

@graken()
def _string_value_function_(self):
    with self._choice():
        with self._option():
            self._character_value_function_()
        with self._option():
            self._blob_value_function_()
        self._error('no available options')
# --- character string functions: SUBSTRING, UPPER/LOWER, CONVERT, TRIM ... ---
@graken()
def _character_value_function_(self):
    with self._choice():
        with self._option():
            self._character_substring_function_()
        with self._option():
            self._regular_expression_substring_function_()
        with self._option():
            self._fold_()
        with self._option():
            self._transcoding_()
        with self._option():
            self._character_transliteration_()
        with self._option():
            self._trim_function_()
        with self._option():
            self._character_overlay_function_()
        with self._option():
            self._normalize_function_()
        with self._option():
            self._specific_type_method_()
        self._error('no available options')

@graken()
def _character_substring_function_(self):
    # SUBSTRING '(' s FROM start [ FOR length ] [ USING units ] ')'
    self._token('SUBSTRING')
    self._left_paren_()
    self._character_value_expression_()
    self._token('FROM')
    self._start_position_()
    with self._optional():
        self._token('FOR')
        self._string_length_()
    with self._optional():
        self._token('USING')
        self._char_length_units_()
    self._right_paren_()

@graken()
def _regular_expression_substring_function_(self):
    # SUBSTRING '(' s SIMILAR pattern ESCAPE esc ')'
    self._token('SUBSTRING')
    self._left_paren_()
    self._character_value_expression_()
    self._token('SIMILAR')
    self._character_value_expression_()
    self._token('ESCAPE')
    self._escape_character_()
    self._right_paren_()

@graken()
def _fold_(self):
    # { UPPER | LOWER } '(' s ')'
    with self._group():
        with self._choice():
            with self._option():
                self._token('UPPER')
            with self._option():
                self._token('LOWER')
            self._error('expecting one of: LOWER UPPER')
    self._left_paren_()
    self._character_value_expression_()
    self._right_paren_()

@graken()
def _transcoding_(self):
    # CONVERT '(' s USING <transcoding name> ')'
    self._token('CONVERT')
    self._left_paren_()
    self._character_value_expression_()
    self._token('USING')
    self._schema_qualified_name_()
    self._right_paren_()

@graken()
def _character_transliteration_(self):
    # TRANSLATE '(' s USING <transliteration name> ')'
    self._token('TRANSLATE')
    self._left_paren_()
    self._character_value_expression_()
    self._token('USING')
    self._schema_qualified_name_()
    self._right_paren_()

@graken()
def _trim_function_(self):
    self._token('TRIM')
    self._left_paren_()
    self._trim_operands_()
    self._right_paren_()

@graken()
def _trim_operands_(self):
    # [ [ <trim specification> ] [ <trim character> ] FROM ] <trim source>
    with self._optional():
        with self._optional():
            self._trim_specification_()
        with self._optional():
            self._character_value_expression_()
        self._token('FROM')
    self._character_value_expression_()

@graken()
def _trim_specification_(self):
    with self._choice():
        with self._option():
            self._token('LEADING')
        with self._option():
            self._token('TRAILING')
        with self._option():
            self._token('BOTH')
        self._error('expecting one of: BOTH LEADING TRAILING')
# --- OVERLAY / NORMALIZE / SPECIFICTYPE / BLOB string functions ---
@graken()
def _character_overlay_function_(self):
    # OVERLAY '(' s PLACING r FROM start [ FOR length ] [ USING units ] ')'
    self._token('OVERLAY')
    self._left_paren_()
    self._character_value_expression_()
    self._token('PLACING')
    self._character_value_expression_()
    self._token('FROM')
    self._start_position_()
    with self._optional():
        self._token('FOR')
        self._string_length_()
    with self._optional():
        self._token('USING')
        self._char_length_units_()
    self._right_paren_()

@graken()
def _normalize_function_(self):
    self._token('NORMALIZE')
    self._left_paren_()
    self._character_value_expression_()
    self._right_paren_()

@graken()
def _specific_type_method_(self):
    # <user-defined type value expression> '.' SPECIFICTYPE
    self._user_defined_type_value_expression_()
    self._period_()
    self._token('SPECIFICTYPE')

@graken()
def _blob_value_function_(self):
    with self._choice():
        with self._option():
            self._blob_substring_function_()
        with self._option():
            self._blob_trim_function_()
        with self._option():
            self._blob_overlay_function_()
        self._error('no available options')

@graken()
def _blob_substring_function_(self):
    # SUBSTRING '(' b FROM start [ FOR length ] ')'
    self._token('SUBSTRING')
    self._left_paren_()
    self._blob_value_expression_()
    self._token('FROM')
    self._start_position_()
    with self._optional():
        self._token('FOR')
        self._string_length_()
    self._right_paren_()

@graken()
def _blob_trim_function_(self):
    self._token('TRIM')
    self._left_paren_()
    self._blob_trim_operands_()
    self._right_paren_()

@graken()
def _blob_trim_operands_(self):
    # [ [ <trim specification> ] [ <trim octet> ] FROM ] <blob trim source>
    with self._optional():
        with self._optional():
            self._trim_specification_()
        with self._optional():
            self._blob_value_expression_()
        self._token('FROM')
    self._blob_value_expression_()

@graken()
def _blob_overlay_function_(self):
    # OVERLAY '(' b PLACING r FROM start [ FOR length ] ')'
    self._token('OVERLAY')
    self._left_paren_()
    self._blob_value_expression_()
    self._token('PLACING')
    self._blob_value_expression_()
    self._token('FROM')
    self._start_position_()
    with self._optional():
        self._token('FOR')
        self._string_length_()
    self._right_paren_()

@graken()
def _start_position_(self):
    # Alias production: a numeric expression naming the start offset.
    self._numeric_value_expression_()

@graken()
def _string_length_(self):
    # Alias production: a numeric expression naming the length.
    self._numeric_value_expression_()
def _datetime_value_expression_(self):
with self._choice():
with self._option():
self._datetime_term_()
with self._option():
self._interval_value_expression_()
self._plus_sign_()
self._datetime_term_()
with self._option():
self._datetime_value_expression_()
self._plus_sign_()
self._interval_term_()
with self._option():
self._datetime_value_expression_()
self._minus_sign_()
self._interval_term_()
self._error('no available options')
@graken()
def _datetime_term_(self):
self._datetime_factor_()
@graken()
def _datetime_factor_(self):
self._datetime_primary_()
with self._optional():
self._time_zone_()
@graken()
def _datetime_primary_(self):
with self._choice():
with self._option():
self._value_expression_primary_()
with self._option():
self._datetime_value_function_()
self._error('no available options')
@graken()
def _time_zone_(self):
self._token('AT')
self._time_zone_specifier_()
@graken()
def _time_zone_specifier_(self):
with self._choice():
with self._option():
self._token('LOCAL')
with self._option():
self._token('TIME')
self._token('ZONE')
self._interval_primary_()
self._error('expecting one of: LOCAL')
@graken()
def _datetime_value_function_(self):
with self._choice():
with self._option():
self._token('CURRENT_DATE')
with self._option():
self._current_time_value_function_()
with self._option():
self._current_timestamp_value_function_()
with self._option():
self._current_local_time_value_function_()
with self._option():
self._current_local_timestamp_value_function_()
self._error('expecting one of: CURRENT_DATE')
@graken()
def _current_time_value_function_(self):
self._token('CURRENT_TIME')
with self._optional():
self._left_paren_()
self._precision_()
self._right_paren_()
@graken()
def _current_local_time_value_function_(self):
self._token('LOCALTIME')
with self._optional():
self._left_paren_()
self._precision_()
self._right_paren_()
@graken()
def _current_timestamp_value_function_(self):
self._token('CURRENT_TIMESTAMP')
with self._optional():
self._left_paren_()
self._precision_()
self._right_paren_()
@graken()
def _current_local_timestamp_value_function_(self):
self._token('LOCALTIMESTAMP')
with self._optional():
self._left_paren_()
self._precision_()
self._right_paren_()
# --- interval expression productions ---
@graken()
def _interval_value_expression_(self):
    # term | expr '+' term | expr '-' term |
    # '(' datetime '-' datetime-term ')' <interval qualifier>
    with self._choice():
        with self._option():
            self._interval_term_()
        with self._option():
            self._interval_value_expression_1_()
            self._plus_sign_()
            self._interval_term_1_()
        with self._option():
            self._interval_value_expression_1_()
            self._minus_sign_()
            self._interval_term_1_()
        with self._option():
            self._left_paren_()
            self._datetime_value_expression_()
            self._minus_sign_()
            self._datetime_term_()
            self._right_paren_()
            self._interval_qualifier_()
        self._error('no available options')

@graken()
def _interval_term_(self):
    # factor | term '*' factor | term '/' factor | term '*' interval-factor
    with self._choice():
        with self._option():
            self._interval_factor_()
        with self._option():
            self._interval_term_2_()
            self._asterisk_()
            self._factor_()
        with self._option():
            self._interval_term_2_()
            self._solidus_()
            self._factor_()
        with self._option():
            self._term_()
            self._asterisk_()
            self._interval_factor_()
        self._error('no available options')

@graken()
def _interval_factor_(self):
    # [ <sign> ] <interval primary>
    with self._optional():
        self._sign_()
    self._interval_primary_()

@graken()
def _interval_primary_(self):
    with self._choice():
        with self._option():
            self._value_expression_primary_()
            with self._optional():
                self._interval_qualifier_()
        with self._option():
            self._interval_absolute_value_function_()
        self._error('no available options')

@graken()
def _interval_value_expression_1_(self):
    # Alias production used by the left-recursive alternatives above.
    self._interval_value_expression_()

@graken()
def _interval_term_1_(self):
    # Alias production.
    self._interval_term_()

@graken()
def _interval_term_2_(self):
    # Alias production.
    self._interval_term_()

@graken()
def _interval_absolute_value_function_(self):
    # ABS '(' <interval value expression> ')'
    self._token('ABS')
    self._left_paren_()
    self._interval_value_expression_()
    self._right_paren_()
# --- boolean expression productions ---
@graken()
def _boolean_value_expression_(self):
    # term | expr OR term  (left-recursive)
    with self._choice():
        with self._option():
            self._boolean_term_()
        with self._option():
            self._boolean_value_expression_()
            self._token('OR')
            self._boolean_term_()
        self._error('no available options')

@graken()
def _boolean_term_(self):
    # factor | term AND factor  (left-recursive)
    with self._choice():
        with self._option():
            self._boolean_factor_()
        with self._option():
            self._boolean_term_()
            self._token('AND')
            self._boolean_factor_()
        self._error('no available options')

@graken()
def _boolean_factor_(self):
    # [ NOT ] <boolean test>
    with self._optional():
        self._token('NOT')
    self._boolean_test_()

@graken()
def _boolean_test_(self):
    # <boolean primary> [ IS [ NOT ] <truth value> ]
    self._boolean_primary_()
    with self._optional():
        self._token('IS')
        with self._optional():
            self._token('NOT')
        self._truth_value_()

@graken()
def _truth_value_(self):
    with self._choice():
        with self._option():
            self._token('TRUE')
        with self._option():
            self._token('FALSE')
        with self._option():
            self._token('UNKNOWN')
        self._error('expecting one of: FALSE TRUE UNKNOWN')

@graken()
def _boolean_primary_(self):
    with self._choice():
        with self._option():
            self._predicate_()
        with self._option():
            self._boolean_predicand_()
        self._error('no available options')

@graken()
def _boolean_predicand_(self):
    with self._choice():
        with self._option():
            self._parenthesized_boolean_value_expression_()
        with self._option():
            self._nonparenthesized_value_expression_primary_()
        self._error('no available options')

@graken()
def _parenthesized_boolean_value_expression_(self):
    self._left_paren_()
    self._boolean_value_expression_()
    self._right_paren_()
# --- array expression / constructor productions ---
@graken()
def _array_value_expression_(self):
    with self._choice():
        with self._option():
            self._array_concatenation_()
        with self._option():
            self._array_primary_()
        self._error('no available options')

@graken()
def _array_concatenation_(self):
    # <array value expression> '||' <array primary>  (left-recursive)
    self._array_value_expression_()
    self._concatenation_operator_()
    self._array_primary_()

@graken()
def _array_primary_(self):
    self._value_expression_primary_()

@graken()
def _array_value_constructor_(self):
    with self._choice():
        with self._option():
            self._array_value_constructor_by_enumeration_()
        with self._option():
            self._array_value_constructor_by_query_()
        self._error('no available options')

@graken()
def _array_value_constructor_by_enumeration_(self):
    # ARRAY '[' <array element list> ']'
    self._token('ARRAY')
    self._left_bracket_or_trigraph_()
    self._array_element_list_()
    self._right_bracket_or_trigraph_()

@graken()
def _array_element_list_(self):
    # Comma-separated, one or more elements.
    def sep0():
        self._token(',')
    def block0():
        self._array_element_()
    self._positive_closure(block0, prefix=sep0)

@graken()
def _array_element_(self):
    self._value_expression_()

@graken()
def _array_value_constructor_by_query_(self):
    # ARRAY '(' <query expression> [ <order by clause> ] ')'
    self._token('ARRAY')
    self._left_paren_()
    self._query_expression_()
    with self._optional():
        self._order_by_clause_()
    self._right_paren_()
# --- multiset expression / constructor productions ---
@graken()
def _multiset_value_expression_(self):
    # term | expr MULTISET UNION [ALL|DISTINCT] term
    #      | expr MULTISET EXCEPT [ALL|DISTINCT] term   (left-recursive)
    with self._choice():
        with self._option():
            self._multiset_term_()
        with self._option():
            self._multiset_value_expression_()
            self._token('MULTISET')
            self._token('UNION')
            with self._optional():
                with self._choice():
                    with self._option():
                        self._token('ALL')
                    with self._option():
                        self._token('DISTINCT')
                    self._error('expecting one of: ALL DISTINCT')
            self._multiset_term_()
        with self._option():
            self._multiset_value_expression_()
            self._token('MULTISET')
            self._token('EXCEPT')
            with self._optional():
                with self._choice():
                    with self._option():
                        self._token('ALL')
                    with self._option():
                        self._token('DISTINCT')
                    self._error('expecting one of: ALL DISTINCT')
            self._multiset_term_()
        self._error('no available options')

@graken()
def _multiset_term_(self):
    # primary | term MULTISET INTERSECT [ALL|DISTINCT] primary  (left-recursive)
    with self._choice():
        with self._option():
            self._multiset_primary_()
        with self._option():
            self._multiset_term_()
            self._token('MULTISET')
            self._token('INTERSECT')
            with self._optional():
                with self._choice():
                    with self._option():
                        self._token('ALL')
                    with self._option():
                        self._token('DISTINCT')
                    self._error('expecting one of: ALL DISTINCT')
            self._multiset_primary_()
        self._error('no available options')

@graken()
def _multiset_primary_(self):
    with self._choice():
        with self._option():
            self._multiset_set_function_()
        with self._option():
            self._value_expression_primary_()
        self._error('no available options')

@graken()
def _multiset_set_function_(self):
    # SET '(' <multiset value expression> ')'
    self._token('SET')
    self._left_paren_()
    self._multiset_value_expression_()
    self._right_paren_()

@graken()
def _multiset_value_constructor_(self):
    with self._choice():
        with self._option():
            self._multiset_value_constructor_by_enumeration_()
        with self._option():
            self._multiset_value_constructor_by_query_()
        with self._option():
            self._table_value_constructor_by_query_()
        self._error('no available options')

@graken()
def _multiset_value_constructor_by_enumeration_(self):
    # MULTISET '[' <multiset element list> ']'
    self._token('MULTISET')
    self._left_bracket_or_trigraph_()
    self._multiset_element_list_()
    self._right_bracket_or_trigraph_()

@graken()
def _multiset_element_list_(self):
    # Comma-separated, one or more elements.
    def sep0():
        self._token(',')
    def block0():
        self._multiset_element_()
    self._positive_closure(block0, prefix=sep0)

@graken()
def _multiset_element_(self):
    self._value_expression_()

@graken()
def _multiset_value_constructor_by_query_(self):
    # MULTISET <subquery>
    self._token('MULTISET')
    self._subquery_()

@graken()
def _table_value_constructor_by_query_(self):
    # TABLE <subquery>
    self._token('TABLE')
    self._subquery_()
    # -- Row value constructor rules (GENERATED parser code: edit the grammar,
    #    not this file; option order encodes PEG precedence) --

    # <row value constructor>: common value, boolean value, or explicit row form
    @graken()
    def _row_value_constructor_(self):
        with self._choice():
            with self._option():
                self._common_value_expression_()
            with self._option():
                self._boolean_value_expression_()
            with self._option():
                self._explicit_row_value_constructor_()
            self._error('no available options')
    # '(' elem ',' elem-list ')', ROW '(' elem-list ')', or a subquery
    @graken()
    def _explicit_row_value_constructor_(self):
        with self._choice():
            with self._option():
                self._left_paren_()
                self._row_value_constructor_element_()
                self._comma_()
                self._row_value_constructor_element_list_()
                self._right_paren_()
            with self._option():
                self._token('ROW')
                self._left_paren_()
                self._row_value_constructor_element_list_()
                self._right_paren_()
            with self._option():
                self._subquery_()
            self._error('no available options')
    # comma-separated list of row constructor elements
    @graken()
    def _row_value_constructor_element_list_(self):
        def sep0():
            self._token(',')
        def block0():
            self._row_value_constructor_element_()
        self._positive_closure(block0, prefix=sep0)
    # a single row constructor element is any value expression
    @graken()
    def _row_value_constructor_element_(self):
        self._value_expression_()
    # contextually typed variant: also admits NULL/DEFAULT-style specifications
    @graken()
    def _contextually_typed_row_value_constructor_(self):
        with self._choice():
            with self._option():
                self._common_value_expression_()
            with self._option():
                self._boolean_value_expression_()
            with self._option():
                self._contextually_typed_value_specification_()
            with self._option():
                self._left_paren_()
                self._contextually_typed_row_value_constructor_element_()
                self._comma_()
                self._contextually_typed_row_value_constructor_element_list_()
                self._right_paren_()
            with self._option():
                self._token('ROW')
                self._left_paren_()
                self._contextually_typed_row_value_constructor_element_list_()
                self._right_paren_()
            self._error('no available options')
    # comma-separated list of contextually typed row constructor elements
    @graken()
    def _contextually_typed_row_value_constructor_element_list_(self):
        def sep0():
            self._token(',')
        def block0():
            self._contextually_typed_row_value_constructor_element_()
        self._positive_closure(block0, prefix=sep0)
    # element: plain value expression or contextually typed specification
    @graken()
    def _contextually_typed_row_value_constructor_element_(self):
        with self._choice():
            with self._option():
                self._value_expression_()
            with self._option():
                self._contextually_typed_value_specification_()
            self._error('no available options')
    # predicand variant: uses boolean *predicand* instead of full boolean expr
    @graken()
    def _row_value_constructor_predicand_(self):
        with self._choice():
            with self._option():
                self._common_value_expression_()
            with self._option():
                self._boolean_predicand_()
            with self._option():
                self._explicit_row_value_constructor_()
            self._error('no available options')
    # <row value expression>: special case or explicit constructor
    @graken()
    def _row_value_expression_(self):
        with self._choice():
            with self._option():
                self._row_value_special_case_()
            with self._option():
                self._explicit_row_value_constructor_()
            self._error('no available options')
    # row value as used inside a table VALUES list
    @graken()
    def _table_row_value_expression_(self):
        with self._choice():
            with self._option():
                self._row_value_special_case_()
            with self._option():
                self._row_value_constructor_()
            self._error('no available options')
    # contextually typed row value expression (e.g. INSERT source rows)
    @graken()
    def _contextually_typed_row_value_expression_(self):
        with self._choice():
            with self._option():
                self._row_value_special_case_()
            with self._option():
                self._contextually_typed_row_value_constructor_()
            self._error('no available options')
    # row value as used on either side of a predicate
    @graken()
    def _row_value_predicand_(self):
        with self._choice():
            with self._option():
                self._row_value_special_case_()
            with self._option():
                self._row_value_constructor_predicand_()
            self._error('no available options')
    # special case: a nonparenthesized value-expression primary stands for a row
    @graken()
    def _row_value_special_case_(self):
        self._nonparenthesized_value_expression_primary_()
    # -- VALUES constructors (GENERATED parser code: edit the grammar, not this file) --

    # VALUES <row value expression list>
    @graken()
    def _table_value_constructor_(self):
        self._token('VALUES')
        self._row_value_expression_list_()
    # comma-separated rows for a VALUES clause
    @graken()
    def _row_value_expression_list_(self):
        def sep0():
            self._token(',')
        def block0():
            self._table_row_value_expression_()
        self._positive_closure(block0, prefix=sep0)
    # VALUES list whose rows are contextually typed (e.g. INSERT ... VALUES)
    @graken()
    def _contextually_typed_table_value_constructor_(self):
        self._token('VALUES')
        self._contextually_typed_row_value_expression_list_()
    # comma-separated contextually typed rows
    @graken()
    def _contextually_typed_row_value_expression_list_(self):
        def sep0():
            self._token(',')
        def block0():
            self._contextually_typed_row_value_expression_()
        self._positive_closure(block0, prefix=sep0)
    # -- Table expression / table reference rules (GENERATED parser code:
    #    edit the grammar, not this file) --

    # FROM [WHERE] [GROUP BY] [HAVING] [WINDOW] — the body of a SELECT
    @graken()
    def _table_expression_(self):
        self._from_clause_()
        with self._optional():
            self._where_clause_()
        with self._optional():
            self._group_by_clause_()
        with self._optional():
            self._having_clause_()
        with self._optional():
            self._window_clause_()
    # FROM <table reference list>
    @graken()
    def _from_clause_(self):
        self._token('FROM')
        self._table_reference_list_()
    # comma-separated table references
    @graken()
    def _table_reference_list_(self):
        def sep0():
            self._token(',')
        def block0():
            self._table_reference_()
        self._positive_closure(block0, prefix=sep0)
    # table factor, or joined table with an optional TABLESAMPLE clause
    @graken()
    def _table_reference_(self):
        with self._choice():
            with self._option():
                self._table_factor_()
            with self._option():
                self._joined_table_()
                with self._optional():
                    self._sample_clause_()
            self._error('no available options')
    # table primary with an optional TABLESAMPLE clause
    @graken()
    def _table_factor_(self):
        self._table_primary_()
        with self._optional():
            self._sample_clause_()
    # TABLESAMPLE <method> '(' <percentage> ')' [REPEATABLE(...)]
    @graken()
    def _sample_clause_(self):
        self._token('TABLESAMPLE')
        self._sample_method_()
        self._left_paren_()
        self._numeric_value_expression_()
        self._right_paren_()
        with self._optional():
            self._repeatable_clause_()
    # sampling method keyword
    @graken()
    def _sample_method_(self):
        with self._choice():
            with self._option():
                self._token('BERNOULLI')
            with self._option():
                self._token('SYSTEM')
            self._error('expecting one of: BERNOULLI SYSTEM')
    # REPEATABLE '(' <seed> ')'
    @graken()
    def _repeatable_clause_(self):
        self._token('REPEATABLE')
        self._left_paren_()
        self._numeric_value_expression_()
        self._right_paren_()
    # <table primary>: named table, derived tables (each requiring AS with an
    # optional column list), ONLY(...), or a parenthesized join.
    # NOTE(review): only the first (named-table) option makes AS optional; the
    # derived-table forms mandate it per SQL:2003 — do not "fix" the asymmetry.
    @graken()
    def _table_primary_(self):
        with self._choice():
            with self._option():
                self._table_or_query_name_()
                with self._optional():
                    self._as_clause_()
                    with self._optional():
                        self._left_paren_()
                        self._column_name_list_()
                        self._right_paren_()
            with self._option():
                self._subquery_()
                self._as_clause_()
                with self._optional():
                    self._left_paren_()
                    self._column_name_list_()
                    self._right_paren_()
            with self._option():
                self._lateral_derived_table_()
                self._as_clause_()
                with self._optional():
                    self._left_paren_()
                    self._column_name_list_()
                    self._right_paren_()
            with self._option():
                self._collection_derived_table_()
                self._as_clause_()
                with self._optional():
                    self._left_paren_()
                    self._column_name_list_()
                    self._right_paren_()
            with self._option():
                self._table_function_derived_table_()
                self._as_clause_()
                with self._optional():
                    self._left_paren_()
                    self._column_name_list_()
                    self._right_paren_()
            with self._option():
                self._only_spec_()
                with self._optional():
                    self._as_clause_()
                    with self._optional():
                        self._left_paren_()
                        self._column_name_list_()
                        self._right_paren_()
            with self._option():
                self._parenthesized_joined_table_()
            self._error('no available options')
    # '(' <joined table> ')'
    @graken()
    def _parenthesized_joined_table_(self):
        self._left_paren_()
        self._joined_table_()
        self._right_paren_()
    # ONLY '(' <table or query name> ')' — excludes subtables
    @graken()
    def _only_spec_(self):
        self._token('ONLY')
        self._left_paren_()
        self._table_or_query_name_()
        self._right_paren_()
    # LATERAL <subquery>
    @graken()
    def _lateral_derived_table_(self):
        self._token('LATERAL')
        self._subquery_()
    # UNNEST '(' <collection> ')' [WITH ORDINALITY]
    @graken()
    def _collection_derived_table_(self):
        self._token('UNNEST')
        self._left_paren_()
        self._collection_value_expression_()
        self._right_paren_()
        with self._optional():
            self._token('WITH')
            self._token('ORDINALITY')
    # TABLE '(' <collection> ')'
    @graken()
    def _table_function_derived_table_(self):
        self._token('TABLE')
        self._left_paren_()
        self._collection_value_expression_()
        self._right_paren_()
    # a table name or a (query) identifier
    @graken()
    def _table_or_query_name_(self):
        with self._choice():
            with self._option():
                self._table_name_()
            with self._option():
                self._identifier_()
            self._error('no available options')
    # column name list is just an identifier list
    @graken()
    def _column_name_list_(self):
        self._identifier_list_()
    # -- Join rules (GENERATED parser code: edit the grammar, not this file;
    #    the left-recursive join rules rely on the runtime's memoization) --

    # dispatch over the four join kinds
    @graken()
    def _joined_table_(self):
        with self._choice():
            with self._option():
                self._cross_join_()
            with self._option():
                self._qualified_join_()
            with self._option():
                self._natural_join_()
            with self._option():
                self._union_join_()
            self._error('no available options')
    # <table ref> CROSS JOIN <table primary>
    @graken()
    def _cross_join_(self):
        self._table_reference_()
        self._token('CROSS')
        self._token('JOIN')
        self._table_primary_()
    # <table ref> [<join type>] JOIN <table ref> <join spec>
    @graken()
    def _qualified_join_(self):
        self._table_reference_()
        with self._optional():
            self._join_type_()
        self._token('JOIN')
        self._table_reference_()
        self._join_specification_()
    # <table ref> NATURAL [<join type>] JOIN <table primary>
    @graken()
    def _natural_join_(self):
        self._table_reference_()
        self._token('NATURAL')
        with self._optional():
            self._join_type_()
        self._token('JOIN')
        self._table_primary_()
    # <table ref> UNION JOIN <table primary>
    @graken()
    def _union_join_(self):
        self._table_reference_()
        self._token('UNION')
        self._token('JOIN')
        self._table_primary_()
    # ON condition or USING column list
    @graken()
    def _join_specification_(self):
        with self._choice():
            with self._option():
                self._join_condition_()
            with self._option():
                self._named_columns_join_()
            self._error('no available options')
    # ON <search condition>
    @graken()
    def _join_condition_(self):
        self._token('ON')
        self._search_condition_()
    # USING '(' <column name list> ')'
    @graken()
    def _named_columns_join_(self):
        self._token('USING')
        self._left_paren_()
        self._column_name_list_()
        self._right_paren_()
    # INNER, or LEFT/RIGHT/FULL with optional OUTER
    # (generator truncates the _error message; fix it in the grammar if needed)
    @graken()
    def _join_type_(self):
        with self._choice():
            with self._option():
                self._token('INNER')
            with self._option():
                self._outer_join_type_()
                with self._optional():
                    self._token('OUTER')
            self._error('expecting one of: INNER')
    # LEFT | RIGHT | FULL
    @graken()
    def _outer_join_type_(self):
        with self._choice():
            with self._option():
                self._token('LEFT')
            with self._option():
                self._token('RIGHT')
            with self._option():
                self._token('FULL')
            self._error('expecting one of: FULL LEFT RIGHT')
    # -- WHERE / GROUP BY / grouping-set rules (GENERATED parser code:
    #    edit the grammar, not this file) --

    # WHERE <search condition>
    @graken()
    def _where_clause_(self):
        self._token('WHERE')
        self._search_condition_()
    # GROUP BY [ALL|DISTINCT] <grouping element list>
    @graken()
    def _group_by_clause_(self):
        self._token('GROUP')
        self._token('BY')
        with self._optional():
            self._set_quantifier_()
        self._grouping_element_list_()
    # comma-separated grouping elements
    @graken()
    def _grouping_element_list_(self):
        def sep0():
            self._token(',')
        def block0():
            self._grouping_element_()
        self._positive_closure(block0, prefix=sep0)
    # ordinary set, ROLLUP, CUBE, GROUPING SETS, or the empty set '()'
    @graken()
    def _grouping_element_(self):
        with self._choice():
            with self._option():
                self._ordinary_grouping_set_()
            with self._option():
                self._rollup_list_()
            with self._option():
                self._cube_list_()
            with self._option():
                self._grouping_sets_specification_()
            with self._option():
                self._empty_grouping_set_()
            self._error('no available options')
    # a single column reference or a parenthesized list of them
    @graken()
    def _ordinary_grouping_set_(self):
        with self._choice():
            with self._option():
                self._grouping_column_reference_()
            with self._option():
                self._left_paren_()
                self._grouping_column_reference_list_()
                self._right_paren_()
            self._error('no available options')
    # column reference with optional COLLATE
    @graken()
    def _grouping_column_reference_(self):
        self._column_reference_()
        with self._optional():
            self._collate_clause_()
    # comma-separated grouping column references
    @graken()
    def _grouping_column_reference_list_(self):
        def sep0():
            self._token(',')
        def block0():
            self._grouping_column_reference_()
        self._positive_closure(block0, prefix=sep0)
    # ROLLUP '(' <ordinary grouping set list> ')'
    @graken()
    def _rollup_list_(self):
        self._token('ROLLUP')
        self._left_paren_()
        self._ordinary_grouping_set_list_()
        self._right_paren_()
    # comma-separated ordinary grouping sets
    @graken()
    def _ordinary_grouping_set_list_(self):
        def sep0():
            self._token(',')
        def block0():
            self._ordinary_grouping_set_()
        self._positive_closure(block0, prefix=sep0)
    # CUBE '(' <ordinary grouping set list> ')'
    @graken()
    def _cube_list_(self):
        self._token('CUBE')
        self._left_paren_()
        self._ordinary_grouping_set_list_()
        self._right_paren_()
    # GROUPING SETS '(' <grouping set list> ')'
    @graken()
    def _grouping_sets_specification_(self):
        self._token('GROUPING')
        self._token('SETS')
        self._left_paren_()
        self._grouping_set_list_()
        self._right_paren_()
    # comma-separated grouping sets
    @graken()
    def _grouping_set_list_(self):
        def sep0():
            self._token(',')
        def block0():
            self._grouping_set_()
        self._positive_closure(block0, prefix=sep0)
    # same alternatives as <grouping element>; sets may nest
    @graken()
    def _grouping_set_(self):
        with self._choice():
            with self._option():
                self._ordinary_grouping_set_()
            with self._option():
                self._rollup_list_()
            with self._option():
                self._cube_list_()
            with self._option():
                self._grouping_sets_specification_()
            with self._option():
                self._empty_grouping_set_()
            self._error('no available options')
    # '()' — grand total grouping
    @graken()
    def _empty_grouping_set_(self):
        self._left_paren_()
        self._right_paren_()
    # -- HAVING and window-clause rules (GENERATED parser code:
    #    edit the grammar, not this file) --

    # HAVING <search condition>
    @graken()
    def _having_clause_(self):
        self._token('HAVING')
        self._search_condition_()
    # WINDOW <window definition list>
    @graken()
    def _window_clause_(self):
        self._token('WINDOW')
        self._window_definition_list_()
    # comma-separated window definitions
    @graken()
    def _window_definition_list_(self):
        def sep0():
            self._token(',')
        def block0():
            self._window_definition_()
        self._positive_closure(block0, prefix=sep0)
    # <name> AS <window specification>
    @graken()
    def _window_definition_(self):
        self._identifier_()
        self._token('AS')
        self._window_specification_()
    # '(' <window specification details> ')'
    @graken()
    def _window_specification_(self):
        self._left_paren_()
        self._window_specification_details_()
        self._right_paren_()
    # [existing window name] [PARTITION BY] [ORDER BY] [frame clause] — all optional
    @graken()
    def _window_specification_details_(self):
        with self._optional():
            self._identifier_()
        with self._optional():
            self._window_partition_clause_()
        with self._optional():
            self._order_by_clause_()
        with self._optional():
            self._window_frame_clause_()
    # PARTITION BY <column reference list>
    @graken()
    def _window_partition_clause_(self):
        self._token('PARTITION')
        self._token('BY')
        self._window_partition_column_reference_list_()
    # comma-separated partition column references
    @graken()
    def _window_partition_column_reference_list_(self):
        def sep0():
            self._token(',')
        def block0():
            self._window_partition_column_reference_()
        self._positive_closure(block0, prefix=sep0)
    # column reference with optional COLLATE
    @graken()
    def _window_partition_column_reference_(self):
        self._column_reference_()
        with self._optional():
            self._collate_clause_()
    # <units> <extent> [<exclusion>]
    @graken()
    def _window_frame_clause_(self):
        self._window_frame_units_()
        self._window_frame_extent_()
        with self._optional():
            self._window_frame_exclusion_()
    # ROWS | RANGE
    @graken()
    def _window_frame_units_(self):
        with self._choice():
            with self._option():
                self._token('ROWS')
            with self._option():
                self._token('RANGE')
            self._error('expecting one of: RANGE ROWS')
    # a single start bound or a BETWEEN pair
    @graken()
    def _window_frame_extent_(self):
        with self._choice():
            with self._option():
                self._window_frame_start_()
            with self._option():
                self._window_frame_between_()
            self._error('no available options')
    # UNBOUNDED PRECEDING | <n> PRECEDING | CURRENT ROW
    @graken()
    def _window_frame_start_(self):
        with self._choice():
            with self._option():
                self._token('UNBOUNDED')
                self._token('PRECEDING')
            with self._option():
                self._window_frame_preceding_()
            with self._option():
                self._token('CURRENT')
                self._token('ROW')
            self._error('expecting one of: CURRENT UNBOUNDED')
    # <unsigned value> PRECEDING
    @graken()
    def _window_frame_preceding_(self):
        self._unsigned_value_specification_()
        self._token('PRECEDING')
    # BETWEEN <bound> AND <bound>
    @graken()
    def _window_frame_between_(self):
        self._token('BETWEEN')
        self._window_frame_bound_()
        self._token('AND')
        self._window_frame_bound_()
    # start bound, UNBOUNDED FOLLOWING, or <n> FOLLOWING
    @graken()
    def _window_frame_bound_(self):
        with self._choice():
            with self._option():
                self._window_frame_start_()
            with self._option():
                self._token('UNBOUNDED')
                self._token('FOLLOWING')
            with self._option():
                self._window_frame_following_()
            self._error('expecting one of: UNBOUNDED')
    # <unsigned value> FOLLOWING
    @graken()
    def _window_frame_following_(self):
        self._unsigned_value_specification_()
        self._token('FOLLOWING')
    # EXCLUDE CURRENT ROW | GROUP | TIES | NO OTHERS
    @graken()
    def _window_frame_exclusion_(self):
        with self._choice():
            with self._option():
                self._token('EXCLUDE')
                self._token('CURRENT')
                self._token('ROW')
            with self._option():
                self._token('EXCLUDE')
                self._token('GROUP')
            with self._option():
                self._token('EXCLUDE')
                self._token('TIES')
            with self._option():
                self._token('EXCLUDE')
                self._token('NO')
                self._token('OTHERS')
            self._error('expecting one of: EXCLUDE')
    # -- SELECT-list rules (GENERATED parser code: edit the grammar, not this file) --

    # SELECT [ALL|DISTINCT] <select list> <table expression>
    @graken()
    def _query_specification_(self):
        self._token('SELECT')
        with self._optional():
            self._set_quantifier_()
        self._select_list_()
        self._table_expression_()
    # '*' or a comma-separated sublist
    @graken()
    def _select_list_(self):
        with self._choice():
            with self._option():
                self._asterisk_()
            with self._option():
                def sep0():
                    self._token(',')
                def block0():
                    self._select_sublist_()
                self._positive_closure(block0, prefix=sep0)
            self._error('no available options')
    # derived column or qualified asterisk (t.*)
    @graken()
    def _select_sublist_(self):
        with self._choice():
            with self._option():
                self._derived_column_()
            with self._option():
                self._qualified_asterisk_()
            self._error('no available options')
    # <chain>.* or <all fields reference>
    @graken()
    def _qualified_asterisk_(self):
        with self._choice():
            with self._option():
                self._identifier_chain_()
                self._period_()
                self._asterisk_()
            with self._option():
                self._all_fields_reference_()
            self._error('no available options')
    # <value expression> [AS <name>]
    @graken()
    def _derived_column_(self):
        self._value_expression_()
        with self._optional():
            self._as_clause_()
    # [AS] <identifier> — the AS keyword itself is optional
    @graken()
    def _as_clause_(self):
        with self._optional():
            self._token('AS')
        self._identifier_()
    # <primary>.* [AS '(' <column name list> ')']
    @graken()
    def _all_fields_reference_(self):
        self._value_expression_primary_()
        self._period_()
        self._asterisk_()
        with self._optional():
            self._token('AS')
            self._left_paren_()
            self._column_name_list_()
            self._right_paren_()
    # -- Query expression / WITH / set-operation rules (GENERATED parser code:
    #    edit the grammar, not this file; left-recursive rules rely on the
    #    runtime's memoized left-recursion support) --

    # [WITH ...] <query expression body>
    @graken()
    def _query_expression_(self):
        with self._optional():
            self._with_clause_()
        self._query_expression_body_()
    # WITH [RECURSIVE] <with list>
    @graken()
    def _with_clause_(self):
        self._token('WITH')
        with self._optional():
            self._token('RECURSIVE')
        self._with_list_()
    # comma-separated WITH elements
    @graken()
    def _with_list_(self):
        def sep0():
            self._token(',')
        def block0():
            self._with_list_element_()
        self._positive_closure(block0, prefix=sep0)
    # <name> ['(' cols ')'] AS <subquery> [<search or cycle clause>]
    @graken()
    def _with_list_element_(self):
        self._identifier_()
        with self._optional():
            self._left_paren_()
            self._column_name_list_()
            self._right_paren_()
        self._token('AS')
        self._subquery_()
        with self._optional():
            self._search_or_cycle_clause_()
    # non-join expression or a joined table
    @graken()
    def _query_expression_body_(self):
        with self._choice():
            with self._option():
                self._non_join_query_expression_()
            with self._option():
                self._joined_table_()
            self._error('no available options')
    # term, or left-recursive body UNION/EXCEPT [ALL|DISTINCT] [CORRESPONDING] term
    @graken()
    def _non_join_query_expression_(self):
        with self._choice():
            with self._option():
                self._non_join_query_term_()
            with self._option():
                self._query_expression_body_()
                self._token('UNION')
                with self._optional():
                    with self._choice():
                        with self._option():
                            self._token('ALL')
                        with self._option():
                            self._token('DISTINCT')
                        self._error('expecting one of: ALL DISTINCT')
                with self._optional():
                    self._corresponding_spec_()
                self._query_term_()
            with self._option():
                self._query_expression_body_()
                self._token('EXCEPT')
                with self._optional():
                    with self._choice():
                        with self._option():
                            self._token('ALL')
                        with self._option():
                            self._token('DISTINCT')
                        self._error('expecting one of: ALL DISTINCT')
                with self._optional():
                    self._corresponding_spec_()
                self._query_term_()
            self._error('no available options')
    # non-join term or joined table
    @graken()
    def _query_term_(self):
        with self._choice():
            with self._option():
                self._non_join_query_term_()
            with self._option():
                self._joined_table_()
            self._error('no available options')
    # primary, or left-recursive term INTERSECT [ALL|DISTINCT] [CORRESPONDING] primary
    @graken()
    def _non_join_query_term_(self):
        with self._choice():
            with self._option():
                self._non_join_query_primary_()
            with self._option():
                self._query_term_()
                self._token('INTERSECT')
                with self._optional():
                    with self._choice():
                        with self._option():
                            self._token('ALL')
                        with self._option():
                            self._token('DISTINCT')
                        self._error('expecting one of: ALL DISTINCT')
                with self._optional():
                    self._corresponding_spec_()
                self._query_primary_()
            self._error('no available options')
    # non-join primary or joined table
    @graken()
    def _query_primary_(self):
        with self._choice():
            with self._option():
                self._non_join_query_primary_()
            with self._option():
                self._joined_table_()
            self._error('no available options')
    # simple table or a parenthesized non-join expression
    @graken()
    def _non_join_query_primary_(self):
        with self._choice():
            with self._option():
                self._simple_table_()
            with self._option():
                self._left_paren_()
                self._non_join_query_expression_()
                self._right_paren_()
            self._error('no available options')
    # SELECT spec, VALUES constructor, or TABLE <name>
    @graken()
    def _simple_table_(self):
        with self._choice():
            with self._option():
                self._query_specification_()
            with self._option():
                self._table_value_constructor_()
            with self._option():
                self._explicit_table_()
            self._error('no available options')
    # TABLE <table or query name>
    @graken()
    def _explicit_table_(self):
        self._token('TABLE')
        self._table_or_query_name_()
    # CORRESPONDING [BY '(' cols ')']
    @graken()
    def _corresponding_spec_(self):
        self._token('CORRESPONDING')
        with self._optional():
            self._token('BY')
            self._left_paren_()
            self._column_name_list_()
            self._right_paren_()
    # SEARCH, CYCLE, or SEARCH followed by CYCLE (for recursive WITH)
    @graken()
    def _search_or_cycle_clause_(self):
        with self._choice():
            with self._option():
                self._search_clause_()
            with self._option():
                self._cycle_clause_()
            with self._option():
                self._search_clause_()
                self._cycle_clause_()
            self._error('no available options')
    # SEARCH <order> SET <sequence column>
    @graken()
    def _search_clause_(self):
        self._token('SEARCH')
        self._recursive_search_order_()
        self._token('SET')
        self._identifier_()
    # DEPTH FIRST BY ... | BREADTH FIRST BY ...
    @graken()
    def _recursive_search_order_(self):
        with self._choice():
            with self._option():
                self._token('DEPTH')
                self._token('FIRST')
                self._token('BY')
                self._sort_specification_list_()
            with self._option():
                self._token('BREADTH')
                self._token('FIRST')
                self._token('BY')
                self._sort_specification_list_()
            self._error('no available options')
    # CYCLE <cols> SET <mark> TO <value> DEFAULT <value> USING <path column>
    @graken()
    def _cycle_clause_(self):
        self._token('CYCLE')
        self._cycle_column_list_()
        self._token('SET')
        self._identifier_()
        self._token('TO')
        self._value_expression_()
        self._token('DEFAULT')
        self._value_expression_()
        self._token('USING')
        self._identifier_()
    # cycle column list is a column name list
    @graken()
    def _cycle_column_list_(self):
        self._column_name_list_()
    # '(' <query expression> ')'
    @graken()
    def _subquery_(self):
        self._left_paren_()
        self._query_expression_()
        self._right_paren_()
    # -- Predicate rules (GENERATED parser code: edit the grammar, not this
    #    file; each predicate splits into row-value head + "part 2" tail,
    #    mirroring the SQL:2003 BNF) --

    # dispatch over all predicate kinds; PEG option order matters
    @graken()
    def _predicate_(self):
        with self._choice():
            with self._option():
                self._comparison_predicate_()
            with self._option():
                self._between_predicate_()
            with self._option():
                self._in_predicate_()
            with self._option():
                self._like_predicate_()
            with self._option():
                self._similar_predicate_()
            with self._option():
                self._null_predicate_()
            with self._option():
                self._quantified_comparison_predicate_()
            with self._option():
                self._exists_predicate_()
            with self._option():
                self._unique_predicate_()
            with self._option():
                self._normalized_predicate_()
            with self._option():
                self._match_predicate_()
            with self._option():
                self._overlaps_predicate_()
            with self._option():
                self._distinct_predicate_()
            with self._option():
                self._member_predicate_()
            with self._option():
                self._submultiset_predicate_()
            with self._option():
                self._set_predicate_()
            with self._option():
                self._type_predicate_()
            self._error('no available options')
    # <row> <comp op> <row>
    @graken()
    def _comparison_predicate_(self):
        self._row_value_predicand_()
        self._comparison_predicate_part_2_()
    @graken()
    def _comparison_predicate_part_2_(self):
        self._comp_op_()
        self._row_value_predicand_()
    # = <> < > <= >=
    @graken()
    def _comp_op_(self):
        with self._choice():
            with self._option():
                self._equals_operator_()
            with self._option():
                self._not_equals_operator_()
            with self._option():
                self._less_than_operator_()
            with self._option():
                self._greater_than_operator_()
            with self._option():
                self._less_than_or_equals_operator_()
            with self._option():
                self._greater_than_or_equals_operator_()
            self._error('no available options')
    # <row> [NOT] BETWEEN [ASYMMETRIC|SYMMETRIC] <row> AND <row>
    @graken()
    def _between_predicate_(self):
        self._row_value_predicand_()
        self._between_predicate_part_2_()
    @graken()
    def _between_predicate_part_2_(self):
        with self._optional():
            self._token('NOT')
        self._token('BETWEEN')
        with self._optional():
            with self._choice():
                with self._option():
                    self._token('ASYMMETRIC')
                with self._option():
                    self._token('SYMMETRIC')
                self._error('expecting one of: ASYMMETRIC SYMMETRIC')
        self._row_value_predicand_()
        self._token('AND')
        self._row_value_predicand_()
    # <row> [NOT] IN <in predicate value>
    @graken()
    def _in_predicate_(self):
        self._row_value_predicand_()
        self._in_predicate_part_2_()
    @graken()
    def _in_predicate_part_2_(self):
        with self._optional():
            self._token('NOT')
        self._token('IN')
        self._in_predicate_value_()
    # subquery or parenthesized value list
    @graken()
    def _in_predicate_value_(self):
        with self._choice():
            with self._option():
                self._subquery_()
            with self._option():
                self._left_paren_()
                self._in_value_list_()
                self._right_paren_()
            self._error('no available options')
    # comma-separated row values for IN (...)
    @graken()
    def _in_value_list_(self):
        def sep0():
            self._token(',')
        def block0():
            self._row_value_expression_()
        self._positive_closure(block0, prefix=sep0)
    # character or octet (binary) LIKE
    @graken()
    def _like_predicate_(self):
        with self._choice():
            with self._option():
                self._character_like_predicate_()
            with self._option():
                self._octet_like_predicate_()
            self._error('no available options')
    # <row> [NOT] LIKE <pattern> [ESCAPE <char>]
    @graken()
    def _character_like_predicate_(self):
        self._row_value_predicand_()
        self._character_like_predicate_part_2_()
    @graken()
    def _character_like_predicate_part_2_(self):
        with self._optional():
            self._token('NOT')
        self._token('LIKE')
        self._character_value_expression_()
        with self._optional():
            self._token('ESCAPE')
            self._escape_character_()
    # escape character is any character value expression
    @graken()
    def _escape_character_(self):
        self._character_value_expression_()
    # binary LIKE over BLOB values
    @graken()
    def _octet_like_predicate_(self):
        self._row_value_predicand_()
        self._octet_like_predicate_part_2_()
    @graken()
    def _octet_like_predicate_part_2_(self):
        with self._optional():
            self._token('NOT')
        self._token('LIKE')
        self._blob_value_expression_()
        with self._optional():
            self._token('ESCAPE')
            self._blob_value_expression_()
    # <row> [NOT] SIMILAR TO <pattern> [ESCAPE <char>]
    @graken()
    def _similar_predicate_(self):
        self._row_value_predicand_()
        self._similar_predicate_part_2_()
    @graken()
    def _similar_predicate_part_2_(self):
        with self._optional():
            self._token('NOT')
        self._token('SIMILAR')
        self._token('TO')
        self._character_value_expression_()
        with self._optional():
            self._token('ESCAPE')
            self._escape_character_()
    # <row> IS [NOT] NULL
    @graken()
    def _null_predicate_(self):
        self._row_value_predicand_()
        self._null_predicate_part_2_()
    @graken()
    def _null_predicate_part_2_(self):
        self._token('IS')
        with self._optional():
            self._token('NOT')
        self._token('NULL')
    # <row> <comp op> ALL/SOME/ANY <subquery>
    @graken()
    def _quantified_comparison_predicate_(self):
        self._row_value_predicand_()
        self._quantified_comparison_predicate_part_2_()
    @graken()
    def _quantified_comparison_predicate_part_2_(self):
        self._comp_op_()
        self._quantifier_()
        self._subquery_()
    # ALL or SOME/ANY (generator truncates the _error message)
    @graken()
    def _quantifier_(self):
        with self._choice():
            with self._option():
                self._token('ALL')
            with self._option():
                self._some_()
            self._error('expecting one of: ALL')
    # SOME | ANY
    @graken()
    def _some_(self):
        with self._choice():
            with self._option():
                self._token('SOME')
            with self._option():
                self._token('ANY')
            self._error('expecting one of: ANY SOME')
    # EXISTS <subquery>
    @graken()
    def _exists_predicate_(self):
        self._token('EXISTS')
        self._subquery_()
    # UNIQUE <subquery>
    @graken()
    def _unique_predicate_(self):
        self._token('UNIQUE')
        self._subquery_()
    # <string> IS [NOT] NORMALIZED
    @graken()
    def _normalized_predicate_(self):
        self._string_value_expression_()
        self._normalized_predicate_part_2_()
    @graken()
    def _normalized_predicate_part_2_(self):
        self._token('IS')
        with self._optional():
            self._token('NOT')
        self._token('NORMALIZED')
    # <row> MATCH [UNIQUE] [SIMPLE|PARTIAL|FULL] <subquery>
    @graken()
    def _match_predicate_(self):
        self._row_value_predicand_()
        self._match_predicate_part_2_()
    @graken()
    def _match_predicate_part_2_(self):
        self._token('MATCH')
        with self._optional():
            self._token('UNIQUE')
        with self._optional():
            with self._choice():
                with self._option():
                    self._token('SIMPLE')
                with self._option():
                    self._token('PARTIAL')
                with self._option():
                    self._token('FULL')
                self._error('expecting one of: FULL PARTIAL SIMPLE')
        self._subquery_()
    # <row> OVERLAPS <row> (datetime period overlap)
    @graken()
    def _overlaps_predicate_(self):
        self._overlaps_predicate_part_1_()
        self._overlaps_predicate_part_2_()
    @graken()
    def _overlaps_predicate_part_1_(self):
        self._row_value_predicand_()
    @graken()
    def _overlaps_predicate_part_2_(self):
        self._token('OVERLAPS')
        self._row_value_predicand_()
    # <row> IS [NOT] DISTINCT FROM <row>
    @graken()
    def _distinct_predicate_(self):
        self._row_value_predicand_()
        self._distinct_predicate_part_2_()
    @graken()
    def _distinct_predicate_part_2_(self):
        self._token('IS')
        with self._optional():
            self._token('NOT')
        self._token('DISTINCT')
        self._token('FROM')
        self._row_value_predicand_()
    # <value> [NOT] MEMBER [OF] <multiset>
    @graken()
    def _member_predicate_(self):
        self._row_value_predicand_()
        self._member_predicate_part_2_()
    @graken()
    def _member_predicate_part_2_(self):
        with self._optional():
            self._token('NOT')
        self._token('MEMBER')
        with self._optional():
            self._token('OF')
        self._multiset_value_expression_()
    # <multiset> [NOT] SUBMULTISET [OF] <multiset>
    @graken()
    def _submultiset_predicate_(self):
        self._row_value_predicand_()
        self._submultiset_predicate_part_2_()
    @graken()
    def _submultiset_predicate_part_2_(self):
        with self._optional():
            self._token('NOT')
        self._token('SUBMULTISET')
        with self._optional():
            self._token('OF')
        self._multiset_value_expression_()
    # <multiset> IS [NOT] A SET (no duplicates)
    @graken()
    def _set_predicate_(self):
        self._row_value_predicand_()
        self._set_predicate_part_2_()
    @graken()
    def _set_predicate_part_2_(self):
        self._token('IS')
        with self._optional():
            self._token('NOT')
        self._token('A')
        self._token('SET')
    # <value> IS [NOT] OF '(' <type list> ')'
    @graken()
    def _type_predicate_(self):
        self._row_value_predicand_()
        self._type_predicate_part_2_()
    @graken()
    def _type_predicate_part_2_(self):
        self._token('IS')
        with self._optional():
            self._token('NOT')
        self._token('OF')
        self._left_paren_()
        self._type_list_()
        self._right_paren_()
    # -- Type lists, search condition, and interval qualifiers (GENERATED
    #    parser code: edit the grammar, not this file) --

    # comma-separated user-defined type specifications
    @graken()
    def _type_list_(self):
        def sep0():
            self._token(',')
        def block0():
            self._user_defined_type_specification_()
        self._positive_closure(block0, prefix=sep0)
    # type name, possibly restricted with ONLY
    @graken()
    def _user_defined_type_specification_(self):
        with self._choice():
            with self._option():
                self._schema_qualified_name_()
            with self._option():
                self._exclusive_user_defined_type_specification_()
            self._error('no available options')
    # ONLY <schema qualified name> — excludes subtypes
    @graken()
    def _exclusive_user_defined_type_specification_(self):
        self._token('ONLY')
        self._schema_qualified_name_()
    # a search condition is just a boolean value expression
    @graken()
    def _search_condition_(self):
        self._boolean_value_expression_()
    # <start field> TO <end field>, or a single datetime field
    @graken()
    def _interval_qualifier_(self):
        with self._choice():
            with self._option():
                self._start_field_()
                self._token('TO')
                self._end_field_()
            with self._option():
                self._single_datetime_field_()
            self._error('no available options')
    # non-second field with optional leading-precision '(' n ')'
    @graken()
    def _start_field_(self):
        self._non_second_primary_datetime_field_()
        with self._optional():
            self._left_paren_()
            self._precision_()
            self._right_paren_()
    # non-second field, or SECOND with optional fractional precision
    @graken()
    def _end_field_(self):
        with self._choice():
            with self._option():
                self._non_second_primary_datetime_field_()
            with self._option():
                self._token('SECOND')
                with self._optional():
                    self._left_paren_()
                    self._precision_()
                    self._right_paren_()
            self._error('expecting one of: SECOND')
    # single field; SECOND may carry '(' leading [, fractional] ')' precision
    @graken()
    def _single_datetime_field_(self):
        with self._choice():
            with self._option():
                self._non_second_primary_datetime_field_()
                with self._optional():
                    self._left_paren_()
                    self._precision_()
                    self._right_paren_()
            with self._option():
                self._token('SECOND')
                with self._optional():
                    self._left_paren_()
                    self._precision_()
                    with self._optional():
                        self._comma_()
                        self._precision_()
                    self._right_paren_()
            self._error('expecting one of: SECOND')
    # YEAR/MONTH/DAY/HOUR/MINUTE or SECOND
    @graken()
    def _primary_datetime_field_(self):
        with self._choice():
            with self._option():
                self._non_second_primary_datetime_field_()
            with self._option():
                self._token('SECOND')
            self._error('expecting one of: SECOND')
    # YEAR | MONTH | DAY | HOUR | MINUTE
    @graken()
    def _non_second_primary_datetime_field_(self):
        with self._choice():
            with self._option():
                self._token('YEAR')
            with self._option():
                self._token('MONTH')
            with self._option():
                self._token('DAY')
            with self._option():
                self._token('HOUR')
            with self._option():
                self._token('MINUTE')
            self._error('expecting one of: DAY HOUR MINUTE MONTH YEAR')
@graken()
def _language_clause_(self):
self._token('LANGUAGE')
self._language_name_()
@graken()
def _language_name_(self):
with self._choice():
with self._option():
self._token('ADA')
with self._option():
self._token('C')
with self._option():
self._token('COBOL')
with self._option():
self._token('FORTRAN')
with self._option():
self._token('M')
with self._option():
self._token('MUMPS')
with self._option():
self._token('PASCAL')
with self._option():
self._token('PLI')
with self._option():
self._token('SQL')
self._error('expecting one of: ADA C COBOL FORTRAN M MUMPS PASCAL PLI SQL')
@graken()
def _path_specification_(self):
self._token('PATH')
self._schema_name_list_()
@graken()
def _schema_name_list_(self):
def sep0():
self._token(',')
def block0():
self._schema_name_()
self._positive_closure(block0, prefix=sep0)
@graken()
def _routine_invocation_(self):
self._schema_qualified_name_()
self._sql_argument_list_()
@graken()
def _sql_argument_list_(self):
self._left_paren_()
with self._optional():
def sep0():
self._token(',')
def block0():
self._sql_argument_()
self._positive_closure(block0, prefix=sep0)
self._right_paren_()
    @graken()
    def _sql_argument_(self):
        """Parse <SQL argument>: a value expression, a generalized
        expression, or a target specification (tried in that order)."""
        with self._choice():
            with self._option():
                self._value_expression_()
            with self._option():
                self._generalized_expression_()
            with self._option():
                self._target_specification_()
            self._error('no available options')
    @graken()
    def _generalized_expression_(self):
        """Parse <generalized expression>: <value expression> AS
        <user-defined type name>."""
        self._value_expression_()
        self._token('AS')
        self._schema_qualified_name_()
    @graken()
    def _specific_routine_designator_(self):
        """Parse <specific routine designator>:
        SPECIFIC <routine type> <specific name>, or
        <routine type> <member name> [FOR <type name>].
        """
        with self._choice():
            with self._option():
                self._token('SPECIFIC')
                self._routine_type_()
                self._schema_qualified_name_()
            with self._option():
                self._routine_type_()
                self._member_name_()
                with self._optional():
                    self._token('FOR')
                    self._schema_qualified_name_()
            self._error('no available options')
    @graken()
    def _routine_type_(self):
        """Parse <routine type>: ROUTINE | FUNCTION | PROCEDURE |
        [INSTANCE | STATIC | CONSTRUCTOR] METHOD.
        """
        with self._choice():
            with self._option():
                self._token('ROUTINE')
            with self._option():
                self._token('FUNCTION')
            with self._option():
                self._token('PROCEDURE')
            with self._option():
                # Optional method qualifier, then the METHOD keyword.
                with self._optional():
                    with self._choice():
                        with self._option():
                            self._token('INSTANCE')
                        with self._option():
                            self._token('STATIC')
                        with self._option():
                            self._token('CONSTRUCTOR')
                        self._error('expecting one of: CONSTRUCTOR INSTANCE STATIC')
                self._token('METHOD')
            self._error('expecting one of: CONSTRUCTOR FUNCTION INSTANCE METHOD PROCEDURE ROUTINE STATIC')
@graken()
def _member_name_(self):
self._member_name_alternatives_()
with self._optional():
self._data_type_list_()
@graken()
def _member_name_alternatives_(self):
with self._choice():
with self._option():
self._schema_qualified_name_()
with self._option():
self._identifier_()
self._error('no available options')
@graken()
def _data_type_list_(self):
self._left_paren_()
with self._optional():
def sep0():
self._token(',')
def block0():
self._data_type_()
self._positive_closure(block0, prefix=sep0)
self._right_paren_()
@graken()
def _collate_clause_(self):
self._token('COLLATE')
self._schema_qualified_name_()
@graken()
def _constraint_name_definition_(self):
self._token('CONSTRAINT')
self._schema_qualified_name_()
    @graken()
    def _constraint_characteristics_(self):
        """Parse <constraint characteristics>: a constraint check time
        and/or a [NOT] DEFERRABLE clause, in either order.
        """
        with self._choice():
            with self._option():
                self._constraint_check_time_()
                with self._optional():
                    with self._optional():
                        self._token('NOT')
                    self._token('DEFERRABLE')
            with self._option():
                with self._optional():
                    self._token('NOT')
                self._token('DEFERRABLE')
                with self._optional():
                    self._constraint_check_time_()
            self._error('expecting one of: DEFERRABLE NOT')
    @graken()
    def _constraint_check_time_(self):
        """Parse <constraint check time>:
        INITIALLY DEFERRED | INITIALLY IMMEDIATE."""
        with self._choice():
            with self._option():
                self._token('INITIALLY')
                self._token('DEFERRED')
            with self._option():
                self._token('INITIALLY')
                self._token('IMMEDIATE')
            self._error('expecting one of: INITIALLY')
    @graken()
    def _aggregate_function_(self):
        """Parse <aggregate function>: COUNT(*), a general set function,
        a binary set function, or an ordered set function, each with an
        optional trailing FILTER clause.
        """
        with self._choice():
            with self._option():
                self._token('COUNT')
                self._left_paren_()
                self._asterisk_()
                self._right_paren_()
                with self._optional():
                    self._filter_clause_()
            with self._option():
                self._general_set_function_()
                with self._optional():
                    self._filter_clause_()
            with self._option():
                self._binary_set_function_()
                with self._optional():
                    self._filter_clause_()
            with self._option():
                self._ordered_set_function_()
                with self._optional():
                    self._filter_clause_()
            self._error('no available options')
@graken()
def _general_set_function_(self):
self._computational_operation_()
self._left_paren_()
with self._optional():
self._set_quantifier_()
self._value_expression_()
self._right_paren_()
@graken()
def _computational_operation_(self):
with self._choice():
with self._option():
self._token('AVG')
with self._option():
self._token('MAX')
with self._option():
self._token('MIN')
with self._option():
self._token('SUM')
with self._option():
self._token('EVERY')
with self._option():
self._token('ANY')
with self._option():
self._token('SOME')
with self._option():
self._token('COUNT')
with self._option():
self._token('STDDEV_POP')
with self._option():
self._token('STDDEV_SAMP')
with self._option():
self._token('VAR_SAMP')
with self._option():
self._token('VAR_POP')
with self._option():
self._token('COLLECT')
with self._option():
self._token('FUSION')
with self._option():
self._token('INTERSECTION')
self._error('expecting one of: ANY AVG COLLECT COUNT EVERY FUSION INTERSECTION MAX MIN SOME STDDEV_POP STDDEV_SAMP SUM VAR_POP VAR_SAMP')
@graken()
def _set_quantifier_(self):
with self._choice():
with self._option():
self._token('DISTINCT')
with self._option():
self._token('ALL')
self._error('expecting one of: ALL DISTINCT')
@graken()
def _filter_clause_(self):
self._token('FILTER')
self._left_paren_()
self._token('WHERE')
self._search_condition_()
self._right_paren_()
@graken()
def _binary_set_function_(self):
self._binary_set_function_type_()
self._left_paren_()
self._numeric_value_expression_()
self._comma_()
self._numeric_value_expression_()
self._right_paren_()
@graken()
def _binary_set_function_type_(self):
with self._choice():
with self._option():
self._token('COVAR_POP')
with self._option():
self._token('COVAR_SAMP')
with self._option():
self._token('CORR')
with self._option():
self._token('REGR_SLOPE')
with self._option():
self._token('REGR_INTERCEPT')
with self._option():
self._token('REGR_COUNT')
with self._option():
self._token('REGR_R2')
with self._option():
self._token('REGR_AVGX')
with self._option():
self._token('REGR_AVGY')
with self._option():
self._token('REGR_SXX')
with self._option():
self._token('REGR_SYY')
with self._option():
self._token('REGR_SXY')
self._error('expecting one of: CORR COVAR_POP COVAR_SAMP REGR_AVGX REGR_AVGY REGR_COUNT REGR_INTERCEPT REGR_R2 REGR_SLOPE REGR_SXX REGR_SXY REGR_SYY')
@graken()
def _ordered_set_function_(self):
with self._choice():
with self._option():
self._hypothetical_set_function_()
with self._option():
self._inverse_distribution_function_()
self._error('no available options')
@graken()
def _hypothetical_set_function_(self):
self._rank_function_type_()
self._left_paren_()
self._hypothetical_set_function_value_expression_list_()
self._right_paren_()
self._within_group_specification_()
@graken()
def _within_group_specification_(self):
self._token('WITHIN')
self._token('GROUP')
self._left_paren_()
self._order_by_clause_()
self._right_paren_()
@graken()
def _hypothetical_set_function_value_expression_list_(self):
def sep0():
self._token(',')
def block0():
self._value_expression_()
self._positive_closure(block0, prefix=sep0)
@graken()
def _inverse_distribution_function_(self):
self._inverse_distribution_function_type_()
self._left_paren_()
self._numeric_value_expression_()
self._right_paren_()
self._within_group_specification_()
@graken()
def _inverse_distribution_function_type_(self):
with self._choice():
with self._option():
self._token('PERCENTILE_CONT')
with self._option():
self._token('PERCENTILE_DISC')
self._error('expecting one of: PERCENTILE_CONT PERCENTILE_DISC')
@graken()
def _sort_specification_list_(self):
def sep0():
self._token(',')
def block0():
self._sort_specification_()
self._positive_closure(block0, prefix=sep0)
@graken()
def _sort_specification_(self):
self._value_expression_()
with self._optional():
self._ordering_specification_()
with self._optional():
self._null_ordering_()
@graken()
def _ordering_specification_(self):
with self._choice():
with self._option():
self._token('ASC')
with self._option():
self._token('DESC')
self._error('expecting one of: ASC DESC')
@graken()
def _null_ordering_(self):
with self._choice():
with self._option():
self._token('NULLS')
self._token('FIRST')
with self._option():
self._token('NULLS')
self._token('LAST')
self._error('expecting one of: NULLS')
    @graken()
    def _schema_definition_(self):
        """Parse <schema definition>: CREATE SCHEMA <schema name clause>
        [character set and/or path] [one or more schema elements]."""
        self._token('CREATE')
        self._token('SCHEMA')
        self._schema_name_clause_()
        with self._optional():
            self._schema_character_set_or_path_()
        with self._optional():
            def block0():
                self._schema_element_()
            self._positive_closure(block0)
@graken()
def _schema_character_set_or_path_(self):
with self._choice():
with self._option():
self._schema_character_set_specification_()
with self._option():
self._schema_path_specification_()
with self._option():
self._schema_character_set_specification_()
self._schema_path_specification_()
with self._option():
self._schema_path_specification_()
self._schema_character_set_specification_()
self._error('no available options')
@graken()
def _schema_name_clause_(self):
with self._choice():
with self._option():
self._schema_name_()
with self._option():
self._token('AUTHORIZATION')
self._identifier_()
with self._option():
self._schema_name_()
self._token('AUTHORIZATION')
self._identifier_()
self._error('no available options')
@graken()
def _schema_character_set_specification_(self):
self._token('DEFAULT')
self._token('CHARACTER')
self._token('SET')
self._character_set_name_()
@graken()
def _schema_path_specification_(self):
self._path_specification_()
@graken()
def _schema_element_(self):
with self._choice():
with self._option():
self._table_definition_()
with self._option():
self._view_definition_()
with self._option():
self._domain_definition_()
with self._option():
self._character_set_definition_()
with self._option():
self._collation_definition_()
with self._option():
self._transliteration_definition_()
with self._option():
self._assertion_definition_()
with self._option():
self._trigger_definition_()
with self._option():
self._user_defined_type_definition_()
with self._option():
self._user_defined_cast_definition_()
with self._option():
self._user_defined_ordering_definition_()
with self._option():
self._transform_definition_()
with self._option():
self._schema_routine_()
with self._option():
self._sequence_generator_definition_()
with self._option():
self._grant_statement_()
with self._option():
self._role_definition_()
self._error('no available options')
@graken()
def _drop_schema_statement_(self):
self._token('DROP')
self._token('SCHEMA')
self._schema_name_()
self._drop_behavior_()
@graken()
def _drop_behavior_(self):
with self._choice():
with self._option():
self._token('CASCADE')
with self._option():
self._token('RESTRICT')
self._error('expecting one of: CASCADE RESTRICT')
@graken()
def _table_definition_(self):
self._token('CREATE')
with self._optional():
self._table_scope_()
self._token('TABLE')
self._table_name_()
self._table_contents_source_()
with self._optional():
self._token('ON')
self._token('COMMIT')
self._table_commit_action_()
self._token('ROWS')
@graken()
def _table_contents_source_(self):
with self._choice():
with self._option():
self._table_element_list_()
with self._option():
self._typed_table_clause_()
with self._option():
self._as_subquery_clause_()
self._error('no available options')
@graken()
def _table_scope_(self):
self._global_or_local_()
self._token('TEMPORARY')
@graken()
def _global_or_local_(self):
with self._choice():
with self._option():
self._token('GLOBAL')
with self._option():
self._token('LOCAL')
self._error('expecting one of: GLOBAL LOCAL')
@graken()
def _table_commit_action_(self):
with self._choice():
with self._option():
self._token('PRESERVE')
with self._option():
self._token('DELETE')
self._error('expecting one of: DELETE PRESERVE')
@graken()
def _table_element_list_(self):
self._left_paren_()
def sep0():
self._token(',')
def block0():
self._table_element_()
self._positive_closure(block0, prefix=sep0)
self._right_paren_()
@graken()
def _table_element_(self):
with self._choice():
with self._option():
self._column_definition_()
with self._option():
self._table_constraint_definition_()
with self._option():
self._like_clause_()
with self._option():
self._self_referencing_column_specification_()
with self._option():
self._column_options_()
self._error('no available options')
@graken()
def _typed_table_clause_(self):
self._token('OF')
self._schema_qualified_name_()
with self._optional():
self._subtable_clause_()
with self._optional():
self._table_element_list_()
@graken()
def _self_referencing_column_specification_(self):
self._token('REF')
self._token('IS')
self._identifier_()
self._reference_generation_()
@graken()
def _reference_generation_(self):
with self._choice():
with self._option():
self._token('SYSTEM')
self._token('GENERATED')
with self._option():
self._token('USER')
self._token('GENERATED')
with self._option():
self._token('DERIVED')
self._error('expecting one of: DERIVED SYSTEM USER')
@graken()
def _column_options_(self):
self._identifier_()
self._token('WITH')
self._token('OPTIONS')
self._column_option_list_()
@graken()
def _column_option_list_(self):
with self._optional():
self._scope_clause_()
with self._optional():
self._default_clause_()
with self._optional():
def block0():
self._column_constraint_definition_()
self._positive_closure(block0)
@graken()
def _subtable_clause_(self):
self._token('UNDER')
self._table_name_()
@graken()
def _like_clause_(self):
self._token('LIKE')
self._table_name_()
with self._optional():
self._like_options_()
@graken()
def _like_options_(self):
with self._choice():
with self._option():
self._identity_option_()
with self._option():
self._column_default_option_()
self._error('no available options')
@graken()
def _identity_option_(self):
with self._choice():
with self._option():
self._token('INCLUDING')
self._token('IDENTITY')
with self._option():
self._token('EXCLUDING')
self._token('IDENTITY')
self._error('expecting one of: EXCLUDING INCLUDING')
@graken()
def _column_default_option_(self):
with self._choice():
with self._option():
self._token('INCLUDING')
self._token('DEFAULTS')
with self._option():
self._token('EXCLUDING')
self._token('DEFAULTS')
self._error('expecting one of: EXCLUDING INCLUDING')
@graken()
def _as_subquery_clause_(self):
with self._optional():
self._left_paren_()
self._column_name_list_()
self._right_paren_()
self._token('AS')
self._subquery_()
self._with_or_without_data_()
@graken()
def _with_or_without_data_(self):
with self._choice():
with self._option():
self._token('WITH')
self._token('NO')
self._token('DATA')
with self._option():
self._token('WITH')
self._token('DATA')
self._error('expecting one of: WITH')
    @graken()
    def _column_definition_(self):
        """Parse <column definition>: column name, optional data type or
        domain, optional reference scope check, optional default/identity/
        generation clause, optional column constraints, optional collation.
        """
        self._identifier_()
        with self._optional():
            self._data_type_or_domain_name_()
        with self._optional():
            self._reference_scope_check_()
        with self._optional():
            # At most one of: DEFAULT clause, identity column spec,
            # generation clause.
            with self._choice():
                with self._option():
                    self._default_clause_()
                with self._option():
                    self._identity_column_specification_()
                with self._option():
                    self._generation_clause_()
                self._error('no available options')
        with self._optional():
            def block1():
                self._column_constraint_definition_()
            self._positive_closure(block1)
        with self._optional():
            self._collate_clause_()
@graken()
def _data_type_or_domain_name_(self):
with self._choice():
with self._option():
self._data_type_()
with self._option():
self._schema_qualified_name_()
self._error('no available options')
@graken()
def _column_constraint_definition_(self):
with self._optional():
self._constraint_name_definition_()
self._column_constraint_()
with self._optional():
self._constraint_characteristics_()
@graken()
def _column_constraint_(self):
with self._choice():
with self._option():
self._token('NOT')
self._token('NULL')
with self._option():
self._unique_specification_()
with self._option():
self._references_specification_()
with self._option():
self._check_constraint_definition_()
self._error('expecting one of: NOT')
@graken()
def _identity_column_specification_(self):
self._token('GENERATED')
with self._group():
with self._choice():
with self._option():
self._token('ALWAYS')
with self._option():
self._token('BY')
self._token('DEFAULT')
self._error('expecting one of: ALWAYS BY')
self._token('AS')
self._token('IDENTITY')
with self._optional():
self._left_paren_()
self._common_sequence_generator_options_()
self._right_paren_()
@graken()
def _generation_clause_(self):
self._generation_rule_()
self._token('AS')
self._generation_expression_()
@graken()
def _generation_rule_(self):
self._token('GENERATED')
self._token('ALWAYS')
@graken()
def _generation_expression_(self):
self._left_paren_()
self._value_expression_()
self._right_paren_()
@graken()
def _default_clause_(self):
self._token('DEFAULT')
self._default_option_()
@graken()
def _default_option_(self):
with self._choice():
with self._option():
self._literal_()
with self._option():
self._datetime_value_function_()
with self._option():
self._token('USER')
with self._option():
self._token('CURRENT_USER')
with self._option():
self._token('CURRENT_ROLE')
with self._option():
self._token('SESSION_USER')
with self._option():
self._token('SYSTEM_USER')
with self._option():
self._token('CURRENT_PATH')
with self._option():
self._implicitly_typed_value_specification_()
self._error('expecting one of: CURRENT_PATH CURRENT_ROLE CURRENT_USER SESSION_USER SYSTEM_USER USER')
@graken()
def _table_constraint_definition_(self):
with self._optional():
self._constraint_name_definition_()
self._table_constraint_()
with self._optional():
self._constraint_characteristics_()
@graken()
def _table_constraint_(self):
with self._choice():
with self._option():
self._unique_constraint_definition_()
with self._option():
self._referential_constraint_definition_()
with self._option():
self._check_constraint_definition_()
self._error('no available options')
@graken()
def _unique_constraint_definition_(self):
with self._choice():
with self._option():
self._unique_specification_()
self._left_paren_()
self._column_name_list_()
self._right_paren_()
with self._option():
self._token('UNIQUE')
self._token('(')
self._token('VALUE')
self._token(')')
self._error('expecting one of: UNIQUE')
@graken()
def _unique_specification_(self):
with self._choice():
with self._option():
self._token('UNIQUE')
with self._option():
self._token('PRIMARY')
self._token('KEY')
self._error('expecting one of: PRIMARY UNIQUE')
@graken()
def _referential_constraint_definition_(self):
self._token('FOREIGN')
self._token('KEY')
self._left_paren_()
self._column_name_list_()
self._right_paren_()
self._references_specification_()
@graken()
def _references_specification_(self):
self._token('REFERENCES')
self._referenced_table_and_columns_()
with self._optional():
self._token('MATCH')
self._match_type_()
with self._optional():
self._referential_triggered_action_()
@graken()
def _match_type_(self):
with self._choice():
with self._option():
self._token('FULL')
with self._option():
self._token('PARTIAL')
with self._option():
self._token('SIMPLE')
self._error('expecting one of: FULL PARTIAL SIMPLE')
@graken()
def _referenced_table_and_columns_(self):
self._table_name_()
with self._optional():
self._left_paren_()
self._column_name_list_()
self._right_paren_()
    @graken()
    def _referential_triggered_action_(self):
        """Parse <referential triggered action>: an update rule and/or a
        delete rule, in either order."""
        with self._choice():
            with self._option():
                self._update_rule_()
                with self._optional():
                    self._delete_rule_()
            with self._option():
                self._delete_rule_()
                with self._optional():
                    self._update_rule_()
            self._error('no available options')
@graken()
def _update_rule_(self):
self._token('ON')
self._token('UPDATE')
self._referential_action_()
@graken()
def _delete_rule_(self):
self._token('ON')
self._token('DELETE')
self._referential_action_()
@graken()
def _referential_action_(self):
with self._choice():
with self._option():
self._token('CASCADE')
with self._option():
self._token('SET')
self._token('NULL')
with self._option():
self._token('SET')
self._token('DEFAULT')
with self._option():
self._token('RESTRICT')
with self._option():
self._token('NO')
self._token('ACTION')
self._error('expecting one of: CASCADE NO RESTRICT SET')
@graken()
def _check_constraint_definition_(self):
self._token('CHECK')
self._left_paren_()
self._search_condition_()
self._right_paren_()
@graken()
def _alter_table_statement_(self):
self._token('ALTER')
self._token('TABLE')
self._table_name_()
self._alter_table_action_()
@graken()
def _alter_table_action_(self):
with self._choice():
with self._option():
self._add_column_definition_()
with self._option():
self._alter_column_definition_()
with self._option():
self._drop_column_definition_()
with self._option():
self._add_table_constraint_definition_()
with self._option():
self._drop_table_constraint_definition_()
self._error('no available options')
@graken()
def _add_column_definition_(self):
self._token('ADD')
with self._optional():
self._token('COLUMN')
self._column_definition_()
@graken()
def _alter_column_definition_(self):
self._token('ALTER')
with self._optional():
self._token('COLUMN')
self._identifier_()
self._alter_column_action_()
@graken()
def _alter_column_action_(self):
with self._choice():
with self._option():
self._set_column_default_clause_()
with self._option():
self._drop_column_default_clause_()
with self._option():
self._add_column_scope_clause_()
with self._option():
self._drop_column_scope_clause_()
with self._option():
self._alter_identity_column_specification_()
self._error('no available options')
@graken()
def _set_column_default_clause_(self):
self._token('SET')
self._default_clause_()
@graken()
def _drop_column_default_clause_(self):
self._token('DROP')
self._token('DEFAULT')
@graken()
def _add_column_scope_clause_(self):
self._token('ADD')
self._scope_clause_()
@graken()
def _drop_column_scope_clause_(self):
self._token('DROP')
self._token('SCOPE')
self._drop_behavior_()
@graken()
def _alter_identity_column_specification_(self):
def block0():
self._alter_identity_column_option_()
self._positive_closure(block0)
@graken()
def _alter_identity_column_option_(self):
with self._choice():
with self._option():
self._alter_sequence_generator_restart_option_()
with self._option():
self._token('SET')
self._basic_sequence_generator_option_()
self._error('no available options')
@graken()
def _drop_column_definition_(self):
self._token('DROP')
with self._optional():
self._token('COLUMN')
self._identifier_()
self._drop_behavior_()
@graken()
def _add_table_constraint_definition_(self):
self._token('ADD')
self._table_constraint_definition_()
@graken()
def _drop_table_constraint_definition_(self):
self._token('DROP')
self._token('CONSTRAINT')
self._schema_qualified_name_()
self._drop_behavior_()
@graken()
def _drop_table_statement_(self):
self._token('DROP')
self._token('TABLE')
self._table_name_()
self._drop_behavior_()
@graken()
def _view_definition_(self):
self._token('CREATE')
with self._optional():
self._token('RECURSIVE')
self._token('VIEW')
self._table_name_()
self._view_specification_()
self._token('AS')
self._query_expression_()
with self._optional():
self._token('WITH')
with self._optional():
self._levels_clause_()
self._token('CHECK')
self._token('OPTION')
@graken()
def _view_specification_(self):
with self._choice():
with self._option():
self._regular_view_specification_()
with self._option():
self._referenceable_view_specification_()
self._error('no available options')
@graken()
def _regular_view_specification_(self):
with self._optional():
self._left_paren_()
self._column_name_list_()
self._right_paren_()
@graken()
def _referenceable_view_specification_(self):
self._token('OF')
self._schema_qualified_name_()
with self._optional():
self._subview_clause_()
with self._optional():
self._view_element_list_()
@graken()
def _subview_clause_(self):
self._token('UNDER')
self._table_name_()
@graken()
def _view_element_list_(self):
self._left_paren_()
def sep0():
self._token(',')
def block0():
self._view_element_()
self._positive_closure(block0, prefix=sep0)
self._right_paren_()
@graken()
def _view_element_(self):
with self._choice():
with self._option():
self._self_referencing_column_specification_()
with self._option():
self._view_column_option_()
self._error('no available options')
@graken()
def _view_column_option_(self):
self._identifier_()
self._token('WITH')
self._token('OPTIONS')
self._scope_clause_()
@graken()
def _levels_clause_(self):
with self._choice():
with self._option():
self._token('CASCADED')
with self._option():
self._token('LOCAL')
self._error('expecting one of: CASCADED LOCAL')
@graken()
def _drop_view_statement_(self):
self._token('DROP')
self._token('VIEW')
self._table_name_()
self._drop_behavior_()
@graken()
def _domain_definition_(self):
self._token('CREATE')
self._token('DOMAIN')
self._schema_qualified_name_()
with self._optional():
self._token('AS')
self._data_type_()
with self._optional():
self._default_clause_()
with self._optional():
def block0():
self._domain_constraint_()
self._positive_closure(block0)
with self._optional():
self._collate_clause_()
@graken()
def _domain_constraint_(self):
with self._optional():
self._constraint_name_definition_()
self._check_constraint_definition_()
with self._optional():
self._constraint_characteristics_()
@graken()
def _alter_domain_statement_(self):
self._token('ALTER')
self._token('DOMAIN')
self._schema_qualified_name_()
self._alter_domain_action_()
@graken()
def _alter_domain_action_(self):
with self._choice():
with self._option():
self._set_domain_default_clause_()
with self._option():
self._drop_domain_default_clause_()
with self._option():
self._add_domain_constraint_definition_()
with self._option():
self._drop_domain_constraint_definition_()
self._error('no available options')
@graken()
def _set_domain_default_clause_(self):
self._token('SET')
self._default_clause_()
@graken()
def _drop_domain_default_clause_(self):
self._token('DROP')
self._token('DEFAULT')
@graken()
def _add_domain_constraint_definition_(self):
self._token('ADD')
self._domain_constraint_()
@graken()
def _drop_domain_constraint_definition_(self):
self._token('DROP')
self._token('CONSTRAINT')
self._schema_qualified_name_()
@graken()
def _drop_domain_statement_(self):
self._token('DROP')
self._token('DOMAIN')
self._schema_qualified_name_()
self._drop_behavior_()
@graken()
def _character_set_definition_(self):
self._token('CREATE')
self._token('CHARACTER')
self._token('SET')
self._character_set_name_()
with self._optional():
self._token('AS')
self._character_set_source_()
with self._optional():
self._collate_clause_()
@graken()
def _character_set_source_(self):
self._token('GET')
self._character_set_name_()
@graken()
def _drop_character_set_statement_(self):
self._token('DROP')
self._token('CHARACTER')
self._token('SET')
self._character_set_name_()
@graken()
def _collation_definition_(self):
self._token('CREATE')
self._token('COLLATION')
self._schema_qualified_name_()
self._token('FOR')
self._character_set_name_()
self._token('FROM')
self._schema_qualified_name_()
with self._optional():
self._pad_characteristic_()
@graken()
def _pad_characteristic_(self):
with self._choice():
with self._option():
self._token('NO')
self._token('PAD')
with self._option():
self._token('PAD')
self._token('SPACE')
self._error('expecting one of: NO PAD')
@graken()
def _drop_collation_statement_(self):
self._token('DROP')
self._token('COLLATION')
self._schema_qualified_name_()
self._drop_behavior_()
@graken()
def _transliteration_definition_(self):
self._token('CREATE')
self._token('TRANSLATION')
self._schema_qualified_name_()
self._token('FOR')
self._character_set_name_()
self._token('TO')
self._character_set_name_()
self._token('FROM')
self._transliteration_source_()
@graken()
def _transliteration_source_(self):
with self._choice():
with self._option():
self._schema_qualified_name_()
with self._option():
self._specific_routine_designator_()
self._error('no available options')
@graken()
def _drop_transliteration_statement_(self):
self._token('DROP')
self._token('TRANSLATION')
self._schema_qualified_name_()
@graken()
def _assertion_definition_(self):
self._token('CREATE')
self._token('ASSERTION')
self._schema_qualified_name_()
self._token('CHECK')
self._left_paren_()
self._search_condition_()
self._right_paren_()
with self._optional():
self._constraint_characteristics_()
@graken()
def _drop_assertion_statement_(self):
self._token('DROP')
self._token('ASSERTION')
self._schema_qualified_name_()
@graken()
def _trigger_definition_(self):
self._token('CREATE')
self._token('TRIGGER')
self._schema_qualified_name_()
self._trigger_action_time_()
self._trigger_event_()
self._token('ON')
self._table_name_()
with self._optional():
self._token('REFERENCING')
self._old_or_new_values_alias_list_()
self._triggered_action_()
@graken()
def _trigger_action_time_(self):
with self._choice():
with self._option():
self._token('BEFORE')
with self._option():
self._token('AFTER')
self._error('expecting one of: AFTER BEFORE')
@graken()
def _trigger_event_(self):
with self._choice():
with self._option():
self._token('INSERT')
with self._option():
self._token('DELETE')
with self._option():
self._token('UPDATE')
with self._optional():
self._token('OF')
self._column_name_list_()
self._error('expecting one of: DELETE INSERT UPDATE')
    @graken()
    def _triggered_action_(self):
        """Parse <triggered action>:
        [FOR EACH (ROW | STATEMENT)] [WHEN ( <search condition> )]
        <triggered SQL statement>.
        """
        with self._optional():
            self._token('FOR')
            self._token('EACH')
            with self._group():
                with self._choice():
                    with self._option():
                        self._token('ROW')
                    with self._option():
                        self._token('STATEMENT')
                    self._error('expecting one of: ROW STATEMENT')
        with self._optional():
            self._token('WHEN')
            self._left_paren_()
            self._search_condition_()
            self._right_paren_()
        self._triggered_sql_statement_()
@graken()
def _triggered_sql_statement_(self):
with self._choice():
with self._option():
self._sql_procedure_statement_()
with self._option():
self._token('BEGIN')
self._token('ATOMIC')
def block0():
self._sql_procedure_statement_()
self._semicolon_()
self._positive_closure(block0)
self._token('END')
self._error('no available options')
@graken()
def _old_or_new_values_alias_list_(self):
def block0():
self._old_or_new_values_alias_()
self._positive_closure(block0)
    @graken()
    def _old_or_new_values_alias_(self):
        """Parse <old or new values alias>:
        OLD [ROW] <as clause> | NEW [ROW] <as clause> |
        OLD TABLE <as clause> | NEW TABLE <as clause>.

        NOTE(review): the [ROW] branches are tried before the TABLE
        branches; this is only safe if _as_clause_ rejects the TABLE
        keyword (so the row branch fails and the choice falls through to
        the TABLE branch) -- confirm against _as_clause_.
        """
        with self._choice():
            with self._option():
                self._token('OLD')
                with self._optional():
                    self._token('ROW')
                self._as_clause_()
            with self._option():
                self._token('NEW')
                with self._optional():
                    self._token('ROW')
                self._as_clause_()
            with self._option():
                self._token('OLD')
                self._token('TABLE')
                self._as_clause_()
            with self._option():
                self._token('NEW')
                self._token('TABLE')
                self._as_clause_()
            self._error('no available options')
@graken()
def _drop_trigger_statement_(self):
self._token('DROP')
self._token('TRIGGER')
self._schema_qualified_name_()
@graken()
def _user_defined_type_definition_(self):
self._token('CREATE')
self._token('TYPE')
self._user_defined_type_body_()
@graken()
def _user_defined_type_body_(self):
self._schema_qualified_name_()
with self._optional():
self._subtype_clause_()
with self._optional():
self._token('AS')
self._representation_()
with self._optional():
self._user_defined_type_option_list_()
with self._optional():
self._method_specification_list_()
# NOTE(review): grako/TatSu-generated recursive-descent parser rules — one
# method per SQL grammar production; @graken wires rule bookkeeping. Do not
# hand-edit the logic; regenerate from the grammar instead.
# Reading key: _token('X') matches the literal keyword X; _choice/_option is
# alternation (the trailing _error fires when no option matched); _optional
# is [...]; _positive_closure(block, prefix=sep) is a one-or-more repetition
# with an optional separator rule.
@graken()
def _user_defined_type_option_list_(self):
    # One <user-defined type option>, optionally followed by more.
    self._user_defined_type_option_()
    with self._optional():
        def block0():
            self._user_defined_type_option_()
        self._positive_closure(block0)

@graken()
def _user_defined_type_option_(self):
    with self._choice():
        with self._option():
            self._instantiable_clause_()
        with self._option():
            self._finality_()
        with self._option():
            self._reference_type_specification_()
        with self._option():
            self._ref_cast_option_()
        with self._option():
            self._cast_option_()
        self._error('no available options')

@graken()
def _subtype_clause_(self):
    # UNDER <supertype name>
    self._token('UNDER')
    self._schema_qualified_name_()

@graken()
def _representation_(self):
    # Either a predefined type or a parenthesized member list.
    with self._choice():
        with self._option():
            self._predefined_type_()
        with self._option():
            self._member_list_()
        self._error('no available options')

@graken()
def _member_list_(self):
    # ( <member> [, <member>]... )
    self._left_paren_()
    def sep0():
        self._token(',')
    def block0():
        self._member_()
    self._positive_closure(block0, prefix=sep0)
    self._right_paren_()

@graken()
def _member_(self):
    self._attribute_definition_()

@graken()
def _instantiable_clause_(self):
    # INSTANTIABLE | NOT INSTANTIABLE
    with self._choice():
        with self._option():
            self._token('INSTANTIABLE')
        with self._option():
            self._token('NOT')
            self._token('INSTANTIABLE')
        self._error('expecting one of: INSTANTIABLE NOT')

@graken()
def _finality_(self):
    # FINAL | NOT FINAL
    with self._choice():
        with self._option():
            self._token('FINAL')
        with self._option():
            self._token('NOT')
            self._token('FINAL')
        self._error('expecting one of: FINAL NOT')

@graken()
def _reference_type_specification_(self):
    with self._choice():
        with self._option():
            self._user_defined_representation_()
        with self._option():
            self._derived_representation_()
        with self._option():
            self._system_generated_representation_()
        self._error('no available options')

@graken()
def _user_defined_representation_(self):
    # REF USING <predefined type>
    self._token('REF')
    self._token('USING')
    self._predefined_type_()

@graken()
def _derived_representation_(self):
    # REF FROM ( <attribute list> )
    self._token('REF')
    self._token('FROM')
    self._list_of_attributes_()

@graken()
def _system_generated_representation_(self):
    self._token('REF')
    self._token('IS')
    self._token('SYSTEM')
    self._token('GENERATED')

@graken()
def _cast_to_ref_(self):
    # CAST ( SOURCE AS REF ) WITH <identifier>
    self._token('CAST')
    self._left_paren_()
    self._token('SOURCE')
    self._token('AS')
    self._token('REF')
    self._right_paren_()
    self._token('WITH')
    self._identifier_()

@graken()
def _cast_to_type_(self):
    # CAST ( REF AS SOURCE ) WITH <identifier>
    self._token('CAST')
    self._left_paren_()
    self._token('REF')
    self._token('AS')
    self._token('SOURCE')
    self._right_paren_()
    self._token('WITH')
    self._identifier_()

@graken()
def _list_of_attributes_(self):
    self._left_paren_()
    self._identifier_list_()
    self._right_paren_()

@graken()
def _cast_to_distinct_(self):
    # CAST ( SOURCE AS DISTINCT ) WITH <identifier>
    self._token('CAST')
    self._left_paren_()
    self._token('SOURCE')
    self._token('AS')
    self._token('DISTINCT')
    self._right_paren_()
    self._token('WITH')
    self._identifier_()

@graken()
def _cast_to_source_(self):
    # CAST ( DISTINCT AS SOURCE ) WITH <identifier>
    self._token('CAST')
    self._left_paren_()
    self._token('DISTINCT')
    self._token('AS')
    self._token('SOURCE')
    self._right_paren_()
    self._token('WITH')
    self._identifier_()

@graken()
def _method_specification_list_(self):
    # Comma-separated list of method specifications.
    def sep0():
        self._token(',')
    def block0():
        self._method_specification_()
    self._positive_closure(block0, prefix=sep0)

@graken()
def _method_specification_(self):
    with self._choice():
        with self._option():
            self._original_method_specification_()
        with self._option():
            self._overriding_method_specification_()
        self._error('no available options')

@graken()
def _original_method_specification_(self):
    # <partial spec> [SELF AS RESULT] [SELF AS LOCATOR] [<characteristics>]
    self._partial_method_specification_()
    with self._optional():
        self._token('SELF')
        self._token('AS')
        self._token('RESULT')
    with self._optional():
        self._token('SELF')
        self._token('AS')
        self._token('LOCATOR')
    with self._optional():
        self._method_characteristics_()

@graken()
def _overriding_method_specification_(self):
    self._token('OVERRIDING')
    self._partial_method_specification_()

@graken()
def _partial_method_specification_(self):
    # [INSTANCE|STATIC|CONSTRUCTOR] METHOD <name> <params> <returns>
    # [SPECIFIC <specific name>]
    with self._optional():
        with self._choice():
            with self._option():
                self._token('INSTANCE')
            with self._option():
                self._token('STATIC')
            with self._option():
                self._token('CONSTRUCTOR')
            self._error('expecting one of: CONSTRUCTOR INSTANCE STATIC')
    self._token('METHOD')
    self._identifier_()
    self._sql_parameter_declaration_list_()
    self._returns_clause_()
    with self._optional():
        self._token('SPECIFIC')
        self._schema_qualified_name_()

@graken()
def _method_characteristics_(self):
    # One or more method characteristics.
    def block0():
        self._method_characteristic_()
    self._positive_closure(block0)

@graken()
def _method_characteristic_(self):
    with self._choice():
        with self._option():
            self._language_clause_()
        with self._option():
            self._parameter_style_clause_()
        with self._option():
            self._deterministic_characteristic_()
        with self._option():
            self._sql_data_access_indication_()
        with self._option():
            self._null_call_clause_()
        self._error('no available options')

@graken()
def _attribute_definition_(self):
    # <name> <data type> [<reference scope check>] [<default>] [<collate>]
    self._identifier_()
    self._data_type_()
    with self._optional():
        self._reference_scope_check_()
    with self._optional():
        self._default_clause_()
    with self._optional():
        self._collate_clause_()
# NOTE(review): generated parser rules (grako/TatSu) for ALTER/DROP TYPE and
# SQL-invoked routine definitions — regenerate from the grammar, do not
# hand-edit logic.
@graken()
def _alter_type_statement_(self):
    # ALTER TYPE <name> <alter type action>
    self._token('ALTER')
    self._token('TYPE')
    self._schema_qualified_name_()
    self._alter_type_action_()

@graken()
def _alter_type_action_(self):
    with self._choice():
        with self._option():
            self._add_attribute_definition_()
        with self._option():
            self._drop_attribute_definition_()
        with self._option():
            self._add_original_method_specification_()
        with self._option():
            self._add_overriding_method_specification_()
        with self._option():
            self._drop_method_specification_()
        self._error('no available options')

@graken()
def _add_attribute_definition_(self):
    self._token('ADD')
    self._token('ATTRIBUTE')
    self._attribute_definition_()

@graken()
def _drop_attribute_definition_(self):
    # DROP ATTRIBUTE <name> RESTRICT
    self._token('DROP')
    self._token('ATTRIBUTE')
    self._identifier_()
    self._token('RESTRICT')

@graken()
def _add_original_method_specification_(self):
    self._token('ADD')
    self._original_method_specification_()

@graken()
def _add_overriding_method_specification_(self):
    self._token('ADD')
    self._overriding_method_specification_()

@graken()
def _drop_method_specification_(self):
    self._token('DROP')
    self._specific_method_specification_designator_()
    self._token('RESTRICT')

@graken()
def _specific_method_specification_designator_(self):
    # [INSTANCE|STATIC|CONSTRUCTOR] METHOD <name> <data type list>
    with self._optional():
        with self._choice():
            with self._option():
                self._token('INSTANCE')
            with self._option():
                self._token('STATIC')
            with self._option():
                self._token('CONSTRUCTOR')
            self._error('expecting one of: CONSTRUCTOR INSTANCE STATIC')
    self._token('METHOD')
    self._identifier_()
    self._data_type_list_()

@graken()
def _drop_data_type_statement_(self):
    # DROP TYPE <name> <drop behavior>
    self._token('DROP')
    self._token('TYPE')
    self._schema_qualified_name_()
    self._drop_behavior_()

@graken()
def _schema_routine_(self):
    with self._choice():
        with self._option():
            self._schema_procedure_()
        with self._option():
            self._schema_function_()
        self._error('no available options')

@graken()
def _schema_procedure_(self):
    self._token('CREATE')
    self._sql_invoked_procedure_()

@graken()
def _schema_function_(self):
    self._token('CREATE')
    self._sql_invoked_function_()

@graken()
def _sql_invoked_procedure_(self):
    # PROCEDURE <name> <params> <characteristics> <body>
    self._token('PROCEDURE')
    self._schema_qualified_name_()
    self._sql_parameter_declaration_list_()
    self._routine_characteristics_()
    self._routine_body_()

@graken()
def _sql_invoked_function_(self):
    # (<function spec> | <method spec designator>) <routine body>
    with self._group():
        with self._choice():
            with self._option():
                self._function_specification_()
            with self._option():
                self._method_specification_designator_()
            self._error('no available options')
    self._routine_body_()

@graken()
def _sql_parameter_declaration_list_(self):
    # ( [<param> [, <param>]...] ) — the list itself may be empty.
    self._left_paren_()
    with self._optional():
        def sep0():
            self._token(',')
        def block0():
            self._sql_parameter_declaration_()
        self._positive_closure(block0, prefix=sep0)
    self._right_paren_()

@graken()
def _sql_parameter_declaration_(self):
    # [<mode>] [<name>] <type> [RESULT]
    with self._optional():
        self._parameter_mode_()
    with self._optional():
        self._identifier_()
    self._parameter_type_()
    with self._optional():
        self._token('RESULT')

@graken()
def _parameter_mode_(self):
    with self._choice():
        with self._option():
            self._token('IN')
        with self._option():
            self._token('OUT')
        with self._option():
            self._token('INOUT')
        self._error('expecting one of: IN INOUT OUT')

@graken()
def _parameter_type_(self):
    self._data_type_()
    with self._optional():
        self._locator_indication_()

@graken()
def _locator_indication_(self):
    self._token('AS')
    self._token('LOCATOR')

@graken()
def _function_specification_(self):
    # FUNCTION <name> <params> <returns> <characteristics> [<dispatch>]
    self._token('FUNCTION')
    self._schema_qualified_name_()
    self._sql_parameter_declaration_list_()
    self._returns_clause_()
    self._routine_characteristics_()
    with self._optional():
        self._dispatch_clause_()

@graken()
def _method_specification_designator_(self):
    # SPECIFIC METHOD <name>
    # | [INSTANCE|STATIC|CONSTRUCTOR] METHOD <name> <params> [<returns>]
    #   FOR <type name>
    with self._choice():
        with self._option():
            self._token('SPECIFIC')
            self._token('METHOD')
            self._schema_qualified_name_()
        with self._option():
            with self._optional():
                with self._choice():
                    with self._option():
                        self._token('INSTANCE')
                    with self._option():
                        self._token('STATIC')
                    with self._option():
                        self._token('CONSTRUCTOR')
                    self._error('expecting one of: CONSTRUCTOR INSTANCE STATIC')
            self._token('METHOD')
            self._identifier_()
            self._sql_parameter_declaration_list_()
            with self._optional():
                self._returns_clause_()
            self._token('FOR')
            self._schema_qualified_name_()
        self._error('no available options')

@graken()
def _routine_characteristics_(self):
    # Zero or more routine characteristics.
    with self._optional():
        def block0():
            self._routine_characteristic_()
        self._positive_closure(block0)

@graken()
def _routine_characteristic_(self):
    with self._choice():
        with self._option():
            self._language_clause_()
        with self._option():
            self._parameter_style_clause_()
        with self._option():
            self._token('SPECIFIC')
            self._schema_qualified_name_()
        with self._option():
            self._deterministic_characteristic_()
        with self._option():
            self._sql_data_access_indication_()
        with self._option():
            self._null_call_clause_()
        with self._option():
            self._dynamic_result_sets_characteristic_()
        with self._option():
            self._savepoint_level_indication_()
        self._error('no available options')
# NOTE(review): generated parser rules (grako/TatSu) for routine
# characteristics, RETURNS clauses, routine bodies and transform-group
# specifications — regenerate from the grammar, do not hand-edit logic.
@graken()
def _savepoint_level_indication_(self):
    # NEW SAVEPOINT LEVEL | OLD SAVEPOINT LEVEL
    with self._choice():
        with self._option():
            self._token('NEW')
            self._token('SAVEPOINT')
            self._token('LEVEL')
        with self._option():
            self._token('OLD')
            self._token('SAVEPOINT')
            self._token('LEVEL')
        self._error('expecting one of: NEW OLD')

@graken()
def _dynamic_result_sets_characteristic_(self):
    # DYNAMIC RESULT SETS <unsigned integer>
    self._token('DYNAMIC')
    self._token('RESULT')
    self._token('SETS')
    self._unsigned_integer_()

@graken()
def _parameter_style_clause_(self):
    self._token('PARAMETER')
    self._token('STYLE')
    self._parameter_style_()

@graken()
def _dispatch_clause_(self):
    self._token('STATIC')
    self._token('DISPATCH')

@graken()
def _returns_clause_(self):
    self._token('RETURNS')
    self._returns_type_()

@graken()
def _returns_type_(self):
    # <returns data type> [<result cast>] | <returns table type>
    with self._choice():
        with self._option():
            self._returns_data_type_()
            with self._optional():
                self._result_cast_()
        with self._option():
            self._returns_table_type_()
        self._error('no available options')

@graken()
def _returns_table_type_(self):
    self._token('TABLE')
    self._table_function_column_list_()

@graken()
def _table_function_column_list_(self):
    # ( <column element> [, <column element>]... )
    self._left_paren_()
    def sep0():
        self._token(',')
    def block0():
        self._table_function_column_list_element_()
    self._positive_closure(block0, prefix=sep0)
    self._right_paren_()

@graken()
def _table_function_column_list_element_(self):
    self._identifier_()
    self._data_type_()

@graken()
def _result_cast_(self):
    self._token('CAST')
    self._token('FROM')
    self._result_cast_from_type_()

@graken()
def _result_cast_from_type_(self):
    self._data_type_()
    with self._optional():
        self._locator_indication_()

@graken()
def _returns_data_type_(self):
    self._data_type_()
    with self._optional():
        self._locator_indication_()

@graken()
def _routine_body_(self):
    # Either an SQL body or an external (host-language) reference.
    with self._choice():
        with self._option():
            self._sql_routine_spec_()
        with self._option():
            self._external_body_reference_()
        self._error('no available options')

@graken()
def _sql_routine_spec_(self):
    with self._optional():
        self._rights_clause_()
    self._sql_procedure_statement_()

@graken()
def _rights_clause_(self):
    # SQL SECURITY INVOKER | SQL SECURITY DEFINER
    with self._choice():
        with self._option():
            self._token('SQL')
            self._token('SECURITY')
            self._token('INVOKER')
        with self._option():
            self._token('SQL')
            self._token('SECURITY')
            self._token('DEFINER')
        self._error('expecting one of: SQL')

@graken()
def _external_body_reference_(self):
    # EXTERNAL [NAME <external routine name>] [<parameter style>]
    # [<transform group spec>] [<external security clause>]
    self._token('EXTERNAL')
    with self._optional():
        self._token('NAME')
        self._external_routine_name_()
    with self._optional():
        self._parameter_style_clause_()
    with self._optional():
        self._transform_group_specification_()
    with self._optional():
        self._external_security_clause_()

@graken()
def _external_security_clause_(self):
    with self._choice():
        with self._option():
            self._token('EXTERNAL')
            self._token('SECURITY')
            self._token('DEFINER')
        with self._option():
            self._token('EXTERNAL')
            self._token('SECURITY')
            self._token('INVOKER')
        with self._option():
            self._token('EXTERNAL')
            self._token('SECURITY')
            self._token('IMPLEMENTATION')
            self._token('DEFINED')
        self._error('expecting one of: EXTERNAL')

@graken()
def _parameter_style_(self):
    with self._choice():
        with self._option():
            self._token('SQL')
        with self._option():
            self._token('GENERAL')
        self._error('expecting one of: GENERAL SQL')

@graken()
def _deterministic_characteristic_(self):
    with self._choice():
        with self._option():
            self._token('DETERMINISTIC')
        with self._option():
            self._token('NOT')
            self._token('DETERMINISTIC')
        self._error('expecting one of: DETERMINISTIC NOT')

@graken()
def _sql_data_access_indication_(self):
    # NO SQL | CONTAINS SQL | READS SQL DATA | MODIFIES SQL DATA
    with self._choice():
        with self._option():
            self._token('NO')
            self._token('SQL')
        with self._option():
            self._token('CONTAINS')
            self._token('SQL')
        with self._option():
            self._token('READS')
            self._token('SQL')
            self._token('DATA')
        with self._option():
            self._token('MODIFIES')
            self._token('SQL')
            self._token('DATA')
        self._error('expecting one of: CONTAINS MODIFIES NO READS')

@graken()
def _null_call_clause_(self):
    # RETURNS NULL ON NULL INPUT | CALLED ON NULL INPUT
    with self._choice():
        with self._option():
            self._token('RETURNS')
            self._token('NULL')
            self._token('ON')
            self._token('NULL')
            self._token('INPUT')
        with self._option():
            self._token('CALLED')
            self._token('ON')
            self._token('NULL')
            self._token('INPUT')
        self._error('expecting one of: CALLED RETURNS')

@graken()
def _transform_group_specification_(self):
    # TRANSFORM GROUP (<group name> | <multiple group specification>)
    self._token('TRANSFORM')
    self._token('GROUP')
    with self._group():
        with self._choice():
            with self._option():
                self._identifier_()
            with self._option():
                self._multiple_group_specification_()
            self._error('no available options')

@graken()
def _multiple_group_specification_(self):
    # Comma-separated group specifications.
    def sep0():
        self._token(',')
    def block0():
        self._group_specification_()
    self._positive_closure(block0, prefix=sep0)

@graken()
def _group_specification_(self):
    # <group name> FOR TYPE <type name>
    self._identifier_()
    self._token('FOR')
    self._token('TYPE')
    self._schema_qualified_name_()
# NOTE(review): generated parser rules (grako/TatSu) for ALTER/DROP routine,
# user-defined casts, orderings and transforms — regenerate from the
# grammar, do not hand-edit logic.
@graken()
def _alter_routine_statement_(self):
    # ALTER <specific routine designator> <characteristics> RESTRICT
    self._token('ALTER')
    self._specific_routine_designator_()
    self._alter_routine_characteristics_()
    self._token('RESTRICT')

@graken()
def _alter_routine_characteristics_(self):
    def block0():
        self._alter_routine_characteristic_()
    self._positive_closure(block0)

@graken()
def _alter_routine_characteristic_(self):
    with self._choice():
        with self._option():
            self._language_clause_()
        with self._option():
            self._parameter_style_clause_()
        with self._option():
            self._sql_data_access_indication_()
        with self._option():
            self._null_call_clause_()
        with self._option():
            self._dynamic_result_sets_characteristic_()
        with self._option():
            self._token('NAME')
            self._external_routine_name_()
        self._error('no available options')

@graken()
def _drop_routine_statement_(self):
    self._token('DROP')
    self._specific_routine_designator_()
    self._drop_behavior_()

@graken()
def _user_defined_cast_definition_(self):
    # CREATE CAST ( <source> AS <target> ) WITH <routine> [AS ASSIGNMENT]
    self._token('CREATE')
    self._token('CAST')
    self._left_paren_()
    self._source_data_type_()
    self._token('AS')
    self._target_data_type_()
    self._right_paren_()
    self._token('WITH')
    self._specific_routine_designator_()
    with self._optional():
        self._token('AS')
        self._token('ASSIGNMENT')

@graken()
def _source_data_type_(self):
    self._data_type_()

@graken()
def _target_data_type_(self):
    self._data_type_()

@graken()
def _drop_user_defined_cast_statement_(self):
    # DROP CAST ( <source> AS <target> ) <drop behavior>
    self._token('DROP')
    self._token('CAST')
    self._left_paren_()
    self._source_data_type_()
    self._token('AS')
    self._target_data_type_()
    self._right_paren_()
    self._drop_behavior_()

@graken()
def _user_defined_ordering_definition_(self):
    # CREATE ORDERING FOR <type name> <ordering form>
    self._token('CREATE')
    self._token('ORDERING')
    self._token('FOR')
    self._schema_qualified_name_()
    self._ordering_form_()

@graken()
def _ordering_form_(self):
    with self._choice():
        with self._option():
            self._equals_ordering_form_()
        with self._option():
            self._full_ordering_form_()
        self._error('no available options')

@graken()
def _equals_ordering_form_(self):
    self._token('EQUALS')
    self._token('ONLY')
    self._token('BY')
    self._ordering_category_()

@graken()
def _full_ordering_form_(self):
    self._token('ORDER')
    self._token('FULL')
    self._token('BY')
    self._ordering_category_()

@graken()
def _ordering_category_(self):
    with self._choice():
        with self._option():
            self._relative_category_()
        with self._option():
            self._map_category_()
        with self._option():
            self._state_category_()
        self._error('no available options')

@graken()
def _relative_category_(self):
    self._token('RELATIVE')
    self._token('WITH')
    self._specific_routine_designator_()

@graken()
def _map_category_(self):
    self._token('MAP')
    self._token('WITH')
    self._specific_routine_designator_()

@graken()
def _state_category_(self):
    # STATE [<specific name>]
    self._token('STATE')
    with self._optional():
        self._schema_qualified_name_()

@graken()
def _drop_user_defined_ordering_statement_(self):
    self._token('DROP')
    self._token('ORDERING')
    self._token('FOR')
    self._schema_qualified_name_()
    self._drop_behavior_()

@graken()
def _transform_definition_(self):
    # CREATE {TRANSFORM|TRANSFORMS} FOR <type name> <transform group>...
    self._token('CREATE')
    with self._group():
        with self._choice():
            with self._option():
                self._token('TRANSFORM')
            with self._option():
                self._token('TRANSFORMS')
            self._error('expecting one of: TRANSFORM TRANSFORMS')
    self._token('FOR')
    self._schema_qualified_name_()
    def block1():
        self._transform_group_()
    self._positive_closure(block1)

@graken()
def _transform_group_(self):
    # <group name> ( <transform element list> )
    self._identifier_()
    self._left_paren_()
    self._transform_element_list_()
    self._right_paren_()

@graken()
def _transform_element_list_(self):
    # One element, optionally followed by a second (TO SQL / FROM SQL pair).
    self._transform_element_()
    with self._optional():
        self._comma_()
        self._transform_element_()

@graken()
def _transform_element_(self):
    with self._choice():
        with self._option():
            self._to_sql_()
        with self._option():
            self._from_sql_()
        self._error('no available options')

@graken()
def _to_sql_(self):
    self._token('TO')
    self._token('SQL')
    self._token('WITH')
    self._specific_routine_designator_()

@graken()
def _from_sql_(self):
    self._token('FROM')
    self._token('SQL')
    self._token('WITH')
    self._specific_routine_designator_()

@graken()
def _alter_transform_statement_(self):
    # ALTER {TRANSFORM|TRANSFORMS} FOR <type name> <alter group>...
    self._token('ALTER')
    with self._group():
        with self._choice():
            with self._option():
                self._token('TRANSFORM')
            with self._option():
                self._token('TRANSFORMS')
            self._error('expecting one of: TRANSFORM TRANSFORMS')
    self._token('FOR')
    self._schema_qualified_name_()
    def block1():
        self._alter_group_()
    self._positive_closure(block1)

@graken()
def _alter_group_(self):
    self._identifier_()
    self._left_paren_()
    self._alter_transform_action_list_()
    self._right_paren_()

@graken()
def _alter_transform_action_list_(self):
    def sep0():
        self._token(',')
    def block0():
        self._alter_transform_action_()
    self._positive_closure(block0, prefix=sep0)

@graken()
def _alter_transform_action_(self):
    with self._choice():
        with self._option():
            self._add_transform_element_list_()
        with self._option():
            self._drop_transform_element_list_()
        self._error('no available options')

@graken()
def _add_transform_element_list_(self):
    self._token('ADD')
    self._left_paren_()
    self._transform_element_list_()
    self._right_paren_()

@graken()
def _drop_transform_element_list_(self):
    # DROP ( <kind> [, <kind>] <drop behavior> )
    self._token('DROP')
    self._left_paren_()
    self._transform_kind_()
    with self._optional():
        self._comma_()
        self._transform_kind_()
    self._drop_behavior_()
    self._right_paren_()

@graken()
def _transform_kind_(self):
    # TO SQL | FROM SQL
    with self._choice():
        with self._option():
            self._token('TO')
            self._token('SQL')
        with self._option():
            self._token('FROM')
            self._token('SQL')
        self._error('expecting one of: FROM TO')

@graken()
def _drop_transform_statement_(self):
    # DROP {TRANSFORM|TRANSFORMS} <which> FOR <type name> <drop behavior>
    self._token('DROP')
    with self._group():
        with self._choice():
            with self._option():
                self._token('TRANSFORM')
            with self._option():
                self._token('TRANSFORMS')
            self._error('expecting one of: TRANSFORM TRANSFORMS')
    self._transforms_to_be_dropped_()
    self._token('FOR')
    self._schema_qualified_name_()
    self._drop_behavior_()

@graken()
def _transforms_to_be_dropped_(self):
    # ALL | <group name>
    with self._choice():
        with self._option():
            self._token('ALL')
        with self._option():
            self._identifier_()
        self._error('expecting one of: ALL')
# NOTE(review): generated parser rules (grako/TatSu) for sequence generator
# DDL — regenerate from the grammar, do not hand-edit logic.
@graken()
def _sequence_generator_definition_(self):
    # CREATE SEQUENCE <name> [<options>]
    self._token('CREATE')
    self._token('SEQUENCE')
    self._schema_qualified_name_()
    with self._optional():
        self._sequence_generator_options_()

@graken()
def _sequence_generator_options_(self):
    def block0():
        self._sequence_generator_option_()
    self._positive_closure(block0)

@graken()
def _sequence_generator_option_(self):
    with self._choice():
        with self._option():
            self._sequence_generator_data_type_option_()
        with self._option():
            self._common_sequence_generator_options_()
        self._error('no available options')

@graken()
def _common_sequence_generator_options_(self):
    def block0():
        self._common_sequence_generator_option_()
    self._positive_closure(block0)

@graken()
def _common_sequence_generator_option_(self):
    with self._choice():
        with self._option():
            self._sequence_generator_start_with_option_()
        with self._option():
            self._basic_sequence_generator_option_()
        self._error('no available options')

@graken()
def _basic_sequence_generator_option_(self):
    with self._choice():
        with self._option():
            self._sequence_generator_increment_by_option_()
        with self._option():
            self._sequence_generator_maxvalue_option_()
        with self._option():
            self._sequence_generator_minvalue_option_()
        with self._option():
            self._sequence_generator_cycle_option_()
        self._error('no available options')

@graken()
def _sequence_generator_data_type_option_(self):
    # AS <data type>
    self._token('AS')
    self._data_type_()

@graken()
def _sequence_generator_start_with_option_(self):
    self._token('START')
    self._token('WITH')
    self._signed_numeric_literal_()

@graken()
def _sequence_generator_increment_by_option_(self):
    self._token('INCREMENT')
    self._token('BY')
    self._signed_numeric_literal_()

@graken()
def _sequence_generator_maxvalue_option_(self):
    # MAXVALUE <value> | NO MAXVALUE
    with self._choice():
        with self._option():
            self._token('MAXVALUE')
            self._signed_numeric_literal_()
        with self._option():
            self._token('NO')
            self._token('MAXVALUE')
        self._error('expecting one of: NO')

@graken()
def _sequence_generator_minvalue_option_(self):
    # MINVALUE <value> | NO MINVALUE
    with self._choice():
        with self._option():
            self._token('MINVALUE')
            self._signed_numeric_literal_()
        with self._option():
            self._token('NO')
            self._token('MINVALUE')
        self._error('expecting one of: NO')

@graken()
def _sequence_generator_cycle_option_(self):
    with self._choice():
        with self._option():
            self._token('CYCLE')
        with self._option():
            self._token('NO')
            self._token('CYCLE')
        self._error('expecting one of: CYCLE NO')

@graken()
def _alter_sequence_generator_statement_(self):
    self._token('ALTER')
    self._token('SEQUENCE')
    self._schema_qualified_name_()
    self._alter_sequence_generator_options_()

@graken()
def _alter_sequence_generator_options_(self):
    def block0():
        self._alter_sequence_generator_option_()
    self._positive_closure(block0)

@graken()
def _alter_sequence_generator_option_(self):
    with self._choice():
        with self._option():
            self._alter_sequence_generator_restart_option_()
        with self._option():
            self._basic_sequence_generator_option_()
        self._error('no available options')

@graken()
def _alter_sequence_generator_restart_option_(self):
    self._token('RESTART')
    self._token('WITH')
    self._signed_numeric_literal_()

@graken()
def _drop_sequence_generator_statement_(self):
    self._token('DROP')
    self._token('SEQUENCE')
    self._schema_qualified_name_()
    self._drop_behavior_()
# NOTE(review): generated parser rules (grako/TatSu) for GRANT/REVOKE and
# role DDL — regenerate from the grammar, do not hand-edit logic.
@graken()
def _grant_statement_(self):
    with self._choice():
        with self._option():
            self._grant_privilege_statement_()
        with self._option():
            self._grant_role_statement_()
        self._error('no available options')

@graken()
def _grant_privilege_statement_(self):
    # GRANT <privileges> TO <grantee>,... [WITH HIERARCHY OPTION]
    # [WITH GRANT OPTION] [GRANTED BY <grantor>]
    self._token('GRANT')
    self._privileges_()
    self._token('TO')
    def sep0():
        self._token(',')
    def block0():
        self._grantee_()
    self._positive_closure(block0, prefix=sep0)
    with self._optional():
        self._token('WITH')
        self._token('HIERARCHY')
        self._token('OPTION')
    with self._optional():
        self._token('WITH')
        self._token('GRANT')
        self._token('OPTION')
    with self._optional():
        self._token('GRANTED')
        self._token('BY')
        self._grantor_()

@graken()
def _privileges_(self):
    self._object_privileges_()
    self._token('ON')
    self._object_name_()

@graken()
def _object_name_(self):
    # [TABLE] <table> | DOMAIN | COLLATION | CHARACTER SET | TRANSLATION
    # | TYPE | SEQUENCE <name> | <specific routine designator>
    with self._choice():
        with self._option():
            with self._optional():
                self._token('TABLE')
            self._table_name_()
        with self._option():
            self._token('DOMAIN')
            self._schema_qualified_name_()
        with self._option():
            self._token('COLLATION')
            self._schema_qualified_name_()
        with self._option():
            self._token('CHARACTER')
            self._token('SET')
            self._character_set_name_()
        with self._option():
            self._token('TRANSLATION')
            self._schema_qualified_name_()
        with self._option():
            self._token('TYPE')
            self._schema_qualified_name_()
        with self._option():
            self._token('SEQUENCE')
            self._schema_qualified_name_()
        with self._option():
            self._specific_routine_designator_()
        self._error('no available options')

@graken()
def _object_privileges_(self):
    # ALL PRIVILEGES | <action> [, <action>]...
    with self._choice():
        with self._option():
            self._token('ALL')
            self._token('PRIVILEGES')
        with self._option():
            def sep0():
                self._token(',')
            def block0():
                self._action_()
            self._positive_closure(block0, prefix=sep0)
        self._error('expecting one of: ALL')

@graken()
def _action_(self):
    # SELECT [(columns)|(methods)] | DELETE | INSERT/UPDATE/REFERENCES
    # [(columns)] | USAGE | TRIGGER | UNDER | EXECUTE
    with self._choice():
        with self._option():
            self._token('SELECT')
        with self._option():
            self._token('SELECT')
            self._left_paren_()
            self._column_name_list_()
            self._right_paren_()
        with self._option():
            self._token('SELECT')
            self._left_paren_()
            self._privilege_method_list_()
            self._right_paren_()
        with self._option():
            self._token('DELETE')
        with self._option():
            self._token('INSERT')
            with self._optional():
                self._left_paren_()
                self._column_name_list_()
                self._right_paren_()
        with self._option():
            self._token('UPDATE')
            with self._optional():
                self._left_paren_()
                self._column_name_list_()
                self._right_paren_()
        with self._option():
            self._token('REFERENCES')
            with self._optional():
                self._left_paren_()
                self._column_name_list_()
                self._right_paren_()
        with self._option():
            self._token('USAGE')
        with self._option():
            self._token('TRIGGER')
        with self._option():
            self._token('UNDER')
        with self._option():
            self._token('EXECUTE')
        self._error('expecting one of: DELETE EXECUTE INSERT REFERENCES SELECT TRIGGER UNDER UPDATE USAGE')

@graken()
def _privilege_method_list_(self):
    def sep0():
        self._token(',')
    def block0():
        self._specific_routine_designator_()
    self._positive_closure(block0, prefix=sep0)

@graken()
def _grantee_(self):
    # PUBLIC | <authorization identifier>
    with self._choice():
        with self._option():
            self._token('PUBLIC')
        with self._option():
            self._identifier_()
        self._error('expecting one of: PUBLIC')

@graken()
def _grantor_(self):
    with self._choice():
        with self._option():
            self._token('CURRENT_USER')
        with self._option():
            self._token('CURRENT_ROLE')
        self._error('expecting one of: CURRENT_ROLE CURRENT_USER')

@graken()
def _role_definition_(self):
    # CREATE ROLE <name> [WITH ADMIN <grantor>]
    self._token('CREATE')
    self._token('ROLE')
    self._identifier_()
    with self._optional():
        self._token('WITH')
        self._token('ADMIN')
        self._grantor_()

@graken()
def _grant_role_statement_(self):
    # GRANT <role>,... TO <grantee>,... [WITH ADMIN OPTION]
    # [GRANTED BY <grantor>]
    self._token('GRANT')
    self._identifier_list_()
    self._token('TO')
    def sep0():
        self._token(',')
    def block0():
        self._grantee_()
    self._positive_closure(block0, prefix=sep0)
    with self._optional():
        self._token('WITH')
        self._token('ADMIN')
        self._token('OPTION')
    with self._optional():
        self._token('GRANTED')
        self._token('BY')
        self._grantor_()

@graken()
def _drop_role_statement_(self):
    self._token('DROP')
    self._token('ROLE')
    self._identifier_()

@graken()
def _revoke_statement_(self):
    with self._choice():
        with self._option():
            self._revoke_privilege_statement_()
        with self._option():
            self._revoke_role_statement_()
        self._error('no available options')

@graken()
def _revoke_privilege_statement_(self):
    # REVOKE [<option extension>] <privileges> FROM <grantee>,...
    # [GRANTED BY <grantor>] <drop behavior>
    self._token('REVOKE')
    with self._optional():
        self._revoke_option_extension_()
    self._privileges_()
    self._token('FROM')
    def sep0():
        self._token(',')
    def block0():
        self._grantee_()
    self._positive_closure(block0, prefix=sep0)
    with self._optional():
        self._token('GRANTED')
        self._token('BY')
        self._grantor_()
    self._drop_behavior_()

@graken()
def _revoke_option_extension_(self):
    # GRANT OPTION FOR | HIERARCHY OPTION FOR
    with self._choice():
        with self._option():
            self._token('GRANT')
            self._token('OPTION')
            self._token('FOR')
        with self._option():
            self._token('HIERARCHY')
            self._token('OPTION')
            self._token('FOR')
        self._error('expecting one of: GRANT HIERARCHY')

@graken()
def _revoke_role_statement_(self):
    # REVOKE [ADMIN OPTION FOR] <role>,... FROM <grantee>,...
    # [GRANTED BY <grantor>] <drop behavior>
    self._token('REVOKE')
    with self._optional():
        self._token('ADMIN')
        self._token('OPTION')
        self._token('FOR')
    self._identifier_list_()
    self._token('FROM')
    def sep0():
        self._token(',')
    def block0():
        self._grantee_()
    self._positive_closure(block0, prefix=sep0)
    with self._optional():
        self._token('GRANTED')
        self._token('BY')
        self._grantor_()
    self._drop_behavior_()

@graken()
def _character_set_specification_list_(self):
    # Comma-separated character set names.
    def sep0():
        self._token(',')
    def block0():
        self._character_set_name_()
    self._positive_closure(block0, prefix=sep0)
# NOTE(review): generated parser rules (grako/TatSu) that classify SQL
# statements into the standard's statement categories — regenerate from
# the grammar, do not hand-edit logic.
@graken()
def _sql_procedure_statement_(self):
    self._sql_executable_statement_()

@graken()
def _sql_executable_statement_(self):
    # Top-level dispatch over the statement categories below.
    with self._choice():
        with self._option():
            self._sql_schema_statement_()
        with self._option():
            self._sql_data_statement_()
        with self._option():
            self._sql_control_statement_()
        with self._option():
            self._sql_transaction_statement_()
        with self._option():
            self._sql_connection_statement_()
        with self._option():
            self._sql_session_statement_()
        with self._option():
            self._get_diagnostics_statement_()
        with self._option():
            self._sql_dynamic_statement_()
        self._error('no available options')

@graken()
def _sql_schema_statement_(self):
    with self._choice():
        with self._option():
            self._sql_schema_definition_statement_()
        with self._option():
            self._sql_schema_manipulation_statement_()
        self._error('no available options')

@graken()
def _sql_schema_definition_statement_(self):
    # All CREATE-style (and GRANT) schema definition statements.
    with self._choice():
        with self._option():
            self._schema_definition_()
        with self._option():
            self._table_definition_()
        with self._option():
            self._view_definition_()
        with self._option():
            self._schema_routine_()
        with self._option():
            self._grant_statement_()
        with self._option():
            self._role_definition_()
        with self._option():
            self._domain_definition_()
        with self._option():
            self._character_set_definition_()
        with self._option():
            self._collation_definition_()
        with self._option():
            self._transliteration_definition_()
        with self._option():
            self._assertion_definition_()
        with self._option():
            self._trigger_definition_()
        with self._option():
            self._user_defined_type_definition_()
        with self._option():
            self._user_defined_cast_definition_()
        with self._option():
            self._user_defined_ordering_definition_()
        with self._option():
            self._transform_definition_()
        with self._option():
            self._sequence_generator_definition_()
        self._error('no available options')

@graken()
def _sql_schema_manipulation_statement_(self):
    # All ALTER/DROP/REVOKE-style schema manipulation statements.
    with self._choice():
        with self._option():
            self._drop_schema_statement_()
        with self._option():
            self._alter_table_statement_()
        with self._option():
            self._drop_table_statement_()
        with self._option():
            self._drop_view_statement_()
        with self._option():
            self._alter_routine_statement_()
        with self._option():
            self._drop_routine_statement_()
        with self._option():
            self._drop_user_defined_cast_statement_()
        with self._option():
            self._revoke_statement_()
        with self._option():
            self._drop_role_statement_()
        with self._option():
            self._alter_domain_statement_()
        with self._option():
            self._drop_domain_statement_()
        with self._option():
            self._drop_character_set_statement_()
        with self._option():
            self._drop_collation_statement_()
        with self._option():
            self._drop_transliteration_statement_()
        with self._option():
            self._drop_assertion_statement_()
        with self._option():
            self._drop_trigger_statement_()
        with self._option():
            self._alter_type_statement_()
        with self._option():
            self._drop_data_type_statement_()
        with self._option():
            self._drop_user_defined_ordering_statement_()
        with self._option():
            self._alter_transform_statement_()
        with self._option():
            self._drop_transform_statement_()
        with self._option():
            self._alter_sequence_generator_statement_()
        with self._option():
            self._drop_sequence_generator_statement_()
        self._error('no available options')

@graken()
def _sql_data_statement_(self):
    with self._choice():
        with self._option():
            self._open_statement_()
        with self._option():
            self._fetch_statement_()
        with self._option():
            self._close_statement_()
        with self._option():
            self._select_statement_single_row_()
        with self._option():
            self._free_locator_statement_()
        with self._option():
            self._hold_locator_statement_()
        with self._option():
            self._sql_data_change_statement_()
        self._error('no available options')

@graken()
def _sql_data_change_statement_(self):
    # DELETE/INSERT/UPDATE/MERGE variants.
    with self._choice():
        with self._option():
            self._delete_statement_positioned_()
        with self._option():
            self._delete_statement_searched_()
        with self._option():
            self._insert_statement_()
        with self._option():
            self._update_statement_positioned_()
        with self._option():
            self._update_statement_searched_()
        with self._option():
            self._merge_statement_()
        self._error('no available options')

@graken()
def _sql_control_statement_(self):
    with self._choice():
        with self._option():
            self._call_statement_()
        with self._option():
            self._return_statement_()
        self._error('no available options')

@graken()
def _sql_transaction_statement_(self):
    with self._choice():
        with self._option():
            self._start_transaction_statement_()
        with self._option():
            self._set_transaction_statement_()
        with self._option():
            self._set_constraints_mode_statement_()
        with self._option():
            self._savepoint_statement_()
        with self._option():
            self._release_savepoint_statement_()
        with self._option():
            self._commit_statement_()
        with self._option():
            self._rollback_statement_()
        self._error('no available options')

@graken()
def _sql_connection_statement_(self):
    with self._choice():
        with self._option():
            self._connect_statement_()
        with self._option():
            self._set_connection_statement_()
        with self._option():
            self._disconnect_statement_()
        self._error('no available options')

@graken()
def _sql_session_statement_(self):
    with self._choice():
        with self._option():
            self._set_session_user_identifier_statement_()
        with self._option():
            self._set_role_statement_()
        with self._option():
            self._set_local_time_zone_statement_()
        with self._option():
            self._set_session_characteristics_statement_()
        with self._option():
            self._set_catalog_statement_()
        with self._option():
            self._set_schema_statement_()
        with self._option():
            self._set_names_statement_()
        with self._option():
            self._set_path_statement_()
        with self._option():
            self._set_transform_group_statement_()
        with self._option():
            self._set_session_collation_statement_()
        self._error('no available options')
@graken()
def _sql_dynamic_statement_(self):
with self._choice():
with self._option():
self._descriptor_statement_()
with self._option():
self._prepare_statement_()
with self._option():
self._deallocate_prepared_statement_()
with self._option():
self._describe_statement_()
with self._option():
self._execute_statement_()
with self._option():
self._execute_immediate_statement_()
with self._option():
self._sql_dynamic_data_statement_()
self._error('no available options')
@graken()
def _sql_dynamic_data_statement_(self):
with self._choice():
with self._option():
self._allocate_cursor_statement_()
with self._option():
self._dynamic_open_statement_()
with self._option():
self._dynamic_fetch_statement_()
with self._option():
self._dynamic_close_statement_()
with self._option():
self._dynamic_delete_statement_positioned_()
with self._option():
self._dynamic_update_statement_positioned_()
self._error('no available options')
@graken()
def _descriptor_statement_(self):
with self._choice():
with self._option():
self._allocate_descriptor_statement_()
with self._option():
self._deallocate_descriptor_statement_()
with self._option():
self._set_descriptor_statement_()
with self._option():
self._get_descriptor_statement_()
self._error('no available options')
    @graken()
    def _cursor_sensitivity_(self):
        """Rule: SENSITIVE | INSENSITIVE | ASENSITIVE."""
        with self._choice():
            with self._option():
                self._token('SENSITIVE')
            with self._option():
                self._token('INSENSITIVE')
            with self._option():
                self._token('ASENSITIVE')
            self._error('expecting one of: ASENSITIVE INSENSITIVE SENSITIVE')
    @graken()
    def _cursor_scrollability_(self):
        """Rule: SCROLL | NO SCROLL."""
        with self._choice():
            with self._option():
                self._token('SCROLL')
            with self._option():
                self._token('NO')
                self._token('SCROLL')
            self._error('expecting one of: NO SCROLL')
    @graken()
    def _cursor_holdability_(self):
        """Rule: WITH HOLD | WITHOUT HOLD."""
        with self._choice():
            with self._option():
                self._token('WITH')
                self._token('HOLD')
            with self._option():
                self._token('WITHOUT')
                self._token('HOLD')
            self._error('expecting one of: WITH WITHOUT')
    @graken()
    def _cursor_returnability_(self):
        """Rule: WITH RETURN | WITHOUT RETURN."""
        with self._choice():
            with self._option():
                self._token('WITH')
                self._token('RETURN')
            with self._option():
                self._token('WITHOUT')
                self._token('RETURN')
            self._error('expecting one of: WITH WITHOUT')
    @graken()
    def _cursor_specification_(self):
        """Rule: <query expression> [ <order by clause> ] [ <updatability clause> ]."""
        self._query_expression_()
        with self._optional():
            self._order_by_clause_()
        with self._optional():
            self._updatability_clause_()
    @graken()
    def _updatability_clause_(self):
        """Rule: FOR ( READ ONLY | UPDATE [ OF <column name list> ] )."""
        self._token('FOR')
        with self._group():
            with self._choice():
                with self._option():
                    self._token('READ')
                    self._token('ONLY')
                with self._option():
                    self._token('UPDATE')
                    with self._optional():
                        self._token('OF')
                        self._column_name_list_()
                self._error('expecting one of: READ UPDATE')
    @graken()
    def _order_by_clause_(self):
        """Rule: ORDER BY <sort specification list>."""
        self._token('ORDER')
        self._token('BY')
        self._sort_specification_list_()
    @graken()
    def _open_statement_(self):
        """Rule: OPEN <cursor name>."""
        self._token('OPEN')
        self._cursor_name_()
    @graken()
    def _fetch_statement_(self):
        """Rule: FETCH [ [ <fetch orientation> ] FROM ] <cursor name> INTO <fetch target list>."""
        self._token('FETCH')
        with self._optional():
            # The orientation is itself optional inside the optional
            # "... FROM" prefix, so bare "FETCH cursor INTO ..." also parses.
            with self._optional():
                self._fetch_orientation_()
            self._token('FROM')
        self._cursor_name_()
        self._token('INTO')
        self._fetch_target_list_()
    @graken()
    def _fetch_orientation_(self):
        """Rule: NEXT | PRIOR | FIRST | LAST | (ABSOLUTE | RELATIVE) <simple value specification>."""
        with self._choice():
            with self._option():
                self._token('NEXT')
            with self._option():
                self._token('PRIOR')
            with self._option():
                self._token('FIRST')
            with self._option():
                self._token('LAST')
            with self._option():
                with self._group():
                    with self._choice():
                        with self._option():
                            self._token('ABSOLUTE')
                        with self._option():
                            self._token('RELATIVE')
                        self._error('expecting one of: ABSOLUTE RELATIVE')
                self._simple_value_specification_()
            self._error('expecting one of: FIRST LAST NEXT PRIOR')
    @graken()
    def _fetch_target_list_(self):
        """Rule: comma-separated list of one or more <target specification>s."""
        def sep0():
            self._token(',')
        def block0():
            self._target_specification_()
        self._positive_closure(block0, prefix=sep0)
    @graken()
    def _close_statement_(self):
        """Rule: CLOSE <cursor name>."""
        self._token('CLOSE')
        self._cursor_name_()
    @graken()
    def _select_statement_single_row_(self):
        """Rule: SELECT [ <set quantifier> ] <select list> INTO <select target list> <table expression>."""
        self._token('SELECT')
        with self._optional():
            self._set_quantifier_()
        self._select_list_()
        self._token('INTO')
        self._select_target_list_()
        self._table_expression_()
    @graken()
    def _select_target_list_(self):
        """Rule: comma-separated list of one or more <target specification>s."""
        def sep0():
            self._token(',')
        def block0():
            self._target_specification_()
        self._positive_closure(block0, prefix=sep0)
    @graken()
    def _delete_statement_positioned_(self):
        """Rule: DELETE FROM <target table> WHERE CURRENT OF <cursor name>."""
        self._token('DELETE')
        self._token('FROM')
        self._target_table_()
        self._token('WHERE')
        self._token('CURRENT')
        self._token('OF')
        self._cursor_name_()
    @graken()
    def _target_table_(self):
        """Rule: <table name> | ONLY ( <table name> )."""
        with self._choice():
            with self._option():
                self._table_name_()
            with self._option():
                self._token('ONLY')
                self._left_paren_()
                self._table_name_()
                self._right_paren_()
            self._error('no available options')
    @graken()
    def _delete_statement_searched_(self):
        """Rule: DELETE FROM <target table> [ WHERE <search condition> ]."""
        self._token('DELETE')
        self._token('FROM')
        self._target_table_()
        with self._optional():
            self._token('WHERE')
            self._search_condition_()
    @graken()
    def _insert_statement_(self):
        """Rule: INSERT INTO <table name> <insert columns and source>."""
        self._token('INSERT')
        self._token('INTO')
        self._table_name_()
        self._insert_columns_and_source_()
    @graken()
    def _insert_columns_and_source_(self):
        """Rule: insert source — subquery, value constructor, or DEFAULT VALUES."""
        with self._choice():
            with self._option():
                self._from_subquery_()
            with self._option():
                self._from_constructor_()
            with self._option():
                self._from_default_()
            self._error('no available options')
    @graken()
    def _from_subquery_(self):
        """Rule: [ ( <column name list> ) ] [ <override clause> ] <query expression>."""
        with self._optional():
            self._left_paren_()
            self._column_name_list_()
            self._right_paren_()
        with self._optional():
            self._override_clause_()
        self._query_expression_()
    @graken()
    def _from_constructor_(self):
        """Rule: [ ( <column name list> ) ] [ <override clause> ] <table value constructor>."""
        with self._optional():
            self._left_paren_()
            self._column_name_list_()
            self._right_paren_()
        with self._optional():
            self._override_clause_()
        self._contextually_typed_table_value_constructor_()
    @graken()
    def _override_clause_(self):
        """Rule: OVERRIDING USER VALUE | OVERRIDING SYSTEM VALUE."""
        with self._choice():
            with self._option():
                self._token('OVERRIDING')
                self._token('USER')
                self._token('VALUE')
            with self._option():
                self._token('OVERRIDING')
                self._token('SYSTEM')
                self._token('VALUE')
            self._error('expecting one of: OVERRIDING')
    @graken()
    def _from_default_(self):
        """Rule: DEFAULT VALUES."""
        self._token('DEFAULT')
        self._token('VALUES')
    @graken()
    def _merge_statement_(self):
        """Rule: MERGE INTO <target table> [ <as clause> ] USING <table reference>
        ON <search condition> <merge operation specification>."""
        self._token('MERGE')
        self._token('INTO')
        self._target_table_()
        with self._optional():
            self._as_clause_()
        self._token('USING')
        self._table_reference_()
        self._token('ON')
        self._search_condition_()
        self._merge_operation_specification_()
    @graken()
    def _merge_operation_specification_(self):
        """Rule: one or more <merge when clause>s."""
        def block0():
            self._merge_when_clause_()
        self._positive_closure(block0)
    @graken()
    def _merge_when_clause_(self):
        """Rule: WHEN MATCHED clause or WHEN NOT MATCHED clause."""
        with self._choice():
            with self._option():
                self._merge_when_matched_clause_()
            with self._option():
                self._merge_when_not_matched_clause_()
            self._error('no available options')
    @graken()
    def _merge_when_matched_clause_(self):
        """Rule: WHEN MATCHED THEN <merge update specification>."""
        self._token('WHEN')
        self._token('MATCHED')
        self._token('THEN')
        self._merge_update_specification_()
    @graken()
    def _merge_when_not_matched_clause_(self):
        """Rule: WHEN NOT MATCHED THEN <merge insert specification>."""
        self._token('WHEN')
        self._token('NOT')
        self._token('MATCHED')
        self._token('THEN')
        self._merge_insert_specification_()
    @graken()
    def _merge_update_specification_(self):
        """Rule: UPDATE SET <set clause list>."""
        self._token('UPDATE')
        self._token('SET')
        self._set_clause_list_()
    @graken()
    def _merge_insert_specification_(self):
        """Rule: INSERT [ ( <column name list> ) ] [ <override clause> ] VALUES <merge insert value list>."""
        self._token('INSERT')
        with self._optional():
            self._left_paren_()
            self._column_name_list_()
            self._right_paren_()
        with self._optional():
            self._override_clause_()
        self._token('VALUES')
        self._merge_insert_value_list_()
    @graken()
    def _merge_insert_value_list_(self):
        """Rule: parenthesized, comma-separated list of <merge insert value element>s."""
        self._left_paren_()
        def sep0():
            self._token(',')
        def block0():
            self._merge_insert_value_element_()
        self._positive_closure(block0, prefix=sep0)
        self._right_paren_()
    @graken()
    def _merge_insert_value_element_(self):
        """Rule: <value expression> | <contextually typed value specification>."""
        with self._choice():
            with self._option():
                self._value_expression_()
            with self._option():
                self._contextually_typed_value_specification_()
            self._error('no available options')
    @graken()
    def _update_statement_positioned_(self):
        """Rule: UPDATE <target table> SET <set clause list> WHERE CURRENT OF <cursor name>."""
        self._token('UPDATE')
        self._target_table_()
        self._token('SET')
        self._set_clause_list_()
        self._token('WHERE')
        self._token('CURRENT')
        self._token('OF')
        self._cursor_name_()
    @graken()
    def _update_statement_searched_(self):
        """Rule: UPDATE <target table> SET <set clause list> [ WHERE <search condition> ]."""
        self._token('UPDATE')
        self._target_table_()
        self._token('SET')
        self._set_clause_list_()
        with self._optional():
            self._token('WHERE')
            self._search_condition_()
    @graken()
    def _set_clause_list_(self):
        """Rule: comma-separated list of one or more <set clause>s."""
        def sep0():
            self._token(',')
        def block0():
            self._set_clause_()
        self._positive_closure(block0, prefix=sep0)
    @graken()
    def _set_clause_(self):
        """Rule: <multiple column assignment> | <set target> = <update source>."""
        with self._choice():
            with self._option():
                self._multiple_column_assignment_()
            with self._option():
                self._set_target_()
                self._equals_operator_()
                self._update_source_()
            self._error('no available options')
    @graken()
    def _set_target_(self):
        """Rule: <update target> | <mutated set clause>."""
        with self._choice():
            with self._option():
                self._update_target_()
            with self._option():
                self._mutated_set_clause_()
            self._error('no available options')
    @graken()
    def _multiple_column_assignment_(self):
        """Rule: <set target list> = <contextually typed row value expression>."""
        self._set_target_list_()
        self._equals_operator_()
        self._contextually_typed_row_value_expression_()
    @graken()
    def _set_target_list_(self):
        """Rule: parenthesized, comma-separated list of <set target>s."""
        self._left_paren_()
        def sep0():
            self._token(',')
        def block0():
            self._set_target_()
        self._positive_closure(block0, prefix=sep0)
        self._right_paren_()
    @graken()
    def _update_target_(self):
        """Rule: <identifier> | <identifier> [ <simple value specification> ] (array element)."""
        with self._choice():
            with self._option():
                self._identifier_()
            with self._option():
                self._identifier_()
                self._left_bracket_or_trigraph_()
                self._simple_value_specification_()
                self._right_bracket_or_trigraph_()
            self._error('no available options')
    @graken()
    def _mutated_set_clause_(self):
        """Rule: <mutated target> . <identifier> (attribute mutation)."""
        self._mutated_target_()
        self._period_()
        self._identifier_()
    @graken()
    def _mutated_target_(self):
        """Rule: <identifier> | <mutated set clause> (left-recursive via the grammar)."""
        with self._choice():
            with self._option():
                self._identifier_()
            with self._option():
                self._mutated_set_clause_()
            self._error('no available options')
    @graken()
    def _update_source_(self):
        """Rule: <value expression> | <contextually typed value specification>."""
        with self._choice():
            with self._option():
                self._value_expression_()
            with self._option():
                self._contextually_typed_value_specification_()
            self._error('no available options')
    @graken()
    def _temporary_table_declaration_(self):
        """Rule: DECLARE LOCAL TEMPORARY TABLE <table name> <table element list>
        [ ON COMMIT <table commit action> ROWS ]."""
        self._token('DECLARE')
        self._token('LOCAL')
        self._token('TEMPORARY')
        self._token('TABLE')
        self._table_name_()
        self._table_element_list_()
        with self._optional():
            self._token('ON')
            self._token('COMMIT')
            self._table_commit_action_()
            self._token('ROWS')
    @graken()
    def _free_locator_statement_(self):
        """Rule: FREE LOCATOR <locator reference> [ , <locator reference> ... ]."""
        self._token('FREE')
        self._token('LOCATOR')
        def sep0():
            self._token(',')
        def block0():
            self._locator_reference_()
        self._positive_closure(block0, prefix=sep0)
    @graken()
    def _locator_reference_(self):
        """Rule: a locator reference is a <host parameter name>."""
        self._host_parameter_name_()
    @graken()
    def _hold_locator_statement_(self):
        """Rule: HOLD LOCATOR <locator reference> [ , <locator reference> ... ]."""
        self._token('HOLD')
        self._token('LOCATOR')
        def sep0():
            self._token(',')
        def block0():
            self._locator_reference_()
        self._positive_closure(block0, prefix=sep0)
    @graken()
    def _call_statement_(self):
        """Rule: CALL <routine invocation>."""
        self._token('CALL')
        self._routine_invocation_()
    @graken()
    def _return_statement_(self):
        """Rule: RETURN <return value>."""
        self._token('RETURN')
        self._return_value_()
    @graken()
    def _return_value_(self):
        """Rule: <value expression> | NULL."""
        with self._choice():
            with self._option():
                self._value_expression_()
            with self._option():
                self._token('NULL')
            self._error('expecting one of: NULL')
    @graken()
    def _start_transaction_statement_(self):
        """Rule: START TRANSACTION [ <transaction characteristics> ]."""
        self._token('START')
        self._token('TRANSACTION')
        with self._optional():
            self._transaction_characteristics_()
    @graken()
    def _transaction_mode_(self):
        """Rule: isolation level, access mode, or diagnostics size."""
        with self._choice():
            with self._option():
                self._isolation_level_()
            with self._option():
                self._transaction_access_mode_()
            with self._option():
                self._diagnostics_size_()
            self._error('no available options')
    @graken()
    def _transaction_access_mode_(self):
        """Rule: READ ONLY | READ WRITE."""
        with self._choice():
            with self._option():
                self._token('READ')
                self._token('ONLY')
            with self._option():
                self._token('READ')
                self._token('WRITE')
            self._error('expecting one of: READ')
    @graken()
    def _isolation_level_(self):
        """Rule: ISOLATION LEVEL <level of isolation>."""
        self._token('ISOLATION')
        self._token('LEVEL')
        self._level_of_isolation_()
    @graken()
    def _level_of_isolation_(self):
        """Rule: READ UNCOMMITTED | READ COMMITTED | REPEATABLE READ | SERIALIZABLE."""
        with self._choice():
            with self._option():
                self._token('READ')
                self._token('UNCOMMITTED')
            with self._option():
                self._token('READ')
                self._token('COMMITTED')
            with self._option():
                self._token('REPEATABLE')
                self._token('READ')
            with self._option():
                self._token('SERIALIZABLE')
            self._error('expecting one of: READ REPEATABLE SERIALIZABLE')
    @graken()
    def _diagnostics_size_(self):
        """Rule: DIAGNOSTICS SIZE <simple value specification>."""
        self._token('DIAGNOSTICS')
        self._token('SIZE')
        self._simple_value_specification_()
    @graken()
    def _set_transaction_statement_(self):
        """Rule: SET [ LOCAL ] TRANSACTION <transaction characteristics>."""
        self._token('SET')
        with self._optional():
            self._token('LOCAL')
        self._token('TRANSACTION')
        self._transaction_characteristics_()
    @graken()
    def _transaction_characteristics_(self):
        """Rule: comma-separated list of one or more <transaction mode>s."""
        def sep0():
            self._token(',')
        def block0():
            self._transaction_mode_()
        self._positive_closure(block0, prefix=sep0)
    @graken()
    def _set_constraints_mode_statement_(self):
        """Rule: SET CONSTRAINTS <constraint name list> ( DEFERRED | IMMEDIATE )."""
        self._token('SET')
        self._token('CONSTRAINTS')
        self._constraint_name_list_()
        with self._group():
            with self._choice():
                with self._option():
                    self._token('DEFERRED')
                with self._option():
                    self._token('IMMEDIATE')
                self._error('expecting one of: DEFERRED IMMEDIATE')
    @graken()
    def _constraint_name_list_(self):
        """Rule: ALL | comma-separated list of <schema qualified name>s."""
        with self._choice():
            with self._option():
                self._token('ALL')
            with self._option():
                def sep0():
                    self._token(',')
                def block0():
                    self._schema_qualified_name_()
                self._positive_closure(block0, prefix=sep0)
            self._error('expecting one of: ALL')
    @graken()
    def _savepoint_statement_(self):
        """Rule: SAVEPOINT <identifier>."""
        self._token('SAVEPOINT')
        self._identifier_()
    @graken()
    def _release_savepoint_statement_(self):
        """Rule: RELEASE SAVEPOINT <identifier>."""
        self._token('RELEASE')
        self._token('SAVEPOINT')
        self._identifier_()
    @graken()
    def _commit_statement_(self):
        """Rule: COMMIT [ WORK ] [ AND [ NO ] CHAIN ]."""
        self._token('COMMIT')
        with self._optional():
            self._token('WORK')
        with self._optional():
            self._token('AND')
            with self._optional():
                self._token('NO')
            self._token('CHAIN')
    @graken()
    def _rollback_statement_(self):
        """Rule: ROLLBACK [ WORK ] [ AND [ NO ] CHAIN ] [ <savepoint clause> ]."""
        self._token('ROLLBACK')
        with self._optional():
            self._token('WORK')
        with self._optional():
            self._token('AND')
            with self._optional():
                self._token('NO')
            self._token('CHAIN')
        with self._optional():
            self._savepoint_clause_()
    @graken()
    def _savepoint_clause_(self):
        """Rule: TO SAVEPOINT <identifier>."""
        self._token('TO')
        self._token('SAVEPOINT')
        self._identifier_()
    @graken()
    def _connect_statement_(self):
        """Rule: CONNECT TO <connection target>."""
        self._token('CONNECT')
        self._token('TO')
        self._connection_target_()
    @graken()
    def _connection_target_(self):
        """Rule: <simple value specification> [ AS <connection name> ]
        [ USER <simple value specification> ] | DEFAULT.
        NOTE(review): the two _optional() blocks sit at choice level rather
        than inside the first option — presumably generator layout for the
        first alternative's trailing optionals; confirm against the grammar
        source before regenerating."""
        with self._choice():
            with self._option():
                self._simple_value_specification_()
            with self._optional():
                self._token('AS')
                self._connection_name_()
            with self._optional():
                self._token('USER')
                self._simple_value_specification_()
            with self._option():
                self._token('DEFAULT')
            self._error('expecting one of: DEFAULT')
    @graken()
    def _set_connection_statement_(self):
        """Rule: SET CONNECTION <connection object>."""
        self._token('SET')
        self._token('CONNECTION')
        self._connection_object_()
    @graken()
    def _connection_object_(self):
        """Rule: DEFAULT | <connection name>."""
        with self._choice():
            with self._option():
                self._token('DEFAULT')
            with self._option():
                self._connection_name_()
            self._error('expecting one of: DEFAULT')
    @graken()
    def _disconnect_statement_(self):
        """Rule: DISCONNECT <disconnect object>."""
        self._token('DISCONNECT')
        self._disconnect_object_()
    @graken()
    def _disconnect_object_(self):
        """Rule: <connection object> | ALL | CURRENT."""
        with self._choice():
            with self._option():
                self._connection_object_()
            with self._option():
                self._token('ALL')
            with self._option():
                self._token('CURRENT')
            self._error('expecting one of: ALL CURRENT')
    @graken()
    def _set_session_characteristics_statement_(self):
        """Rule: SET SESSION CHARACTERISTICS AS <session characteristic list>."""
        self._token('SET')
        self._token('SESSION')
        self._token('CHARACTERISTICS')
        self._token('AS')
        self._session_characteristic_list_()
    @graken()
    def _session_characteristic_list_(self):
        """Rule: comma-separated list of one or more <session characteristic>s."""
        def sep0():
            self._token(',')
        def block0():
            self._session_characteristic_()
        self._positive_closure(block0, prefix=sep0)
    @graken()
    def _session_characteristic_(self):
        """Rule: TRANSACTION <transaction characteristics>."""
        self._token('TRANSACTION')
        self._transaction_characteristics_()
    @graken()
    def _set_session_user_identifier_statement_(self):
        """Rule: SET SESSION AUTHORIZATION <value specification>."""
        self._token('SET')
        self._token('SESSION')
        self._token('AUTHORIZATION')
        self._value_specification_()
    @graken()
    def _set_role_statement_(self):
        """Rule: SET ROLE <role specification>."""
        self._token('SET')
        self._token('ROLE')
        self._role_specification_()
    @graken()
    def _role_specification_(self):
        """Rule: <value specification> | NONE."""
        with self._choice():
            with self._option():
                self._value_specification_()
            with self._option():
                self._token('NONE')
            self._error('expecting one of: NONE')
    @graken()
    def _set_local_time_zone_statement_(self):
        """Rule: SET TIME ZONE <set time zone value>."""
        self._token('SET')
        self._token('TIME')
        self._token('ZONE')
        self._set_time_zone_value_()
    @graken()
    def _set_time_zone_value_(self):
        """Rule: <interval value expression> | LOCAL."""
        with self._choice():
            with self._option():
                self._interval_value_expression_()
            with self._option():
                self._token('LOCAL')
            self._error('expecting one of: LOCAL')
    @graken()
    def _set_catalog_statement_(self):
        """Rule: SET <catalog name characteristic>."""
        self._token('SET')
        self._catalog_name_characteristic_()
    @graken()
    def _catalog_name_characteristic_(self):
        """Rule: CATALOG <value specification>."""
        self._token('CATALOG')
        self._value_specification_()
    @graken()
    def _set_schema_statement_(self):
        """Rule: SET <schema name characteristic>."""
        self._token('SET')
        self._schema_name_characteristic_()
    @graken()
    def _schema_name_characteristic_(self):
        """Rule: SCHEMA <value specification>."""
        self._token('SCHEMA')
        self._value_specification_()
    @graken()
    def _set_names_statement_(self):
        """Rule: SET <character set name characteristic>."""
        self._token('SET')
        self._character_set_name_characteristic_()
    @graken()
    def _character_set_name_characteristic_(self):
        """Rule: NAMES <value specification>."""
        self._token('NAMES')
        self._value_specification_()
    @graken()
    def _set_path_statement_(self):
        """Rule: SET <SQL-path characteristic>."""
        self._token('SET')
        self._sql_path_characteristic_()
    @graken()
    def _sql_path_characteristic_(self):
        """Rule: PATH <value specification>."""
        self._token('PATH')
        self._value_specification_()
    @graken()
    def _set_transform_group_statement_(self):
        """Rule: SET <transform group characteristic>."""
        self._token('SET')
        self._transform_group_characteristic_()
    @graken()
    def _transform_group_characteristic_(self):
        """Rule: DEFAULT TRANSFORM GROUP <value specification>
        | TRANSFORM GROUP FOR TYPE <schema qualified name> <value specification>."""
        with self._choice():
            with self._option():
                self._token('DEFAULT')
                self._token('TRANSFORM')
                self._token('GROUP')
                self._value_specification_()
            with self._option():
                self._token('TRANSFORM')
                self._token('GROUP')
                self._token('FOR')
                self._token('TYPE')
                self._schema_qualified_name_()
                self._value_specification_()
            self._error('no available options')
    @graken()
    def _set_session_collation_statement_(self):
        """Rule: SET COLLATION <value specification> [ FOR <character set list> ]
        | SET NO COLLATION [ FOR <character set list> ]."""
        with self._choice():
            with self._option():
                self._token('SET')
                self._token('COLLATION')
                self._value_specification_()
                with self._optional():
                    self._token('FOR')
                    self._character_set_specification_list_()
            with self._option():
                self._token('SET')
                self._token('NO')
                self._token('COLLATION')
                with self._optional():
                    self._token('FOR')
                    self._character_set_specification_list_()
            self._error('expecting one of: SET')
    @graken()
    def _allocate_descriptor_statement_(self):
        """Rule: ALLOCATE [ SQL ] DESCRIPTOR <descriptor name> [ WITH MAX <simple value specification> ]."""
        self._token('ALLOCATE')
        with self._optional():
            self._token('SQL')
        self._token('DESCRIPTOR')
        self._descriptor_name_()
        with self._optional():
            self._token('WITH')
            self._token('MAX')
            self._simple_value_specification_()
    @graken()
    def _deallocate_descriptor_statement_(self):
        """Rule: DEALLOCATE [ SQL ] DESCRIPTOR <descriptor name>."""
        self._token('DEALLOCATE')
        with self._optional():
            self._token('SQL')
        self._token('DESCRIPTOR')
        self._descriptor_name_()
    @graken()
    def _get_descriptor_statement_(self):
        """Rule: GET [ SQL ] DESCRIPTOR <descriptor name> <get descriptor information>."""
        self._token('GET')
        with self._optional():
            self._token('SQL')
        self._token('DESCRIPTOR')
        self._descriptor_name_()
        self._get_descriptor_information_()
    @graken()
    def _get_descriptor_information_(self):
        """Rule: header-information list, or VALUE <item number> followed by
        an item-information list (both comma-separated)."""
        with self._choice():
            with self._option():
                def sep0():
                    self._token(',')
                def block0():
                    self._get_header_information_()
                self._positive_closure(block0, prefix=sep0)
            with self._option():
                self._token('VALUE')
                self._item_number_()
                def sep1():
                    self._token(',')
                def block1():
                    self._get_item_information_()
                self._positive_closure(block1, prefix=sep1)
            self._error('no available options')
    @graken()
    def _get_header_information_(self):
        """Rule: <simple target specification> = <header item name>."""
        self._simple_target_specification_()
        self._equals_operator_()
        self._header_item_name_()
    @graken()
    def _header_item_name_(self):
        """Rule: one of the descriptor header field keywords."""
        with self._choice():
            with self._option():
                self._token('COUNT')
            with self._option():
                self._token('KEY_TYPE')
            with self._option():
                self._token('DYNAMIC_FUNCTION')
            with self._option():
                self._token('DYNAMIC_FUNCTION_CODE')
            with self._option():
                self._token('TOP_LEVEL_COUNT')
            self._error('expecting one of: COUNT DYNAMIC_FUNCTION DYNAMIC_FUNCTION_CODE KEY_TYPE TOP_LEVEL_COUNT')
    @graken()
    def _get_item_information_(self):
        """Rule: <simple target specification> = <descriptor item name>."""
        self._simple_target_specification_()
        self._equals_operator_()
        self._descriptor_item_name_()
    @graken()
    def _item_number_(self):
        """Rule: an item number is a <simple value specification>."""
        self._simple_value_specification_()
    @graken()
    def _descriptor_item_name_(self):
        """Rule: one of the per-item descriptor field keywords (CARDINALITY,
        DATA, TYPE, LENGTH, ...). NOTE(review): generated keyword choice —
        keep option order and error text in sync with the grammar."""
        with self._choice():
            with self._option():
                self._token('CARDINALITY')
            with self._option():
                self._token('CHARACTER_SET_CATALOG')
            with self._option():
                self._token('CHARACTER_SET_NAME')
            with self._option():
                self._token('CHARACTER_SET_SCHEMA')
            with self._option():
                self._token('COLLATION_CATALOG')
            with self._option():
                self._token('COLLATION_NAME')
            with self._option():
                self._token('COLLATION_SCHEMA')
            with self._option():
                self._token('DATA')
            with self._option():
                self._token('DATETIME_INTERVAL_CODE')
            with self._option():
                self._token('DATETIME_INTERVAL_PRECISION')
            with self._option():
                self._token('DEGREE')
            with self._option():
                self._token('INDICATOR')
            with self._option():
                self._token('KEY_MEMBER')
            with self._option():
                self._token('LENGTH')
            with self._option():
                self._token('LEVEL')
            with self._option():
                self._token('NAME')
            with self._option():
                self._token('NULLABLE')
            with self._option():
                self._token('OCTET_LENGTH')
            with self._option():
                self._token('PARAMETER_MODE')
            with self._option():
                self._token('PARAMETER_ORDINAL_POSITION')
            with self._option():
                self._token('PARAMETER_SPECIFIC_CATALOG')
            with self._option():
                self._token('PARAMETER_SPECIFIC_NAME')
            with self._option():
                self._token('PARAMETER_SPECIFIC_SCHEMA')
            with self._option():
                self._token('PRECISION')
            with self._option():
                self._token('RETURNED_CARDINALITY')
            with self._option():
                self._token('RETURNED_LENGTH')
            with self._option():
                self._token('RETURNED_OCTET_LENGTH')
            with self._option():
                self._token('SCALE')
            with self._option():
                self._token('SCOPE_CATALOG')
            with self._option():
                self._token('SCOPE_NAME')
            with self._option():
                self._token('SCOPE_SCHEMA')
            with self._option():
                self._token('TYPE')
            with self._option():
                self._token('UNNAMED')
            with self._option():
                self._token('USER_DEFINED_TYPE_CATALOG')
            with self._option():
                self._token('USER_DEFINED_TYPE_NAME')
            with self._option():
                self._token('USER_DEFINED_TYPE_SCHEMA')
            with self._option():
                self._token('USER_DEFINED_TYPE_CODE')
            self._error('expecting one of: CARDINALITY CHARACTER_SET_CATALOG CHARACTER_SET_NAME CHARACTER_SET_SCHEMA COLLATION_CATALOG COLLATION_NAME COLLATION_SCHEMA DATA DATETIME_INTERVAL_CODE DATETIME_INTERVAL_PRECISION DEGREE INDICATOR KEY_MEMBER LENGTH LEVEL NAME NULLABLE OCTET_LENGTH PARAMETER_MODE PARAMETER_ORDINAL_POSITION PARAMETER_SPECIFIC_CATALOG PARAMETER_SPECIFIC_NAME PARAMETER_SPECIFIC_SCHEMA PRECISION RETURNED_CARDINALITY RETURNED_LENGTH RETURNED_OCTET_LENGTH SCALE SCOPE_CATALOG SCOPE_NAME SCOPE_SCHEMA TYPE UNNAMED USER_DEFINED_TYPE_CATALOG USER_DEFINED_TYPE_CODE USER_DEFINED_TYPE_NAME USER_DEFINED_TYPE_SCHEMA')
    @graken()
    def _set_descriptor_statement_(self):
        """Rule: SET [ SQL ] DESCRIPTOR <descriptor name> <set descriptor information>."""
        self._token('SET')
        with self._optional():
            self._token('SQL')
        self._token('DESCRIPTOR')
        self._descriptor_name_()
        self._set_descriptor_information_()
    @graken()
    def _set_descriptor_information_(self):
        """Rule: header-information list, or VALUE <item number> followed by
        an item-information list (both comma-separated)."""
        with self._choice():
            with self._option():
                def sep0():
                    self._token(',')
                def block0():
                    self._set_header_information_()
                self._positive_closure(block0, prefix=sep0)
            with self._option():
                self._token('VALUE')
                self._item_number_()
                def sep1():
                    self._token(',')
                def block1():
                    self._set_item_information_()
                self._positive_closure(block1, prefix=sep1)
            self._error('no available options')
    @graken()
    def _set_header_information_(self):
        """Rule: <header item name> = <simple value specification>."""
        self._header_item_name_()
        self._equals_operator_()
        self._simple_value_specification_()
    @graken()
    def _set_item_information_(self):
        """Rule: <descriptor item name> = <simple value specification>."""
        self._descriptor_item_name_()
        self._equals_operator_()
        self._simple_value_specification_()
    @graken()
    def _prepare_statement_(self):
        """Rule: PREPARE <SQL statement name> [ <attributes specification> ] FROM <SQL statement variable>."""
        self._token('PREPARE')
        self._sql_statement_name_()
        with self._optional():
            self._attributes_specification_()
        self._token('FROM')
        self._sql_statement_variable_()
    @graken()
    def _attributes_specification_(self):
        """Rule: ATTRIBUTES <simple value specification>."""
        self._token('ATTRIBUTES')
        self._simple_value_specification_()
    @graken()
    def _sql_statement_variable_(self):
        """Rule: an SQL statement variable is a <simple value specification>."""
        self._simple_value_specification_()
    @graken()
    def _deallocate_prepared_statement_(self):
        """Rule: DEALLOCATE PREPARE <SQL statement name>."""
        self._token('DEALLOCATE')
        self._token('PREPARE')
        self._sql_statement_name_()
    @graken()
    def _describe_statement_(self):
        """Rule: DESCRIBE INPUT or DESCRIBE [ OUTPUT ]."""
        with self._choice():
            with self._option():
                self._describe_input_statement_()
            with self._option():
                self._describe_output_statement_()
            self._error('no available options')
    @graken()
    def _describe_input_statement_(self):
        """Rule: DESCRIBE INPUT <SQL statement name> <using descriptor> [ <nesting option> ]."""
        self._token('DESCRIBE')
        self._token('INPUT')
        self._sql_statement_name_()
        self._using_descriptor_()
        with self._optional():
            self._nesting_option_()
    @graken()
    def _describe_output_statement_(self):
        """Rule: DESCRIBE [ OUTPUT ] <described object> <using descriptor> [ <nesting option> ]."""
        self._token('DESCRIBE')
        with self._optional():
            self._token('OUTPUT')
        self._described_object_()
        self._using_descriptor_()
        with self._optional():
            self._nesting_option_()
    @graken()
    def _nesting_option_(self):
        """Rule: WITH NESTING | WITHOUT NESTING."""
        with self._choice():
            with self._option():
                self._token('WITH')
                self._token('NESTING')
            with self._option():
                self._token('WITHOUT')
                self._token('NESTING')
            self._error('expecting one of: WITH WITHOUT')
    @graken()
    def _using_descriptor_(self):
        """Rule: USING [ SQL ] DESCRIPTOR <descriptor name>."""
        self._token('USING')
        with self._optional():
            self._token('SQL')
        self._token('DESCRIPTOR')
        self._descriptor_name_()
    @graken()
    def _described_object_(self):
        """Rule: <SQL statement name> | CURSOR <extended cursor name> STRUCTURE."""
        with self._choice():
            with self._option():
                self._sql_statement_name_()
            with self._option():
                self._token('CURSOR')
                self._extended_cursor_name_()
                self._token('STRUCTURE')
            self._error('no available options')
    @graken()
    def _input_using_clause_(self):
        """Rule: <using arguments> | <using descriptor>."""
        with self._choice():
            with self._option():
                self._using_arguments_()
            with self._option():
                self._using_descriptor_()
            self._error('no available options')
    @graken()
    def _using_arguments_(self):
        """Rule: USING <using argument> [ , <using argument> ... ]."""
        self._token('USING')
        def sep0():
            self._token(',')
        def block0():
            self._using_argument_()
        self._positive_closure(block0, prefix=sep0)
    @graken()
    def _using_argument_(self):
        """Rule: a using argument is a <general value specification>."""
        self._general_value_specification_()
    @graken()
    def _output_using_clause_(self):
        """Rule: <into arguments> | <into descriptor>."""
        with self._choice():
            with self._option():
                self._into_arguments_()
            with self._option():
                self._into_descriptor_()
            self._error('no available options')
    @graken()
    def _into_arguments_(self):
        """Rule: INTO <into argument> [ , <into argument> ... ]."""
        self._token('INTO')
        def sep0():
            self._token(',')
        def block0():
            self._into_argument_()
        self._positive_closure(block0, prefix=sep0)
    @graken()
    def _into_argument_(self):
        """Rule: an into argument is a <target specification>."""
        self._target_specification_()
    @graken()
    def _into_descriptor_(self):
        """Rule: INTO [ SQL ] DESCRIPTOR <descriptor name>."""
        self._token('INTO')
        with self._optional():
            self._token('SQL')
        self._token('DESCRIPTOR')
        self._descriptor_name_()
    @graken()
    def _execute_statement_(self):
        """Rule: EXECUTE <SQL statement name> [ <output using clause> ] [ <input using clause> ]."""
        self._token('EXECUTE')
        self._sql_statement_name_()
        with self._optional():
            self._output_using_clause_()
        with self._optional():
            self._input_using_clause_()
    @graken()
    def _execute_immediate_statement_(self):
        """Rule: EXECUTE IMMEDIATE <SQL statement variable>."""
        self._token('EXECUTE')
        self._token('IMMEDIATE')
        self._sql_statement_variable_()
    @graken()
    def _allocate_cursor_statement_(self):
        """Rule: ALLOCATE <extended cursor name> <cursor intent>."""
        self._token('ALLOCATE')
        self._extended_cursor_name_()
        self._cursor_intent_()
    @graken()
    def _cursor_intent_(self):
        """Rule: <statement cursor> | <result set cursor>."""
        with self._choice():
            with self._option():
                self._statement_cursor_()
            with self._option():
                self._result_set_cursor_()
            self._error('no available options')
    @graken()
    def _statement_cursor_(self):
        """Rule: [ sensitivity ] [ scrollability ] CURSOR [ holdability ]
        [ returnability ] FOR <extended statement name>."""
        with self._optional():
            self._cursor_sensitivity_()
        with self._optional():
            self._cursor_scrollability_()
        self._token('CURSOR')
        with self._optional():
            self._cursor_holdability_()
        with self._optional():
            self._cursor_returnability_()
        self._token('FOR')
        self._extended_statement_name_()
    @graken()
    def _result_set_cursor_(self):
        """Rule: FOR PROCEDURE <specific routine designator>."""
        self._token('FOR')
        self._token('PROCEDURE')
        self._specific_routine_designator_()
    @graken()
    def _dynamic_open_statement_(self):
        """Rule: OPEN <dynamic cursor name> [ <input using clause> ]."""
        self._token('OPEN')
        self._dynamic_cursor_name_()
        with self._optional():
            self._input_using_clause_()
    @graken()
    def _dynamic_fetch_statement_(self):
        """Rule: FETCH [ [ <fetch orientation> ] FROM ] <dynamic cursor name> <output using clause>."""
        self._token('FETCH')
        with self._optional():
            with self._optional():
                self._fetch_orientation_()
            self._token('FROM')
        self._dynamic_cursor_name_()
        self._output_using_clause_()
    @graken()
    def _dynamic_close_statement_(self):
        """Rule: CLOSE <dynamic cursor name>."""
        self._token('CLOSE')
        self._dynamic_cursor_name_()
    @graken()
    def _dynamic_delete_statement_positioned_(self):
        """Rule: DELETE FROM <target table> WHERE CURRENT OF <dynamic cursor name>."""
        self._token('DELETE')
        self._token('FROM')
        self._target_table_()
        self._token('WHERE')
        self._token('CURRENT')
        self._token('OF')
        self._dynamic_cursor_name_()
    @graken()
    def _dynamic_update_statement_positioned_(self):
        """Rule: UPDATE <target table> SET <set clause list> WHERE CURRENT OF <dynamic cursor name>."""
        self._token('UPDATE')
        self._target_table_()
        self._token('SET')
        self._set_clause_list_()
        self._token('WHERE')
        self._token('CURRENT')
        self._token('OF')
        self._dynamic_cursor_name_()
    @graken()
    def _direct_sql_statement_(self):
        """Rule: <directly executable statement> ; (semicolon-terminated)."""
        self._directly_executable_statement_()
        self._semicolon_()
    @graken()
    def _directly_executable_statement_(self):
        """Rule: a direct data, schema, transaction, connection, or session statement."""
        with self._choice():
            with self._option():
                self._direct_sql_data_statement_()
            with self._option():
                self._sql_schema_statement_()
            with self._option():
                self._sql_transaction_statement_()
            with self._option():
                self._sql_connection_statement_()
            with self._option():
                self._sql_session_statement_()
            self._error('no available options')
    @graken()
    def _direct_sql_data_statement_(self):
        """Rule: searched DELETE, cursor specification (a query), INSERT,
        searched UPDATE, MERGE, or a temporary table declaration."""
        with self._choice():
            with self._option():
                self._delete_statement_searched_()
            with self._option():
                self._cursor_specification_()
            with self._option():
                self._insert_statement_()
            with self._option():
                self._update_statement_searched_()
            with self._option():
                self._merge_statement_()
            with self._option():
                self._temporary_table_declaration_()
            self._error('no available options')
@graken()
def _get_diagnostics_statement_(self):
self._token('GET')
self._token('DIAGNOSTICS')
self._sql_diagnostics_information_()
@graken()
def _sql_diagnostics_information_(self):
with self._choice():
with self._option():
self._statement_information_()
with self._option():
self._condition_information_()
self._error('no available options')
@graken()
def _statement_information_(self):
def sep0():
self._token(',')
def block0():
self._statement_information_item_()
self._positive_closure(block0, prefix=sep0)
@graken()
def _statement_information_item_(self):
self._simple_target_specification_()
self._equals_operator_()
self._statement_information_item_name_()
@graken()
def _statement_information_item_name_(self):
with self._choice():
with self._option():
self._token('NUMBER')
with self._option():
self._token('MORE')
with self._option():
self._token('COMMAND_FUNCTION')
with self._option():
self._token('COMMAND_FUNCTION_CODE')
with self._option():
self._token('DYNAMIC_FUNCTION')
with self._option():
self._token('DYNAMIC_FUNCTION_CODE')
with self._option():
self._token('ROW_COUNT')
with self._option():
self._token('TRANSACTIONS_COMMITTED')
with self._option():
self._token('TRANSACTIONS_ROLLED_BACK')
with self._option():
self._token('TRANSACTION_ACTIVE')
self._error('expecting one of: COMMAND_FUNCTION COMMAND_FUNCTION_CODE DYNAMIC_FUNCTION DYNAMIC_FUNCTION_CODE MORE NUMBER ROW_COUNT TRANSACTIONS_COMMITTED TRANSACTIONS_ROLLED_BACK TRANSACTION_ACTIVE')
@graken()
def _condition_information_(self):
    """Parse EXCEPTION|CONDITION <condition number> followed by one or
    more comma-separated <condition information item>s."""
    with self._group():
        with self._choice():
            with self._option():
                self._token('EXCEPTION')
            with self._option():
                self._token('CONDITION')
            self._error('expecting one of: CONDITION EXCEPTION')
    # The condition number selecting which diagnostics record to read.
    self._simple_value_specification_()

    def sep1():
        self._token(',')

    def block1():
        self._condition_information_item_()
    self._positive_closure(block1, prefix=sep1)
@graken()
def _condition_information_item_(self):
    """Parse <simple target specification> = <item name>."""
    self._simple_target_specification_()
    self._equals_operator_()
    self._condition_information_item_name_()
@graken()
def _condition_information_item_name_(self):
    """Parse one of the SQL-standard condition diagnostics item names
    (keyword choice; first match wins)."""
    with self._choice():
        with self._option():
            self._token('CATALOG_NAME')
        with self._option():
            self._token('CLASS_ORIGIN')
        with self._option():
            self._token('COLUMN_NAME')
        with self._option():
            self._token('CONDITION_NUMBER')
        with self._option():
            self._token('CONNECTION_NAME')
        with self._option():
            self._token('CONSTRAINT_CATALOG')
        with self._option():
            self._token('CONSTRAINT_NAME')
        with self._option():
            self._token('CONSTRAINT_SCHEMA')
        with self._option():
            self._token('CURSOR_NAME')
        with self._option():
            self._token('MESSAGE_LENGTH')
        with self._option():
            self._token('MESSAGE_OCTET_LENGTH')
        with self._option():
            self._token('MESSAGE_TEXT')
        with self._option():
            self._token('PARAMETER_MODE')
        with self._option():
            self._token('PARAMETER_NAME')
        with self._option():
            self._token('PARAMETER_ORDINAL_POSITION')
        with self._option():
            self._token('RETURNED_SQLSTATE')
        with self._option():
            self._token('ROUTINE_CATALOG')
        with self._option():
            self._token('ROUTINE_NAME')
        with self._option():
            self._token('ROUTINE_SCHEMA')
        with self._option():
            self._token('SCHEMA_NAME')
        with self._option():
            self._token('SERVER_NAME')
        with self._option():
            self._token('SPECIFIC_NAME')
        with self._option():
            self._token('SUBCLASS_ORIGIN')
        with self._option():
            self._token('TABLE_NAME')
        with self._option():
            self._token('TRIGGER_CATALOG')
        with self._option():
            self._token('TRIGGER_NAME')
        with self._option():
            self._token('TRIGGER_SCHEMA')
        self._error('expecting one of: CATALOG_NAME CLASS_ORIGIN COLUMN_NAME CONDITION_NUMBER CONNECTION_NAME CONSTRAINT_CATALOG CONSTRAINT_NAME CONSTRAINT_SCHEMA CURSOR_NAME MESSAGE_LENGTH MESSAGE_OCTET_LENGTH MESSAGE_TEXT PARAMETER_MODE PARAMETER_NAME PARAMETER_ORDINAL_POSITION RETURNED_SQLSTATE ROUTINE_CATALOG ROUTINE_NAME ROUTINE_SCHEMA SCHEMA_NAME SERVER_NAME SPECIFIC_NAME SUBCLASS_ORIGIN TABLE_NAME TRIGGER_CATALOG TRIGGER_NAME TRIGGER_SCHEMA')
@graken()
def _ref_cast_option_(self):
    """Parse [ <cast to ref> ] [ <cast to type> ] — both parts optional."""
    with self._optional():
        self._cast_to_ref_()
    with self._optional():
        self._cast_to_type_()
@graken()
def _cast_option_(self):
    """Parse [ <cast to distinct> ] [ <cast to source> ] — both parts optional."""
    with self._optional():
        self._cast_to_distinct_()
    with self._optional():
        self._cast_to_source_()
@graken()
def _reference_scope_check_(self):
    """Parse REFERENCES ARE [NOT] CHECKED [ON DELETE <referential action>].

    NOTE(review): source indentation was lost; the grouping of the
    trailing ON DELETE clause inside a single optional block is
    reconstructed from the SQL:2003 <reference scope check> production —
    confirm against the generating grammar.
    """
    self._token('REFERENCES')
    self._token('ARE')
    with self._optional():
        self._token('NOT')
    self._token('CHECKED')
    with self._optional():
        self._token('ON')
        self._token('DELETE')
        self._referential_action_()
@graken()
def _start_(self):
    """Grammar entry point: one direct SQL statement, then end of input."""
    self._direct_sql_statement_()
    self._check_eof()
class SqlSemantics(object):
# --- Semantic-action stubs: terminals, literals, names, data types ---------
# One method per grammar rule of the same name; each appears to be
# generated (grako/TatSu style), so prefer regenerating from the grammar
# over hand edits. All are identity actions that return the parsed AST
# node unchanged; subclass SqlSemantics and override a method to
# transform that rule's node.
def digit(self, ast): return ast
def double_quote(self, ast): return ast
def quote(self, ast): return ast
def left_paren(self, ast): return ast
def right_paren(self, ast): return ast
def asterisk(self, ast): return ast
def plus_sign(self, ast): return ast
def comma(self, ast): return ast
def minus_sign(self, ast): return ast
def period(self, ast): return ast
def solidus(self, ast): return ast
def colon(self, ast): return ast
def semicolon(self, ast): return ast
def less_than_operator(self, ast): return ast
def equals_operator(self, ast): return ast
def greater_than_operator(self, ast): return ast
def question_mark(self, ast): return ast
def left_bracket_or_trigraph(self, ast): return ast
def right_bracket_or_trigraph(self, ast): return ast
def left_bracket(self, ast): return ast
def left_bracket_trigraph(self, ast): return ast
def right_bracket(self, ast): return ast
def right_bracket_trigraph(self, ast): return ast
def underscore(self, ast): return ast
def regular_identifier(self, ast): return ast
def large_object_length_token(self, ast): return ast
def multiplier(self, ast): return ast
def delimited_identifier(self, ast): return ast
def delimited_identifier_body(self, ast): return ast
def unicode_escape_value(self, ast): return ast
def unicode_4_digit_escape_value(self, ast): return ast
def unicode_6_digit_escape_value(self, ast): return ast
def unicode_character_escape_value(self, ast): return ast
def unicode_escape_character(self, ast): return ast
def not_equals_operator(self, ast): return ast
def greater_than_or_equals_operator(self, ast): return ast
def less_than_or_equals_operator(self, ast): return ast
def concatenation_operator(self, ast): return ast
def right_arrow(self, ast): return ast
def double_colon(self, ast): return ast
def literal(self, ast): return ast
def unsigned_literal(self, ast): return ast
def general_literal(self, ast): return ast
def character_string_literal(self, ast): return ast
def character_representation(self, ast): return ast
def national_character_string_literal(self, ast): return ast
def unicode_character_string_literal(self, ast): return ast
def unicode_representation(self, ast): return ast
def binary_string_literal(self, ast): return ast
def hexit(self, ast): return ast
def byte(self, ast): return ast
def signed_numeric_literal(self, ast): return ast
def unsigned_numeric_literal(self, ast): return ast
def exact_numeric_literal(self, ast): return ast
def sign(self, ast): return ast
def approximate_numeric_literal(self, ast): return ast
def signed_integer(self, ast): return ast
def unsigned_integer(self, ast): return ast
def datetime_literal(self, ast): return ast
def date_literal(self, ast): return ast
def time_literal(self, ast): return ast
def timestamp_literal(self, ast): return ast
def date_string(self, ast): return ast
def time_string(self, ast): return ast
def timestamp_string(self, ast): return ast
def time_zone_interval(self, ast): return ast
def date_value(self, ast): return ast
def time_value(self, ast): return ast
def interval_literal(self, ast): return ast
def interval_string(self, ast): return ast
def unquoted_date_string(self, ast): return ast
def unquoted_time_string(self, ast): return ast
def unquoted_timestamp_string(self, ast): return ast
def unquoted_interval_string(self, ast): return ast
def year_month_literal(self, ast): return ast
def day_time_literal(self, ast): return ast
def day_time_interval(self, ast): return ast
def time_interval(self, ast): return ast
def years_value(self, ast): return ast
def months_value(self, ast): return ast
def days_value(self, ast): return ast
def hours_value(self, ast): return ast
def minutes_value(self, ast): return ast
def seconds_value(self, ast): return ast
def datetime_value(self, ast): return ast
def boolean_literal(self, ast): return ast
def identifier(self, ast): return ast
def identifier_list(self, ast): return ast
def actual_identifier(self, ast): return ast
def table_name(self, ast): return ast
def schema_name(self, ast): return ast
def schema_qualified_name(self, ast): return ast
def local_or_schema_qualified_name(self, ast): return ast
def local_or_schema_qualifier(self, ast): return ast
def cursor_name(self, ast): return ast
def local_qualified_name(self, ast): return ast
def host_parameter_name(self, ast): return ast
def external_routine_name(self, ast): return ast
def character_set_name(self, ast): return ast
def connection_name(self, ast): return ast
def sql_statement_name(self, ast): return ast
def extended_statement_name(self, ast): return ast
def dynamic_cursor_name(self, ast): return ast
def extended_cursor_name(self, ast): return ast
def descriptor_name(self, ast): return ast
def scope_option(self, ast): return ast
def data_type(self, ast): return ast
def predefined_type(self, ast): return ast
def character_string_type(self, ast): return ast
def character_large_object_type(self, ast): return ast
def national_character_string_type(self, ast): return ast
def national_character_large_object_type(self, ast): return ast
def binary_large_object_string_type(self, ast): return ast
def numeric_type(self, ast): return ast
def exact_numeric_type(self, ast): return ast
def approximate_numeric_type(self, ast): return ast
def length(self, ast): return ast
def large_object_length(self, ast): return ast
def char_length_units(self, ast): return ast
def precision(self, ast): return ast
def scale(self, ast): return ast
def datetime_type(self, ast): return ast
def with_or_without_time_zone(self, ast): return ast
def interval_type(self, ast): return ast
def row_type(self, ast): return ast
def row_type_body(self, ast): return ast
def reference_type(self, ast): return ast
def scope_clause(self, ast): return ast
def collection_type(self, ast): return ast
def array_type(self, ast): return ast
def multiset_type(self, ast): return ast
def field_definition(self, ast): return ast
# --- Semantic-action stubs: value expressions, table and query expressions -
# Identity actions (generated-code pattern): each returns the parsed AST
# node unchanged; override in a subclass to transform a specific rule.
def value_expression_primary(self, ast): return ast
def parenthesized_value_expression(self, ast): return ast
def nonparenthesized_value_expression_primary(self, ast): return ast
def collection_value_constructor(self, ast): return ast
def value_specification(self, ast): return ast
def unsigned_value_specification(self, ast): return ast
def general_value_specification(self, ast): return ast
def simple_value_specification(self, ast): return ast
def target_specification(self, ast): return ast
def simple_target_specification(self, ast): return ast
def host_parameter_specification(self, ast): return ast
def dynamic_parameter_specification(self, ast): return ast
def indicator_parameter(self, ast): return ast
def target_array_element_specification(self, ast): return ast
def target_array_reference(self, ast): return ast
def current_collation_specification(self, ast): return ast
def contextually_typed_value_specification(self, ast): return ast
def implicitly_typed_value_specification(self, ast): return ast
def empty_specification(self, ast): return ast
def identifier_chain(self, ast): return ast
def column_reference(self, ast): return ast
def sql_parameter_reference(self, ast): return ast
def set_function_specification(self, ast): return ast
def grouping_operation(self, ast): return ast
def window_function(self, ast): return ast
def window_function_type(self, ast): return ast
def rank_function_type(self, ast): return ast
def window_name_or_specification(self, ast): return ast
def case_expression(self, ast): return ast
def case_abbreviation(self, ast): return ast
def case_specification(self, ast): return ast
def simple_case(self, ast): return ast
def searched_case(self, ast): return ast
def simple_when_clause(self, ast): return ast
def searched_when_clause(self, ast): return ast
def else_clause(self, ast): return ast
def case_operand(self, ast): return ast
def when_operand(self, ast): return ast
def result(self, ast): return ast
def cast_specification(self, ast): return ast
def cast_operand(self, ast): return ast
def cast_target(self, ast): return ast
def next_value_expression(self, ast): return ast
def field_reference(self, ast): return ast
def subtype_treatment(self, ast): return ast
def target_subtype(self, ast): return ast
def method_invocation(self, ast): return ast
def direct_invocation(self, ast): return ast
def generalized_invocation(self, ast): return ast
def static_method_invocation(self, ast): return ast
def new_specification(self, ast): return ast
def attribute_or_method_reference(self, ast): return ast
def reference_resolution(self, ast): return ast
def array_element_reference(self, ast): return ast
def multiset_element_reference(self, ast): return ast
def value_expression(self, ast): return ast
def common_value_expression(self, ast): return ast
def user_defined_type_value_expression(self, ast): return ast
def reference_value_expression(self, ast): return ast
def collection_value_expression(self, ast): return ast
def numeric_value_expression(self, ast): return ast
def term(self, ast): return ast
def factor(self, ast): return ast
def numeric_primary(self, ast): return ast
def numeric_value_function(self, ast): return ast
def position_expression(self, ast): return ast
def string_position_expression(self, ast): return ast
def blob_position_expression(self, ast): return ast
def length_expression(self, ast): return ast
def char_length_expression(self, ast): return ast
def octet_length_expression(self, ast): return ast
def extract_expression(self, ast): return ast
def extract_field(self, ast): return ast
def time_zone_field(self, ast): return ast
def extract_source(self, ast): return ast
def cardinality_expression(self, ast): return ast
def absolute_value_expression(self, ast): return ast
def modulus_expression(self, ast): return ast
def natural_logarithm(self, ast): return ast
def exponential_function(self, ast): return ast
def power_function(self, ast): return ast
def square_root(self, ast): return ast
def floor_function(self, ast): return ast
def ceiling_function(self, ast): return ast
def width_bucket_function(self, ast): return ast
def string_value_expression(self, ast): return ast
def character_value_expression(self, ast): return ast
def concatenation(self, ast): return ast
def character_factor(self, ast): return ast
def character_primary(self, ast): return ast
def blob_value_expression(self, ast): return ast
def blob_factor(self, ast): return ast
def blob_primary(self, ast): return ast
def blob_concatenation(self, ast): return ast
def string_value_function(self, ast): return ast
def character_value_function(self, ast): return ast
def character_substring_function(self, ast): return ast
def regular_expression_substring_function(self, ast): return ast
def fold(self, ast): return ast
def transcoding(self, ast): return ast
def character_transliteration(self, ast): return ast
def trim_function(self, ast): return ast
def trim_operands(self, ast): return ast
def trim_specification(self, ast): return ast
def character_overlay_function(self, ast): return ast
def normalize_function(self, ast): return ast
def specific_type_method(self, ast): return ast
def blob_value_function(self, ast): return ast
def blob_substring_function(self, ast): return ast
def blob_trim_function(self, ast): return ast
def blob_trim_operands(self, ast): return ast
def blob_overlay_function(self, ast): return ast
def start_position(self, ast): return ast
def string_length(self, ast): return ast
def datetime_value_expression(self, ast): return ast
def datetime_term(self, ast): return ast
def datetime_factor(self, ast): return ast
def datetime_primary(self, ast): return ast
def time_zone(self, ast): return ast
def time_zone_specifier(self, ast): return ast
def datetime_value_function(self, ast): return ast
def current_time_value_function(self, ast): return ast
def current_local_time_value_function(self, ast): return ast
def current_timestamp_value_function(self, ast): return ast
def current_local_timestamp_value_function(self, ast): return ast
def interval_value_expression(self, ast): return ast
def interval_term(self, ast): return ast
def interval_factor(self, ast): return ast
def interval_primary(self, ast): return ast
def interval_value_expression_1(self, ast): return ast
def interval_term_1(self, ast): return ast
def interval_term_2(self, ast): return ast
def interval_absolute_value_function(self, ast): return ast
def boolean_value_expression(self, ast): return ast
def boolean_term(self, ast): return ast
def boolean_factor(self, ast): return ast
def boolean_test(self, ast): return ast
def truth_value(self, ast): return ast
def boolean_primary(self, ast): return ast
def boolean_predicand(self, ast): return ast
def parenthesized_boolean_value_expression(self, ast): return ast
def array_value_expression(self, ast): return ast
def array_concatenation(self, ast): return ast
def array_primary(self, ast): return ast
def array_value_constructor(self, ast): return ast
def array_value_constructor_by_enumeration(self, ast): return ast
def array_element_list(self, ast): return ast
def array_element(self, ast): return ast
def array_value_constructor_by_query(self, ast): return ast
def multiset_value_expression(self, ast): return ast
def multiset_term(self, ast): return ast
def multiset_primary(self, ast): return ast
def multiset_set_function(self, ast): return ast
def multiset_value_constructor(self, ast): return ast
def multiset_value_constructor_by_enumeration(self, ast): return ast
def multiset_element_list(self, ast): return ast
def multiset_element(self, ast): return ast
def multiset_value_constructor_by_query(self, ast): return ast
def table_value_constructor_by_query(self, ast): return ast
def row_value_constructor(self, ast): return ast
def explicit_row_value_constructor(self, ast): return ast
def row_value_constructor_element_list(self, ast): return ast
def row_value_constructor_element(self, ast): return ast
def contextually_typed_row_value_constructor(self, ast): return ast
def contextually_typed_row_value_constructor_element_list(self, ast): return ast
def contextually_typed_row_value_constructor_element(self, ast): return ast
def row_value_constructor_predicand(self, ast): return ast
def row_value_expression(self, ast): return ast
def table_row_value_expression(self, ast): return ast
def contextually_typed_row_value_expression(self, ast): return ast
def row_value_predicand(self, ast): return ast
def row_value_special_case(self, ast): return ast
def table_value_constructor(self, ast): return ast
def row_value_expression_list(self, ast): return ast
def contextually_typed_table_value_constructor(self, ast): return ast
def contextually_typed_row_value_expression_list(self, ast): return ast
def table_expression(self, ast): return ast
def from_clause(self, ast): return ast
def table_reference_list(self, ast): return ast
def table_reference(self, ast): return ast
def table_factor(self, ast): return ast
def sample_clause(self, ast): return ast
def sample_method(self, ast): return ast
def repeatable_clause(self, ast): return ast
def table_primary(self, ast): return ast
def parenthesized_joined_table(self, ast): return ast
def only_spec(self, ast): return ast
def lateral_derived_table(self, ast): return ast
def collection_derived_table(self, ast): return ast
def table_function_derived_table(self, ast): return ast
def table_or_query_name(self, ast): return ast
def column_name_list(self, ast): return ast
def joined_table(self, ast): return ast
def cross_join(self, ast): return ast
def qualified_join(self, ast): return ast
def natural_join(self, ast): return ast
def union_join(self, ast): return ast
def join_specification(self, ast): return ast
def join_condition(self, ast): return ast
def named_columns_join(self, ast): return ast
def join_type(self, ast): return ast
def outer_join_type(self, ast): return ast
def where_clause(self, ast): return ast
def group_by_clause(self, ast): return ast
def grouping_element_list(self, ast): return ast
def grouping_element(self, ast): return ast
def ordinary_grouping_set(self, ast): return ast
def grouping_column_reference(self, ast): return ast
def grouping_column_reference_list(self, ast): return ast
def rollup_list(self, ast): return ast
def ordinary_grouping_set_list(self, ast): return ast
def cube_list(self, ast): return ast
def grouping_sets_specification(self, ast): return ast
def grouping_set_list(self, ast): return ast
def grouping_set(self, ast): return ast
def empty_grouping_set(self, ast): return ast
def having_clause(self, ast): return ast
def window_clause(self, ast): return ast
def window_definition_list(self, ast): return ast
def window_definition(self, ast): return ast
def window_specification(self, ast): return ast
def window_specification_details(self, ast): return ast
def window_partition_clause(self, ast): return ast
def window_partition_column_reference_list(self, ast): return ast
def window_partition_column_reference(self, ast): return ast
def window_frame_clause(self, ast): return ast
def window_frame_units(self, ast): return ast
def window_frame_extent(self, ast): return ast
def window_frame_start(self, ast): return ast
def window_frame_preceding(self, ast): return ast
def window_frame_between(self, ast): return ast
def window_frame_bound(self, ast): return ast
def window_frame_following(self, ast): return ast
def window_frame_exclusion(self, ast): return ast
def query_specification(self, ast): return ast
def select_list(self, ast): return ast
def select_sublist(self, ast): return ast
def qualified_asterisk(self, ast): return ast
def derived_column(self, ast): return ast
def as_clause(self, ast): return ast
def all_fields_reference(self, ast): return ast
def query_expression(self, ast): return ast
def with_clause(self, ast): return ast
def with_list(self, ast): return ast
def with_list_element(self, ast): return ast
def query_expression_body(self, ast): return ast
def non_join_query_expression(self, ast): return ast
def query_term(self, ast): return ast
def non_join_query_term(self, ast): return ast
def query_primary(self, ast): return ast
def non_join_query_primary(self, ast): return ast
def simple_table(self, ast): return ast
def explicit_table(self, ast): return ast
def corresponding_spec(self, ast): return ast
def search_or_cycle_clause(self, ast): return ast
def search_clause(self, ast): return ast
def recursive_search_order(self, ast): return ast
def cycle_clause(self, ast): return ast
def cycle_column_list(self, ast): return ast
def subquery(self, ast): return ast
# --- Semantic-action stubs: predicates, DDL, and routine definitions -------
# Identity actions (generated-code pattern): each returns the parsed AST
# node unchanged; override in a subclass to transform a specific rule.
def predicate(self, ast): return ast
def comparison_predicate(self, ast): return ast
def comparison_predicate_part_2(self, ast): return ast
def comp_op(self, ast): return ast
def between_predicate(self, ast): return ast
def between_predicate_part_2(self, ast): return ast
def in_predicate(self, ast): return ast
def in_predicate_part_2(self, ast): return ast
def in_predicate_value(self, ast): return ast
def in_value_list(self, ast): return ast
def like_predicate(self, ast): return ast
def character_like_predicate(self, ast): return ast
def character_like_predicate_part_2(self, ast): return ast
def escape_character(self, ast): return ast
def octet_like_predicate(self, ast): return ast
def octet_like_predicate_part_2(self, ast): return ast
def similar_predicate(self, ast): return ast
def similar_predicate_part_2(self, ast): return ast
def null_predicate(self, ast): return ast
def null_predicate_part_2(self, ast): return ast
def quantified_comparison_predicate(self, ast): return ast
def quantified_comparison_predicate_part_2(self, ast): return ast
def quantifier(self, ast): return ast
def some(self, ast): return ast
def exists_predicate(self, ast): return ast
def unique_predicate(self, ast): return ast
def normalized_predicate(self, ast): return ast
def normalized_predicate_part_2(self, ast): return ast
def match_predicate(self, ast): return ast
def match_predicate_part_2(self, ast): return ast
def overlaps_predicate(self, ast): return ast
def overlaps_predicate_part_1(self, ast): return ast
def overlaps_predicate_part_2(self, ast): return ast
def distinct_predicate(self, ast): return ast
def distinct_predicate_part_2(self, ast): return ast
def member_predicate(self, ast): return ast
def member_predicate_part_2(self, ast): return ast
def submultiset_predicate(self, ast): return ast
def submultiset_predicate_part_2(self, ast): return ast
def set_predicate(self, ast): return ast
def set_predicate_part_2(self, ast): return ast
def type_predicate(self, ast): return ast
def type_predicate_part_2(self, ast): return ast
def type_list(self, ast): return ast
def user_defined_type_specification(self, ast): return ast
def exclusive_user_defined_type_specification(self, ast): return ast
def search_condition(self, ast): return ast
def interval_qualifier(self, ast): return ast
def start_field(self, ast): return ast
def end_field(self, ast): return ast
def single_datetime_field(self, ast): return ast
def primary_datetime_field(self, ast): return ast
def non_second_primary_datetime_field(self, ast): return ast
def language_clause(self, ast): return ast
def language_name(self, ast): return ast
def path_specification(self, ast): return ast
def schema_name_list(self, ast): return ast
def routine_invocation(self, ast): return ast
def sql_argument_list(self, ast): return ast
def sql_argument(self, ast): return ast
def generalized_expression(self, ast): return ast
def specific_routine_designator(self, ast): return ast
def routine_type(self, ast): return ast
def member_name(self, ast): return ast
def member_name_alternatives(self, ast): return ast
def data_type_list(self, ast): return ast
def collate_clause(self, ast): return ast
def constraint_name_definition(self, ast): return ast
def constraint_characteristics(self, ast): return ast
def constraint_check_time(self, ast): return ast
def aggregate_function(self, ast): return ast
def general_set_function(self, ast): return ast
def computational_operation(self, ast): return ast
def set_quantifier(self, ast): return ast
def filter_clause(self, ast): return ast
def binary_set_function(self, ast): return ast
def binary_set_function_type(self, ast): return ast
def ordered_set_function(self, ast): return ast
def hypothetical_set_function(self, ast): return ast
def within_group_specification(self, ast): return ast
def hypothetical_set_function_value_expression_list(self, ast): return ast
def inverse_distribution_function(self, ast): return ast
def inverse_distribution_function_type(self, ast): return ast
def sort_specification_list(self, ast): return ast
def sort_specification(self, ast): return ast
def ordering_specification(self, ast): return ast
def null_ordering(self, ast): return ast
def schema_definition(self, ast): return ast
def schema_character_set_or_path(self, ast): return ast
def schema_name_clause(self, ast): return ast
def schema_character_set_specification(self, ast): return ast
def schema_path_specification(self, ast): return ast
def schema_element(self, ast): return ast
def drop_schema_statement(self, ast): return ast
def drop_behavior(self, ast): return ast
def table_definition(self, ast): return ast
def table_contents_source(self, ast): return ast
def table_scope(self, ast): return ast
def global_or_local(self, ast): return ast
def table_commit_action(self, ast): return ast
def table_element_list(self, ast): return ast
def table_element(self, ast): return ast
def typed_table_clause(self, ast): return ast
def self_referencing_column_specification(self, ast): return ast
def reference_generation(self, ast): return ast
def column_options(self, ast): return ast
def column_option_list(self, ast): return ast
def subtable_clause(self, ast): return ast
def like_clause(self, ast): return ast
def like_options(self, ast): return ast
def identity_option(self, ast): return ast
def column_default_option(self, ast): return ast
def as_subquery_clause(self, ast): return ast
def with_or_without_data(self, ast): return ast
def column_definition(self, ast): return ast
def data_type_or_domain_name(self, ast): return ast
def column_constraint_definition(self, ast): return ast
def column_constraint(self, ast): return ast
def identity_column_specification(self, ast): return ast
def generation_clause(self, ast): return ast
def generation_rule(self, ast): return ast
def generation_expression(self, ast): return ast
def default_clause(self, ast): return ast
def default_option(self, ast): return ast
def table_constraint_definition(self, ast): return ast
def table_constraint(self, ast): return ast
def unique_constraint_definition(self, ast): return ast
def unique_specification(self, ast): return ast
def referential_constraint_definition(self, ast): return ast
def references_specification(self, ast): return ast
def match_type(self, ast): return ast
def referenced_table_and_columns(self, ast): return ast
def referential_triggered_action(self, ast): return ast
def update_rule(self, ast): return ast
def delete_rule(self, ast): return ast
def referential_action(self, ast): return ast
def check_constraint_definition(self, ast): return ast
def alter_table_statement(self, ast): return ast
def alter_table_action(self, ast): return ast
def add_column_definition(self, ast): return ast
def alter_column_definition(self, ast): return ast
def alter_column_action(self, ast): return ast
def set_column_default_clause(self, ast): return ast
def drop_column_default_clause(self, ast): return ast
def add_column_scope_clause(self, ast): return ast
def drop_column_scope_clause(self, ast): return ast
def alter_identity_column_specification(self, ast): return ast
def alter_identity_column_option(self, ast): return ast
def drop_column_definition(self, ast): return ast
def add_table_constraint_definition(self, ast): return ast
def drop_table_constraint_definition(self, ast): return ast
def drop_table_statement(self, ast): return ast
def view_definition(self, ast): return ast
def view_specification(self, ast): return ast
def regular_view_specification(self, ast): return ast
def referenceable_view_specification(self, ast): return ast
def subview_clause(self, ast): return ast
def view_element_list(self, ast): return ast
def view_element(self, ast): return ast
def view_column_option(self, ast): return ast
def levels_clause(self, ast): return ast
def drop_view_statement(self, ast): return ast
def domain_definition(self, ast): return ast
def domain_constraint(self, ast): return ast
def alter_domain_statement(self, ast): return ast
def alter_domain_action(self, ast): return ast
def set_domain_default_clause(self, ast): return ast
def drop_domain_default_clause(self, ast): return ast
def add_domain_constraint_definition(self, ast): return ast
def drop_domain_constraint_definition(self, ast): return ast
def drop_domain_statement(self, ast): return ast
def character_set_definition(self, ast): return ast
def character_set_source(self, ast): return ast
def drop_character_set_statement(self, ast): return ast
def collation_definition(self, ast): return ast
def pad_characteristic(self, ast): return ast
def drop_collation_statement(self, ast): return ast
def transliteration_definition(self, ast): return ast
def transliteration_source(self, ast): return ast
def drop_transliteration_statement(self, ast): return ast
def assertion_definition(self, ast): return ast
def drop_assertion_statement(self, ast): return ast
def trigger_definition(self, ast): return ast
def trigger_action_time(self, ast): return ast
def trigger_event(self, ast): return ast
def triggered_action(self, ast): return ast
def triggered_sql_statement(self, ast): return ast
def old_or_new_values_alias_list(self, ast): return ast
def old_or_new_values_alias(self, ast): return ast
def drop_trigger_statement(self, ast): return ast
def user_defined_type_definition(self, ast): return ast
def user_defined_type_body(self, ast): return ast
def user_defined_type_option_list(self, ast): return ast
def user_defined_type_option(self, ast): return ast
def subtype_clause(self, ast): return ast
def representation(self, ast): return ast
def member_list(self, ast): return ast
def member(self, ast): return ast
def instantiable_clause(self, ast): return ast
def finality(self, ast): return ast
def reference_type_specification(self, ast): return ast
def user_defined_representation(self, ast): return ast
def derived_representation(self, ast): return ast
def system_generated_representation(self, ast): return ast
def cast_to_ref(self, ast): return ast
def cast_to_type(self, ast): return ast
def list_of_attributes(self, ast): return ast
def cast_to_distinct(self, ast): return ast
def cast_to_source(self, ast): return ast
def method_specification_list(self, ast): return ast
def method_specification(self, ast): return ast
def original_method_specification(self, ast): return ast
def overriding_method_specification(self, ast): return ast
def partial_method_specification(self, ast): return ast
def method_characteristics(self, ast): return ast
def method_characteristic(self, ast): return ast
def attribute_definition(self, ast): return ast
def alter_type_statement(self, ast): return ast
def alter_type_action(self, ast): return ast
def add_attribute_definition(self, ast): return ast
def drop_attribute_definition(self, ast): return ast
def add_original_method_specification(self, ast): return ast
def add_overriding_method_specification(self, ast): return ast
def drop_method_specification(self, ast): return ast
def specific_method_specification_designator(self, ast): return ast
def drop_data_type_statement(self, ast): return ast
def schema_routine(self, ast): return ast
def schema_procedure(self, ast): return ast
def schema_function(self, ast): return ast
def sql_invoked_procedure(self, ast): return ast
def sql_invoked_function(self, ast): return ast
def sql_parameter_declaration_list(self, ast): return ast
def sql_parameter_declaration(self, ast): return ast
def parameter_mode(self, ast): return ast
def parameter_type(self, ast): return ast
def locator_indication(self, ast): return ast
def function_specification(self, ast): return ast
def method_specification_designator(self, ast): return ast
def routine_characteristics(self, ast): return ast
def routine_characteristic(self, ast): return ast
def savepoint_level_indication(self, ast): return ast
def dynamic_result_sets_characteristic(self, ast): return ast
def parameter_style_clause(self, ast): return ast
def dispatch_clause(self, ast): return ast
def returns_clause(self, ast): return ast
def returns_type(self, ast): return ast
def returns_table_type(self, ast): return ast
def table_function_column_list(self, ast): return ast
def table_function_column_list_element(self, ast): return ast
def result_cast(self, ast): return ast
def result_cast_from_type(self, ast): return ast
def returns_data_type(self, ast): return ast
def routine_body(self, ast): return ast
def sql_routine_spec(self, ast): return ast
def rights_clause(self, ast): return ast
def external_body_reference(self, ast): return ast
def external_security_clause(self, ast): return ast
def parameter_style(self, ast):
return ast
def deterministic_characteristic(self, ast):
return ast
def sql_data_access_indication(self, ast):
return ast
def null_call_clause(self, ast):
return ast
def transform_group_specification(self, ast):
return ast
def multiple_group_specification(self, ast):
return ast
def group_specification(self, ast):
return ast
def alter_routine_statement(self, ast):
return ast
def alter_routine_characteristics(self, ast):
return ast
def alter_routine_characteristic(self, ast):
return ast
def drop_routine_statement(self, ast):
return ast
def user_defined_cast_definition(self, ast):
return ast
def source_data_type(self, ast):
return ast
def target_data_type(self, ast):
return ast
def drop_user_defined_cast_statement(self, ast):
return ast
def user_defined_ordering_definition(self, ast):
return ast
def ordering_form(self, ast):
return ast
def equals_ordering_form(self, ast):
return ast
def full_ordering_form(self, ast):
return ast
def ordering_category(self, ast):
return ast
def relative_category(self, ast):
return ast
def map_category(self, ast):
return ast
def state_category(self, ast):
return ast
def drop_user_defined_ordering_statement(self, ast):
return ast
def transform_definition(self, ast):
return ast
def transform_group(self, ast):
return ast
def transform_element_list(self, ast):
return ast
def transform_element(self, ast):
return ast
def to_sql(self, ast):
return ast
def from_sql(self, ast):
return ast
def alter_transform_statement(self, ast):
return ast
def alter_group(self, ast):
return ast
def alter_transform_action_list(self, ast):
return ast
def alter_transform_action(self, ast):
return ast
def add_transform_element_list(self, ast):
return ast
def drop_transform_element_list(self, ast):
return ast
def transform_kind(self, ast):
return ast
def drop_transform_statement(self, ast):
return ast
def transforms_to_be_dropped(self, ast):
return ast
def sequence_generator_definition(self, ast):
return ast
def sequence_generator_options(self, ast):
return ast
def sequence_generator_option(self, ast):
return ast
def common_sequence_generator_options(self, ast):
return ast
def common_sequence_generator_option(self, ast):
return ast
def basic_sequence_generator_option(self, ast):
return ast
def sequence_generator_data_type_option(self, ast):
return ast
def sequence_generator_start_with_option(self, ast):
return ast
def sequence_generator_increment_by_option(self, ast):
return ast
def sequence_generator_maxvalue_option(self, ast):
return ast
def sequence_generator_minvalue_option(self, ast):
return ast
def sequence_generator_cycle_option(self, ast):
return ast
def alter_sequence_generator_statement(self, ast):
return ast
def alter_sequence_generator_options(self, ast):
return ast
def alter_sequence_generator_option(self, ast):
return ast
def alter_sequence_generator_restart_option(self, ast):
return ast
def drop_sequence_generator_statement(self, ast):
return ast
def grant_statement(self, ast):
return ast
def grant_privilege_statement(self, ast):
return ast
def privileges(self, ast):
return ast
def object_name(self, ast):
return ast
def object_privileges(self, ast):
return ast
def action(self, ast):
return ast
def privilege_method_list(self, ast):
return ast
def grantee(self, ast):
return ast
def grantor(self, ast):
return ast
def role_definition(self, ast):
return ast
def grant_role_statement(self, ast):
return ast
def drop_role_statement(self, ast):
return ast
def revoke_statement(self, ast):
return ast
def revoke_privilege_statement(self, ast):
return ast
def revoke_option_extension(self, ast):
return ast
def revoke_role_statement(self, ast):
return ast
def character_set_specification_list(self, ast):
return ast
def sql_procedure_statement(self, ast):
return ast
def sql_executable_statement(self, ast):
return ast
def sql_schema_statement(self, ast):
return ast
def sql_schema_definition_statement(self, ast):
return ast
def sql_schema_manipulation_statement(self, ast):
return ast
def sql_data_statement(self, ast):
return ast
def sql_data_change_statement(self, ast):
return ast
def sql_control_statement(self, ast):
return ast
def sql_transaction_statement(self, ast):
return ast
def sql_connection_statement(self, ast):
return ast
def sql_session_statement(self, ast):
return ast
def sql_dynamic_statement(self, ast):
return ast
def sql_dynamic_data_statement(self, ast):
return ast
def descriptor_statement(self, ast):
return ast
def cursor_sensitivity(self, ast):
return ast
def cursor_scrollability(self, ast):
return ast
def cursor_holdability(self, ast):
return ast
def cursor_returnability(self, ast):
return ast
def cursor_specification(self, ast):
return ast
def updatability_clause(self, ast):
return ast
def order_by_clause(self, ast):
return ast
def open_statement(self, ast):
return ast
def fetch_statement(self, ast):
return ast
def fetch_orientation(self, ast):
return ast
def fetch_target_list(self, ast):
return ast
def close_statement(self, ast):
return ast
def select_statement_single_row(self, ast):
return ast
def select_target_list(self, ast):
return ast
def delete_statement_positioned(self, ast):
return ast
def target_table(self, ast):
return ast
def delete_statement_searched(self, ast):
return ast
def insert_statement(self, ast):
return ast
def insert_columns_and_source(self, ast):
return ast
def from_subquery(self, ast):
return ast
def from_constructor(self, ast):
return ast
def override_clause(self, ast):
return ast
def from_default(self, ast):
return ast
def merge_statement(self, ast):
return ast
def merge_operation_specification(self, ast):
return ast
def merge_when_clause(self, ast):
return ast
def merge_when_matched_clause(self, ast):
return ast
def merge_when_not_matched_clause(self, ast):
return ast
def merge_update_specification(self, ast):
return ast
def merge_insert_specification(self, ast):
return ast
def merge_insert_value_list(self, ast):
return ast
def merge_insert_value_element(self, ast):
return ast
def update_statement_positioned(self, ast):
return ast
def update_statement_searched(self, ast):
return ast
def set_clause_list(self, ast):
return ast
def set_clause(self, ast):
return ast
def set_target(self, ast):
return ast
def multiple_column_assignment(self, ast):
return ast
def set_target_list(self, ast):
return ast
def update_target(self, ast):
return ast
def mutated_set_clause(self, ast):
return ast
def mutated_target(self, ast):
return ast
def update_source(self, ast):
return ast
def temporary_table_declaration(self, ast):
return ast
def free_locator_statement(self, ast):
return ast
def locator_reference(self, ast):
return ast
def hold_locator_statement(self, ast):
return ast
def call_statement(self, ast):
return ast
def return_statement(self, ast):
return ast
def return_value(self, ast):
return ast
def start_transaction_statement(self, ast):
return ast
def transaction_mode(self, ast):
return ast
def transaction_access_mode(self, ast):
return ast
def isolation_level(self, ast):
return ast
def level_of_isolation(self, ast):
return ast
def diagnostics_size(self, ast):
return ast
def set_transaction_statement(self, ast):
return ast
def transaction_characteristics(self, ast):
return ast
def set_constraints_mode_statement(self, ast):
return ast
def constraint_name_list(self, ast):
return ast
def savepoint_statement(self, ast):
return ast
def release_savepoint_statement(self, ast):
return ast
def commit_statement(self, ast):
return ast
def rollback_statement(self, ast):
return ast
def savepoint_clause(self, ast):
return ast
def connect_statement(self, ast):
return ast
def connection_target(self, ast):
return ast
def set_connection_statement(self, ast):
return ast
def connection_object(self, ast):
return ast
def disconnect_statement(self, ast):
return ast
def disconnect_object(self, ast):
return ast
def set_session_characteristics_statement(self, ast):
return ast
def session_characteristic_list(self, ast):
return ast
def session_characteristic(self, ast):
return ast
def set_session_user_identifier_statement(self, ast):
return ast
def set_role_statement(self, ast):
return ast
def role_specification(self, ast):
return ast
def set_local_time_zone_statement(self, ast):
return ast
def set_time_zone_value(self, ast):
return ast
def set_catalog_statement(self, ast):
return ast
def catalog_name_characteristic(self, ast):
return ast
def set_schema_statement(self, ast):
return ast
def schema_name_characteristic(self, ast):
return ast
def set_names_statement(self, ast):
return ast
def character_set_name_characteristic(self, ast):
return ast
def set_path_statement(self, ast):
return ast
def sql_path_characteristic(self, ast):
return ast
def set_transform_group_statement(self, ast):
return ast
def transform_group_characteristic(self, ast):
return ast
def set_session_collation_statement(self, ast):
return ast
def allocate_descriptor_statement(self, ast):
return ast
def deallocate_descriptor_statement(self, ast):
return ast
def get_descriptor_statement(self, ast):
return ast
def get_descriptor_information(self, ast):
return ast
def get_header_information(self, ast):
return ast
def header_item_name(self, ast):
return ast
def get_item_information(self, ast):
return ast
def item_number(self, ast):
return ast
def descriptor_item_name(self, ast):
return ast
def set_descriptor_statement(self, ast):
return ast
def set_descriptor_information(self, ast):
return ast
def set_header_information(self, ast):
return ast
def set_item_information(self, ast):
return ast
def prepare_statement(self, ast):
return ast
def attributes_specification(self, ast):
return ast
def sql_statement_variable(self, ast):
return ast
def deallocate_prepared_statement(self, ast):
return ast
def describe_statement(self, ast):
return ast
def describe_input_statement(self, ast):
return ast
def describe_output_statement(self, ast):
return ast
def nesting_option(self, ast):
return ast
def using_descriptor(self, ast):
return ast
def described_object(self, ast):
return ast
def input_using_clause(self, ast):
return ast
def using_arguments(self, ast):
return ast
def using_argument(self, ast):
return ast
def output_using_clause(self, ast):
return ast
def into_arguments(self, ast):
return ast
def into_argument(self, ast):
return ast
def into_descriptor(self, ast):
return ast
def execute_statement(self, ast):
return ast
def execute_immediate_statement(self, ast):
return ast
def allocate_cursor_statement(self, ast):
return ast
def cursor_intent(self, ast):
return ast
def statement_cursor(self, ast):
return ast
def result_set_cursor(self, ast):
return ast
def dynamic_open_statement(self, ast):
return ast
def dynamic_fetch_statement(self, ast):
return ast
def dynamic_close_statement(self, ast):
return ast
def dynamic_delete_statement_positioned(self, ast):
return ast
def dynamic_update_statement_positioned(self, ast):
return ast
def direct_sql_statement(self, ast):
return ast
def directly_executable_statement(self, ast):
return ast
def direct_sql_data_statement(self, ast):
return ast
def get_diagnostics_statement(self, ast):
return ast
def sql_diagnostics_information(self, ast):
return ast
def statement_information(self, ast):
return ast
def statement_information_item(self, ast):
return ast
def statement_information_item_name(self, ast):
return ast
def condition_information(self, ast):
return ast
def condition_information_item(self, ast):
return ast
def condition_information_item_name(self, ast):
return ast
def ref_cast_option(self, ast):
return ast
def cast_option(self, ast):
return ast
def reference_scope_check(self, ast):
return ast
def start(self, ast):
return ast
def main(
        filename,
        startrule,
        trace=False,
        whitespace=None,
        nameguard=None,
        comments_re='/\\*[\\s\\S]*?\\*/',
        eol_comments_re='--.*?$',
        ignorecase=True,
        left_recursion=True,
        **kwargs):
    """Parse the contents of *filename* starting at grammar rule *startrule*
    and return the resulting AST.

    BUG FIX: the original signature accepted ``comments_re``,
    ``eol_comments_re`` and ``left_recursion`` but never forwarded them, so
    SQL comments were not skipped and the defaults were silently ignored.
    They are now passed to the generated parser's constructor, which accepts
    exactly these keyword arguments.

    :param filename: path of the file to parse
    :param startrule: name of the grammar rule to start parsing from
    :param trace: if True, emit a parse trace
    :param whitespace: whitespace regex; defaults to ``\\s+``
    :param nameguard: forwarded to the parser (token/name boundary handling)
    :param comments_re: regex matching block comments (``/* ... */``)
    :param eol_comments_re: regex matching end-of-line comments (``-- ...``)
    :param ignorecase: case-insensitive matching (SQL keywords)
    :param left_recursion: enable left-recursion support in the parser
    :param kwargs: extra options forwarded verbatim to ``parser.parse``
    :return: the parsed AST
    """
    with open(filename) as f:
        text = f.read()
    whitespace = whitespace or '\\s+'
    parser = SqlParser(
        parseinfo=False,
        comments_re=comments_re,
        eol_comments_re=eol_comments_re,
        left_recursion=left_recursion)
    ast = parser.parse(
        text,
        startrule,
        filename=filename,
        trace=trace,
        whitespace=whitespace,
        nameguard=nameguard,
        ignorecase=ignorecase,
        **kwargs)
    return ast
if __name__ == '__main__':
    import json
    # generic_main builds a small command-line interface (filename, start
    # rule, options) around main() -- it comes from the parser runtime
    # (grako/tatsu `generic_main`); presumably imported in the generated
    # preamble above this chunk -- TODO confirm.
    ast = generic_main(main, SqlParser, name='Sql')
    # Dump the AST twice: once via its repr, once as pretty-printed JSON.
    print('AST:')
    print(ast)
    print()
    print('JSON:')
    print(json.dumps(ast, indent=2))
    print()
| mit |
direvus/ansible | test/units/modules/network/nxos/test_nxos_pim_rp_address.py | 57 | 2519 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_pim_rp_address
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosPimRpAddressModule(TestNxosModule):
    """Unit tests for the ``nxos_pim_rp_address`` network module."""

    # Module under test; TestNxosModule's helpers (set_module_args,
    # execute_module, load_fixtures) operate on this attribute.
    module = nxos_pim_rp_address

    def setUp(self):
        """Patch the module's device I/O so no real connection is made."""
        super(TestNxosPimRpAddressModule, self).setUp()

        self.mock_load_config = patch('ansible.modules.network.nxos.nxos_pim_rp_address.load_config')
        self.load_config = self.mock_load_config.start()

        self.mock_get_config = patch('ansible.modules.network.nxos.nxos_pim_rp_address.get_config')
        self.get_config = self.mock_get_config.start()

    def tearDown(self):
        """Undo the patches installed by setUp()."""
        super(TestNxosPimRpAddressModule, self).tearDown()
        self.mock_load_config.stop()
        self.mock_get_config.stop()

    def load_fixtures(self, commands=None, device=''):
        # Serve a canned running-config; pushing config becomes a no-op.
        # NOTE(review): the no-change tests below imply the fixture already
        # contains 'ip pim rp-address 1.2.3.4' -- verify against config.cfg.
        self.get_config.return_value = load_fixture('nxos_pim_rp_address', 'config.cfg')
        self.load_config.return_value = None

    def test_nxos_pim_rp_address(self):
        # Adding a new RP address should emit the corresponding CLI command.
        set_module_args(dict(rp_address='5.6.7.8'))
        self.execute_module(changed=True, commands=['ip pim rp-address 5.6.7.8'])

    def test_nxos_pim_rp_address_no_change(self):
        # RP already present in the fixture config: nothing to do.
        set_module_args(dict(rp_address='1.2.3.4'))
        self.execute_module(changed=False, commands=[])

    def test_nxos_pim_rp_address_absent(self):
        # Removing a configured RP should emit the 'no' form of the command.
        set_module_args(dict(rp_address='1.2.3.4', state='absent'))
        self.execute_module(changed=True, commands=['no ip pim rp-address 1.2.3.4'])

    def test_nxos_pim_rp_address_absent_no_change(self):
        # Removing an RP that is not configured: nothing to do.
        set_module_args(dict(rp_address='5.6.7.8', state='absent'))
        self.execute_module(changed=False, commands=[])
| gpl-3.0 |
WhileLoop/ansible-modules-extras | system/locale_gen.py | 5 | 7662 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: locale_gen
short_description: Creates or removes locales.
description:
- Manages locales by editing /etc/locale.gen and invoking locale-gen.
version_added: "1.6"
author: "Augustus Kling (@AugustusKling)"
options:
name:
description:
- Name and encoding of the locale, such as "en_GB.UTF-8".
required: true
default: null
aliases: []
state:
description:
- Whether the locale shall be present.
required: false
choices: ["present", "absent"]
default: "present"
'''
EXAMPLES = '''
# Ensure a locale exists.
- locale_gen: name=de_CH.UTF-8 state=present
'''
import os
import os.path
from subprocess import Popen, PIPE, call
import re
from ansible.module_utils.basic import *
from ansible.module_utils.pycompat24 import get_exception
# Maps abbreviated/lower-cased encoding suffixes (as printed by `locale -a`)
# to their canonical spelling used in /etc/locale.gen, e.g. ".utf8" -> ".UTF-8".
# Used by fix_case() so the two spellings compare equal.
LOCALE_NORMALIZATION = {
    ".utf8": ".UTF-8",
    ".eucjp": ".EUC-JP",
    ".iso885915": ".ISO-8859-15",
    ".cp1251": ".CP1251",
    ".koi8r": ".KOI8-R",
    ".armscii8": ".ARMSCII-8",
    ".euckr": ".EUC-KR",
    ".gbk": ".GBK",
    ".gb18030": ".GB18030",
    ".euctw": ".EUC-TW",
}
# ===========================================
# location module specific support methods.
#
def is_available(name, ubuntuMode):
    """Check if the given locale is available on the system.

    This is done by checking either:
      * if the locale is present in /etc/locale.gen, or
      * if the locale is present in /usr/share/i18n/SUPPORTED (Ubuntu mode).

    :param name: locale name including encoding, e.g. "de_CH.UTF-8"
    :param ubuntuMode: True when the Ubuntu-specific locale layout is in use
    :return: True when the locale is listed, False otherwise
    """
    if ubuntuMode:
        __regexp = r'^(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
        __locales_available = '/usr/share/i18n/SUPPORTED'
    else:
        # Lines in /etc/locale.gen may be commented out with a leading '#'.
        __regexp = r'^#{0,1}\s*(?P<locale>\S+_\S+) (?P<charset>\S+)\s*$'
        __locales_available = '/etc/locale.gen'

    re_compiled = re.compile(__regexp)
    # BUG FIX: the original returned from inside the loop before reaching
    # fd.close(), leaking the file descriptor whenever the locale was found
    # (and on any exception). `with` closes the file on every path.
    with open(__locales_available, 'r') as fd:
        for line in fd:
            result = re_compiled.match(line)
            if result and result.group('locale') == name:
                return True
    return False
def is_present(name):
    """Checks if the given locale is currently installed."""
    # Ask the system for the installed locales and compare each entry
    # against the requested name, normalising encoding spellings on both
    # sides so e.g. "de_CH.utf8" and "de_CH.UTF-8" match.
    installed = Popen(["locale", "-a"], stdout=PIPE).communicate()[0]
    wanted = fix_case(name)
    for entry in installed.splitlines():
        if wanted == fix_case(entry):
            return True
    return False
def fix_case(name):
    """locale -a might return the encoding in either lower or upper case.
    Passing through this function makes them uniform for comparisons."""
    for variant, canonical in LOCALE_NORMALIZATION.items():
        name = name.replace(variant, canonical)
    return name
def replace_line(existing_line, new_line):
    """Replaces lines in /etc/locale.gen.

    Every occurrence of *existing_line* within a line is substituted with
    *new_line*; all other text is written back unchanged.

    BUG FIX: the original used try/finally around open() -- if open() itself
    failed, the finally block raised NameError on the unbound `f`. `with`
    handles both acquisition failure and guaranteed close.
    """
    with open("/etc/locale.gen", "r") as f:
        lines = [line.replace(existing_line, new_line) for line in f]
    with open("/etc/locale.gen", "w") as f:
        f.write("".join(lines))
def set_locale(name, enabled=True):
    """ Sets the state of the locale. Defaults to enabled.

    Enabling uncomments the locale's line in /etc/locale.gen; disabling
    prefixes it with '# '.

    BUG FIXES:
      * `name` is now re.escape()d before being embedded in the search
        pattern -- previously the '.' in e.g. "en_GB.UTF-8" matched any
        character, so an unrelated but similarly-named line could be
        rewritten.
      * try/finally replaced with `with`: if open() failed, the finally
        block raised NameError on the unbound `f`.
    """
    search_string = r'#{0,1}\s*%s (?P<charset>.+)' % re.escape(name)
    if enabled:
        new_string = r'%s \g<charset>' % (name)
    else:
        new_string = r'# %s \g<charset>' % (name)

    with open("/etc/locale.gen", "r") as f:
        lines = [re.sub(search_string, new_string, line) for line in f]
    with open("/etc/locale.gen", "w") as f:
        f.write("".join(lines))
def apply_change(targetState, name):
    """Create or remove locale.

    Keyword arguments:
    targetState -- Desired state, either present or absent.
    name -- Name including encoding such as de_CH.UTF-8.
    """
    # Enable the locale's line in /etc/locale.gen when it should be present,
    # comment it out otherwise, then regenerate the locales.
    set_locale(name, enabled=(targetState == "present"))

    localeGenExitValue = call("locale-gen")
    if localeGenExitValue != 0:
        raise EnvironmentError(localeGenExitValue, "locale.gen failed to execute, it returned "+str(localeGenExitValue))
def apply_change_ubuntu(targetState, name):
    """Create or remove locale (Ubuntu-specific layout).

    Keyword arguments:
    targetState -- Desired state, either present or absent.
    name -- Name including encoding such as de_CH.UTF-8.

    BUG FIX: the try/finally blocks around open() raised NameError on the
    unbound `f` whenever open() itself failed; replaced with `with`, which
    also guarantees the files are closed on every path.
    """
    if targetState == "present":
        # Create locale.
        # Ubuntu's patched locale-gen automatically adds the new locale to
        # /var/lib/locales/supported.d/local
        localeGenExitValue = call(["locale-gen", name])
    else:
        # Deleting a locale means dropping its entry from
        # /var/lib/locales/supported.d/local and regenerating all locales.
        with open("/var/lib/locales/supported.d/local", "r") as f:
            content = f.readlines()
        with open("/var/lib/locales/supported.d/local", "w") as f:
            for line in content:
                # Each entry is "<locale> <charset>"; keep all but `name`.
                locale, charset = line.split(' ')
                if locale != name:
                    f.write(line)
        # Purge locales and regenerate.
        # Please provide a patch if you know how to avoid regenerating the locales to keep!
        localeGenExitValue = call(["locale-gen", "--purge"])

    if localeGenExitValue != 0:
        raise EnvironmentError(localeGenExitValue, "locale.gen failed to execute, it returned "+str(localeGenExitValue))
# ==============================================================
# main
def main():
    """Module entry point: validate parameters and converge the locale state.

    Detects which locale-management layout the host uses (Debian-style
    /etc/locale.gen vs Ubuntu's /var/lib/locales/supported.d/), checks the
    requested locale is known, and creates/removes it when the current state
    differs from the requested one. Honours check mode.
    """
    module = AnsibleModule(
        argument_spec = dict(
            name = dict(required=True),
            state = dict(choices=['present','absent'], default='present'),
        ),
        supports_check_mode=True
    )

    name = module.params['name']
    state = module.params['state']

    if not os.path.exists("/etc/locale.gen"):
        if os.path.exists("/var/lib/locales/supported.d/"):
            # Ubuntu created its own system to manage locales.
            ubuntuMode = True
        else:
            module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package \"locales\" installed?")
    else:
        # We found the common way to manage locales.
        ubuntuMode = False

    if not is_available(name, ubuntuMode):
        # BUG FIX: grammar in the user-facing message ("locales ... is").
        module.fail_json(msg="The locale you've entered is not available "
                             "on your system.")

    prev_state = "present" if is_present(name) else "absent"
    changed = (prev_state != state)

    if module.check_mode:
        # Report what would change without touching the system.
        module.exit_json(changed=changed)
    else:
        if changed:
            try:
                if ubuntuMode:
                    apply_change_ubuntu(state, name)
                else:
                    apply_change(state, name)
            except EnvironmentError:
                e = get_exception()
                module.fail_json(msg=e.strerror, exitValue=e.errno)

        module.exit_json(name=name, changed=changed, msg="OK")


# BUG FIX: the module previously called main() unconditionally at import
# time; guard it so importing this file (e.g. for testing) has no side
# effects. Ansible executes the module as __main__, so behaviour when run
# is unchanged.
if __name__ == '__main__':
    main()
| gpl-3.0 |
a-parhom/edx-platform | lms/djangoapps/survey/migrations/0001_initial.py | 14 | 2347 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
from django.conf import settings
import model_utils.fields
from opaque_keys.edx.django.models import CourseKeyField
class Migration(migrations.Migration):
    # Auto-generated initial migration for the `survey` app: creates the
    # SurveyForm and SurveyAnswer tables and links answers to their form and
    # to the (swappable) user model. Do not hand-edit applied migrations;
    # add a new migration instead.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='SurveyAnswer',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
                ('field_name', models.CharField(max_length=255, db_index=True)),
                ('field_value', models.CharField(max_length=1024)),
                ('course_key', CourseKeyField(max_length=255, null=True, db_index=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='SurveyForm',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
                ('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
                ('name', models.CharField(unique=True, max_length=255, db_index=True)),
                ('form', models.TextField()),
            ],
            options={
                'abstract': False,
            },
        ),
        # Foreign keys are added after both tables exist.
        migrations.AddField(
            model_name='surveyanswer',
            name='form',
            field=models.ForeignKey(to='survey.SurveyForm', on_delete=models.CASCADE),
        ),
        migrations.AddField(
            model_name='surveyanswer',
            name='user',
            field=models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE),
        ),
    ]
| agpl-3.0 |
PigheadedGnu/boardgamegeek | boardgamegeek/utils.py | 1 | 14201 | # coding: utf-8
"""
:mod:`boardgamegeek.utils` - Generic helper functions
=====================================================
.. module:: boardgamegeek.utils
:platform: Unix, Windows
:synopsis: generic helper functions
.. moduleauthor:: Cosmin Luță <q4break@gmail.com>
"""
from __future__ import unicode_literals
import sys
import xml.etree.ElementTree as ET
from xml.etree.ElementTree import ParseError as ETParseError
import requests_cache
import requests
import logging
import time
import threading
from requests.adapters import HTTPAdapter
try:
import urllib.parse as urlparse
except:
import urlparse
from .exceptions import BoardGameGeekAPIError, BoardGameGeekAPIRetryError, BoardGameGeekError
from .exceptions import BoardGameGeekAPINonXMLError, BoardGameGeekTimeoutError
log = logging.getLogger("boardgamegeek.utils")
DEFAULT_REQUESTS_PER_MINUTE = 30
class RateLimitingAdapter(HTTPAdapter):
    """
    Adapter for the Requests library which makes sure there's a delay between consecutive requests to the BGG site
    so that we don't get throttled
    """

    # NOTE: the rate-limiting state is deliberately class-level so every
    # adapter/session in the process shares one request budget.
    __last_request_timestamp = None  # time when the last request was made
    __time_between_requests = 0      # interval to wait between requests in order to match the expected number of
                                     # requests per second

    __rate_limit_lock = threading.Lock()

    def __init__(self, rpm=DEFAULT_REQUESTS_PER_MINUTE, **kw):
        """
        :param rpm: how many requests per minute to allow (values <= 0 fall
                    back to DEFAULT_REQUESTS_PER_MINUTE)
        :param kw: forwarded verbatim to HTTPAdapter
        """
        if rpm <= 0:
            # BUG FIX: log.warn() is a deprecated alias of log.warning()
            log.warning("invalid requests per minute value ({}), falling back to default".format(rpm))
            rpm = DEFAULT_REQUESTS_PER_MINUTE

        RateLimitingAdapter.__time_between_requests = 60.0 / float(rpm)

        super(RateLimitingAdapter, self).__init__(**kw)

    def send(self, request, **kw):
        """Sleep just long enough to honour the configured rate, then
        delegate the actual transmission to HTTPAdapter.send()."""
        log.debug("acquiring rate limiting lock")
        with RateLimitingAdapter.__rate_limit_lock:
            log.debug("time between requests:{}, last request timestamp: {}".format(RateLimitingAdapter.__time_between_requests,
                                                                                    RateLimitingAdapter.__last_request_timestamp))
            # determine if we need to sleep in order to enforce the maximum requested amount of requests per minute
            if RateLimitingAdapter.__last_request_timestamp is not None:
                time_delta = time.time() - RateLimitingAdapter.__last_request_timestamp
                need_to_wait = RateLimitingAdapter.__time_between_requests - time_delta

                log.debug("time since last request: {}, need to wait: {}".format(time_delta, need_to_wait))

                if need_to_wait > 0:
                    # Sleeping while holding the lock serialises callers,
                    # which is exactly what enforces the global rate.
                    time.sleep(need_to_wait)

            RateLimitingAdapter.__last_request_timestamp = time.time()

        log.debug("releasing rate limiting lock")
        log.debug("sending request: {}".format(request))
        return super(RateLimitingAdapter, self).send(request, **kw)
class DictObject(object):
    """
    Just a fancy wrapper over a dictionary, exposing its keys as attributes.
    """
    def __init__(self, data):
        self._data = data

    def __getattr__(self, item):
        # allow accessing user's variables using .attribute
        # (__getattr__ is only consulted after normal lookup fails, so
        # `_data` itself is resolved the regular way)
        try:
            return self._data[item]
        except (KeyError, TypeError):
            # BUG FIX: was a bare `except:` raising an argument-less
            # AttributeError -- include the missing name for debuggability
            # and only translate lookup failures, not arbitrary errors.
            raise AttributeError(item)

    def data(self):
        """
        Access to the internal data dictionary, for easy dumping

        :return: the internal data dictionary
        """
        return self._data
def xml_subelement_attr(xml_elem, subelement, convert=None, attribute="value", default=None, quiet=False):
    """
    Search for a sub-element and return the value of its attribute.

    For the following XML document:

    .. code-block:: xml

        <xml_elem>
            <subelement value="THIS" />
        </xml_elem>

    a call to ``xml_subelement_attr(xml_elem, "subelement")`` would return ``"THIS"``

    :param xml_elem: search the children nodes of this element
    :param subelement: Name of the sub-element to search for
    :param convert: if not None, a callable to perform the conversion of this attribute to a certain object type
    :param attribute: name of the attribute to get
    :param default: default value if the subelement or attribute is not found
    :param quiet: if True, don't raise exception from conversions, return default instead
    :return: value of the attribute or ``None`` in error cases
    """
    if xml_elem is None or not subelement:
        return None

    subel = xml_elem.find(subelement)
    if subel is None:
        value = default
    else:
        value = subel.attrib.get(attribute)
        if value is None:
            value = default
        elif convert:
            try:
                value = convert(value)
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt in quiet mode. Exception
                # preserves the `quiet` semantics for conversion errors.
                if quiet:
                    value = default
                else:
                    raise
    return value
def xml_subelement_attr_list(xml_elem, subelement, convert=None, attribute="value", default=None, quiet=False):
    """
    Search for sub-elements and return a list of the specified attribute.

    .. code-block:: xml

        <xml_elem>
            <subelement value="THIS" />
            <subelement value="THIS2" />
            ...
        </xml_elem>

    For the above document, ["THIS", "THIS2"] will be returned

    :param xml_elem: search the children nodes of this element
    :param subelement: name of the sub-element to search for
    :param convert: if not None, a callable used to perform the conversion of this attribute to a certain object type
    :param attribute: name of the attribute to get
    :param default: default value to use if an attribute is missing
    :param quiet: if True, don't raise exceptions from conversions, instead use the default value
    :return: list containing the values of the attributes or ``None`` in error cases
    """
    if xml_elem is None or not subelement:
        return None

    subel = xml_elem.findall(subelement)
    res = []
    for e in subel:
        value = e.attrib.get(attribute)
        if value is None:
            value = default
        elif convert:
            try:
                value = convert(value)
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt in quiet mode.
                if quiet:
                    value = default
                else:
                    raise
        res.append(value)
    return res
def xml_subelement_text(xml_elem, subelement, convert=None, default=None, quiet=False):
    """
    Return the text of the specified subelement

    For the document below:

    .. code-block:: xml

        <xml_elem>
            <subelement>text</subelement>
        </xml_elem>

    ``"text"`` will be returned

    :param xml_elem: search the children nodes of this element
    :param subelement: name of the subelement whose text will be retrieved
    :param convert: if not None, a callable used to perform the conversion of the text to a certain object type
    :param default: default value if subelement is not found
    :param quiet: if True, don't raise exceptions from conversions, instead use the default value
    :return: The text associated with the sub-element or ``None`` in case of error
    """
    if xml_elem is None or not subelement:
        return None

    subel = xml_elem.find(subelement)
    if subel is None:
        text = default
    else:
        text = subel.text
        if text is None:
            text = default
        elif convert:
            try:
                text = convert(text)
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt in quiet mode.
                if quiet:
                    text = default
                else:
                    raise
    return text
def get_parsed_xml_response(requests_session, url, params=None, timeout=15, retries=3, retry_delay=5):
    """
    Downloads an XML from the specified url, parses it and returns the xml ElementTree.

    :param requests_session: A Session of the ``requests`` library, used to fetch the url
    :param url: the address where to get the XML from
    :param params: dictionary containing the parameters which should be sent with the request
    :param timeout: number of seconds after which the request times out
    :param retries: number of retries to perform in case of timeout
    :param retry_delay: the amount of seconds to sleep when retrying an API call that returned 202
    :return: :py:func:`xml.etree.ElementTree` corresponding to the XML
    :raises: :py:class:`BoardGameGeekAPINonXMLError` if the API response wasn't XML
    :raises: :py:class:`BoardGameGeekAPIRetryError` if this request should be retried after a short delay
    :raises: :py:class:`BoardGameGeekAPIError` if the response couldn't be parsed
    :raises: :py:class:`BoardGameGeekTimeoutError` if there was a timeout
    """
    retr = retries

    # retry loop
    while retr >= 0:
        retr -= 1
        try:
            r = requests_session.get(url, params=params, timeout=timeout)

            if r.status_code == 202:
                if retries == 0:
                    # no retries have been requested, therefore raise exception to signal the application that it
                    # needs to retry
                    # (BoardGameGeek API says that on status code 202 the call should be retried after a delay)
                    raise BoardGameGeekAPIRetryError()
                elif retr == 0:
                    # retries were requested, but we reached 0. Signal the application that it needs to retry itself.
                    raise BoardGameGeekAPIRetryError("failed to retrieve data after {} retries".format(retries))
                else:
                    # sleep for the specified delay and retry
                    log.debug("API call will be retried in {} seconds ({} more retries)".format(retry_delay, retr))
                    if retr >= 0:
                        time.sleep(retry_delay)
                        # exponential-ish backoff: each 202 retry waits 50% longer
                        retry_delay *= 1.5
                        continue
            elif r.status_code == 503:
                # it seems they added some sort of protection which triggers when too many requests are made, in which
                # case we get back a 503. Try to delay and retry
                log.warning("API returned 503, retrying")
                if retr >= 0:
                    time.sleep(retry_delay)
                    # back off more aggressively for rate limiting
                    retry_delay *= 3
                    continue

            if not r.headers.get("content-type").startswith("text/xml"):
                raise BoardGameGeekAPINonXMLError("non-XML reply")

            xml = r.text

            if sys.version_info >= (3,):
                root_elem = ET.fromstring(xml)
            else:
                # Python 2: feed the parser encoded bytes instead of unicode
                utf8_xml = xml.encode("utf-8")
                root_elem = ET.fromstring(utf8_xml)

            return root_elem

        except requests.exceptions.Timeout:
            if retries == 0:
                raise BoardGameGeekTimeoutError()
            elif retr == 0:
                # ... reached 0 retries
                raise BoardGameGeekTimeoutError("failed to retrieve data after {} retries".format(retries))
            else:
                log.debug("API request timeout, retrying {} more times w/timeout {}".format(retr, timeout))
                # grant slower responses progressively more time
                timeout *= 2.5
                continue

        except ETParseError as e:
            raise BoardGameGeekAPIError("error decoding BGG API response: {}".format(e))

        except (BoardGameGeekAPIRetryError, BoardGameGeekAPINonXMLError, BoardGameGeekTimeoutError):
            # already meaningful exceptions: propagate unchanged
            raise

        except Exception as e:
            raise BoardGameGeekAPIError("error fetching BGG API response: {}".format(e))

    raise BoardGameGeekAPIError("couldn't fetch data within the configured number of retries")
def get_cache_session_from_uri(uri):
    """
    Build a requests-cache session configured by the given URI. Valid URIs are:

    * memory:///?ttl=<seconds>
    * sqlite:///path/to/sqlite.db?ttl=<seconds>&fast_save=<0|1>

    :param uri: URI specifying the type of cache to use and its parameters
    :return: CachedSession instance, which can be used as a regular ``requests`` session.
    :raises: :class:`BoardGameGeekError` in case of error
    """
    try:
        parsed = urlparse.urlparse(uri)
        query = urlparse.parse_qs(parsed.query)

        # cache entries live for one hour unless a ttl parameter says otherwise
        ttl = int(query.get("ttl", ['3600'])[0])

        if parsed.scheme == "memory":
            return requests_cache.core.CachedSession(
                backend="memory",
                expire_after=ttl,
                allowable_codes=(200,))

        if parsed.scheme == "sqlite":
            fast_save = query.get("fast_save", ["0"])[0] != "0"
            return requests_cache.core.CachedSession(
                cache_name=parsed.path,
                backend="sqlite",
                expire_after=ttl,
                extension="",
                fast_save=fast_save,
                allowable_codes=(200,))

        # TODO: add the redis backend (cache_name from a "prefix" parameter)
        # TODO: add the mongo backend
    except Exception as e:
        raise BoardGameGeekError("error trying to create a CachedSession from '{}': {}".format(uri, e))

    # no known scheme matched
    raise BoardGameGeekError("invalid cache URI: {}".format(uri))
def fix_url(url):
    """
    Make protocol-relative URLs absolute by prepending the "http:" scheme.

    The BGG API started returning URLs like //cf.geekdo-images.com/images/pic55406.jpg
    for thumbnails and images; anything else is passed through untouched.

    :param url: the url to fix
    :return: the fixed url
    """
    return "http:{}".format(url) if url and url.startswith("//") else url
| bsd-3-clause |
kevinmel2000/sl4a | python/src/Lib/test/test_zipimport.py | 55 | 17378 | import sys
import os
import marshal
import imp
import struct
import time
import unittest
import zlib # implied prerequisite
from zipfile import ZipFile, ZipInfo, ZIP_STORED, ZIP_DEFLATED
from test import test_support
from test.test_importhooks import ImportHooksBaseTestCase, test_src, test_co
import zipimport
import linecache
import doctest
import inspect
import StringIO
from traceback import extract_tb, extract_stack, print_tb
# source of a tiny module whose only function raises; used by the traceback tests
raise_src = 'def do_raise(): raise TypeError\n'

# so we only run testAFakeZlib once if this test is run repeatedly
# which happens when we look for ref leaks
test_imported = False
def make_pyc(co, mtime):
    """Return the bytes of a .pyc file for code object *co*, stamped with *mtime*."""
    data = marshal.dumps(co)
    if type(mtime) is type(0.0):
        # Mac mtimes need a bit of special casing
        if mtime < 0x7fffffff:
            mtime = int(mtime)
        else:
            # wrap the unsigned value into the signed 32-bit range expected by struct
            mtime = int(-0x100000000L + long(mtime))
    # .pyc layout: magic word, 32-bit little-endian mtime, marshalled code object
    pyc = imp.get_magic() + struct.pack("<i", int(mtime)) + data
    return pyc
def module_path_to_dotted_name(path):
    """Translate a filesystem-style module path into a dotted module name."""
    return '.'.join(path.split(os.sep))
NOW = time.time()
# a ready-made, correctly stamped .pyc payload for the shared test code object
test_pyc = make_pyc(test_co, NOW)


# match the bytecode extension the running interpreter would itself produce
if __debug__:
    pyc_ext = ".pyc"
else:
    pyc_ext = ".pyo"


# names used for scratch modules/packages and the temporary zip archive
TESTMOD = "ziptestmodule"
TESTPACK = "ziptestpackage"
TESTPACK2 = "ziptestpackage2"
TEMP_ZIP = os.path.abspath("junk95142" + os.extsep + "zip")
class UncompressedZipImportTestCase(ImportHooksBaseTestCase):
    """zipimport tests run against an archive whose members are stored uncompressed.

    CompressedZipImportTestCase reruns the exact same tests with ZIP_DEFLATED.
    """

    compression = ZIP_STORED

    def setUp(self):
        # We're reusing the zip archive path, so we must clear the
        # cached directory info and linecache
        linecache.clearcache()
        zipimport._zip_directory_cache.clear()
        ImportHooksBaseTestCase.setUp(self)

    def doTest(self, expected_ext, files, *modules, **kw):
        """Build TEMP_ZIP from *files*, import the dotted *modules* chain from it.

        :param expected_ext: extension mod.get_file() must end with (None to skip the check)
        :param files: mapping of archive name -> (mtime, data)
        :param modules: components of the dotted module name to import
        :param kw: 'stuff' prepends bytes before the archive; 'call' is invoked with the module
        """
        z = ZipFile(TEMP_ZIP, "w")
        try:
            for name, (mtime, data) in files.items():
                zinfo = ZipInfo(name, time.localtime(mtime))
                zinfo.compress_type = self.compression
                z.writestr(zinfo, data)
            z.close()

            stuff = kw.get("stuff", None)
            if stuff is not None:
                # Prepend 'stuff' to the start of the zipfile
                f = open(TEMP_ZIP, "rb")
                data = f.read()
                f.close()

                f = open(TEMP_ZIP, "wb")
                f.write(stuff)
                f.write(data)
                f.close()

            sys.path.insert(0, TEMP_ZIP)

            mod = __import__(".".join(modules), globals(), locals(),
                             ["__dummy__"])

            call = kw.get('call')
            if call is not None:
                call(mod)

            if expected_ext:
                file = mod.get_file()
                self.assertEquals(file, os.path.join(TEMP_ZIP,
                                  *modules) + expected_ext)
        finally:
            z.close()
            os.remove(TEMP_ZIP)

    def testAFakeZlib(self):
        #
        # This could cause a stack overflow before: importing zlib.py
        # from a compressed archive would cause zlib to be imported
        # which would find zlib.py in the archive, which would... etc.
        #
        # This test *must* be executed first: it must be the first one
        # to trigger zipimport to import zlib (zipimport caches the
        # zlib.decompress function object, after which the problem being
        # tested here wouldn't be a problem anymore...
        # (Hence the 'A' in the test method name: to make it the first
        # item in a list sorted by name, like unittest.makeSuite() does.)
        #
        # This test fails on platforms on which the zlib module is
        # statically linked, but the problem it tests for can't
        # occur in that case (builtin modules are always found first),
        # so we'll simply skip it then. Bug #765456.
        #
        if "zlib" in sys.builtin_module_names:
            return
        if "zlib" in sys.modules:
            del sys.modules["zlib"]
        files = {"zlib.py": (NOW, test_src)}
        try:
            self.doTest(".py", files, "zlib")
        except ImportError:
            if self.compression != ZIP_DEFLATED:
                self.fail("expected test to not raise ImportError")
        else:
            if self.compression != ZIP_STORED:
                self.fail("expected test to raise ImportError")

    def testPy(self):
        # a lone .py file is importable
        files = {TESTMOD + ".py": (NOW, test_src)}
        self.doTest(".py", files, TESTMOD)

    def testPyc(self):
        # a lone .pyc file is importable
        files = {TESTMOD + pyc_ext: (NOW, test_pyc)}
        self.doTest(pyc_ext, files, TESTMOD)

    def testBoth(self):
        # with both present, the compiled file wins
        files = {TESTMOD + ".py": (NOW, test_src),
                 TESTMOD + pyc_ext: (NOW, test_pyc)}
        self.doTest(pyc_ext, files, TESTMOD)

    def testEmptyPy(self):
        files = {TESTMOD + ".py": (NOW, "")}
        self.doTest(None, files, TESTMOD)

    def testBadMagic(self):
        # make pyc magic word invalid, forcing loading from .py
        m0 = ord(test_pyc[0])
        m0 ^= 0x04  # flip an arbitrary bit
        badmagic_pyc = chr(m0) + test_pyc[1:]
        files = {TESTMOD + ".py": (NOW, test_src),
                 TESTMOD + pyc_ext: (NOW, badmagic_pyc)}
        self.doTest(".py", files, TESTMOD)

    def testBadMagic2(self):
        # make pyc magic word invalid, causing an ImportError
        m0 = ord(test_pyc[0])
        m0 ^= 0x04  # flip an arbitrary bit
        badmagic_pyc = chr(m0) + test_pyc[1:]
        files = {TESTMOD + pyc_ext: (NOW, badmagic_pyc)}
        try:
            self.doTest(".py", files, TESTMOD)
        except ImportError:
            pass
        else:
            self.fail("expected ImportError; import from bad pyc")

    def testBadMTime(self):
        t3 = ord(test_pyc[7])
        t3 ^= 0x02  # flip the second bit -- not the first as that one
                    # isn't stored in the .py's mtime in the zip archive.
        badtime_pyc = test_pyc[:7] + chr(t3) + test_pyc[8:]
        files = {TESTMOD + ".py": (NOW, test_src),
                 TESTMOD + pyc_ext: (NOW, badtime_pyc)}
        self.doTest(".py", files, TESTMOD)

    def testPackage(self):
        packdir = TESTPACK + os.sep
        files = {packdir + "__init__" + pyc_ext: (NOW, test_pyc),
                 packdir + TESTMOD + pyc_ext: (NOW, test_pyc)}
        self.doTest(pyc_ext, files, TESTPACK, TESTMOD)

    def testDeepPackage(self):
        # package nested inside another package
        packdir = TESTPACK + os.sep
        packdir2 = packdir + TESTPACK2 + os.sep
        files = {packdir + "__init__" + pyc_ext: (NOW, test_pyc),
                 packdir2 + "__init__" + pyc_ext: (NOW, test_pyc),
                 packdir2 + TESTMOD + pyc_ext: (NOW, test_pyc)}
        self.doTest(pyc_ext, files, TESTPACK, TESTPACK2, TESTMOD)

    def testZipImporterMethods(self):
        # exercise the public zipimporter API directly (archive, is_package,
        # load_module, get_source, _get_filename, prefix)
        packdir = TESTPACK + os.sep
        packdir2 = packdir + TESTPACK2 + os.sep
        files = {packdir + "__init__" + pyc_ext: (NOW, test_pyc),
                 packdir2 + "__init__" + pyc_ext: (NOW, test_pyc),
                 packdir2 + TESTMOD + pyc_ext: (NOW, test_pyc)}

        z = ZipFile(TEMP_ZIP, "w")
        try:
            for name, (mtime, data) in files.items():
                zinfo = ZipInfo(name, time.localtime(mtime))
                zinfo.compress_type = self.compression
                z.writestr(zinfo, data)
            z.close()

            zi = zipimport.zipimporter(TEMP_ZIP)
            self.assertEquals(zi.archive, TEMP_ZIP)
            self.assertEquals(zi.is_package(TESTPACK), True)
            mod = zi.load_module(TESTPACK)
            self.assertEquals(zi._get_filename(TESTPACK), mod.__file__)

            self.assertEquals(zi.is_package(packdir + '__init__'), False)
            self.assertEquals(zi.is_package(packdir + TESTPACK2), True)
            self.assertEquals(zi.is_package(packdir2 + TESTMOD), False)

            mod_path = packdir2 + TESTMOD
            mod_name = module_path_to_dotted_name(mod_path)
            pkg = __import__(mod_name)
            mod = sys.modules[mod_name]
            self.assertEquals(zi.get_source(TESTPACK), None)
            self.assertEquals(zi.get_source(mod_path), None)
            self.assertEquals(zi._get_filename(mod_path), mod.__file__)
            # To pass in the module name instead of the path, we must use the right importer
            loader = mod.__loader__
            self.assertEquals(loader.get_source(mod_name), None)
            self.assertEquals(loader._get_filename(mod_name), mod.__file__)

            # test prefix and archivepath members
            zi2 = zipimport.zipimporter(TEMP_ZIP + os.sep + TESTPACK)
            self.assertEquals(zi2.archive, TEMP_ZIP)
            self.assertEquals(zi2.prefix, TESTPACK + os.sep)
        finally:
            z.close()
            os.remove(TEMP_ZIP)

    def testZipImporterMethodsInSubDirectory(self):
        # same as above but for an importer rooted at a subdirectory of the archive
        packdir = TESTPACK + os.sep
        packdir2 = packdir + TESTPACK2 + os.sep
        files = {packdir2 + "__init__" + pyc_ext: (NOW, test_pyc),
                 packdir2 + TESTMOD + pyc_ext: (NOW, test_pyc)}

        z = ZipFile(TEMP_ZIP, "w")
        try:
            for name, (mtime, data) in files.items():
                zinfo = ZipInfo(name, time.localtime(mtime))
                zinfo.compress_type = self.compression
                z.writestr(zinfo, data)
            z.close()

            zi = zipimport.zipimporter(TEMP_ZIP + os.sep + packdir)
            self.assertEquals(zi.archive, TEMP_ZIP)
            self.assertEquals(zi.prefix, packdir)
            self.assertEquals(zi.is_package(TESTPACK2), True)
            mod = zi.load_module(TESTPACK2)
            self.assertEquals(zi._get_filename(TESTPACK2), mod.__file__)

            self.assertEquals(zi.is_package(TESTPACK2 + os.sep + '__init__'), False)
            self.assertEquals(zi.is_package(TESTPACK2 + os.sep + TESTMOD), False)

            mod_path = TESTPACK2 + os.sep + TESTMOD
            mod_name = module_path_to_dotted_name(mod_path)
            pkg = __import__(mod_name)
            mod = sys.modules[mod_name]
            self.assertEquals(zi.get_source(TESTPACK2), None)
            self.assertEquals(zi.get_source(mod_path), None)
            self.assertEquals(zi._get_filename(mod_path), mod.__file__)
            # To pass in the module name instead of the path, we must use the right importer
            loader = mod.__loader__
            self.assertEquals(loader.get_source(mod_name), None)
            self.assertEquals(loader._get_filename(mod_name), mod.__file__)
        finally:
            z.close()
            os.remove(TEMP_ZIP)

    def testGetData(self):
        # get_data() returns raw archive member contents
        z = ZipFile(TEMP_ZIP, "w")
        z.compression = self.compression
        try:
            name = "testdata.dat"
            data = "".join([chr(x) for x in range(256)]) * 500
            z.writestr(name, data)
            z.close()
            zi = zipimport.zipimporter(TEMP_ZIP)
            self.assertEquals(data, zi.get_data(name))
            self.assert_('zipimporter object' in repr(zi))
        finally:
            z.close()
            os.remove(TEMP_ZIP)

    def testImporterAttr(self):
        # modules imported from a zip get a working __loader__ attribute
        src = """if 1:  # indent hack
        def get_file():
            return __file__
        if __loader__.get_data("some.data") != "some data":
            raise AssertionError, "bad data"\n"""
        pyc = make_pyc(compile(src, "<???>", "exec"), NOW)
        files = {TESTMOD + pyc_ext: (NOW, pyc),
                 "some.data": (NOW, "some data")}
        self.doTest(pyc_ext, files, TESTMOD)

    def testImport_WithStuff(self):
        # try importing from a zipfile which contains additional
        # stuff at the beginning of the file
        files = {TESTMOD + ".py": (NOW, test_src)}
        self.doTest(".py", files, TESTMOD,
                    stuff="Some Stuff"*31)

    def assertModuleSource(self, module):
        # helper passed as doTest(call=...): source must be retrievable via inspect
        self.assertEqual(inspect.getsource(module), test_src)

    def testGetSource(self):
        files = {TESTMOD + ".py": (NOW, test_src)}
        self.doTest(".py", files, TESTMOD, call=self.assertModuleSource)

    def testGetCompiledSource(self):
        pyc = make_pyc(compile(test_src, "<???>", "exec"), NOW)
        files = {TESTMOD + ".py": (NOW, test_src),
                 TESTMOD + pyc_ext: (NOW, pyc)}
        self.doTest(pyc_ext, files, TESTMOD, call=self.assertModuleSource)

    def runDoctest(self, callback):
        """Import TESTMOD plus a doctest text file from a zip, then run *callback* on the module."""
        files = {TESTMOD + ".py": (NOW, test_src),
                 "xyz.txt": (NOW, ">>> log.append(True)\n")}
        self.doTest(".py", files, TESTMOD, call=callback)

    def doDoctestFile(self, module):
        log = []
        # reset doctest's global master so results don't leak between runs
        old_master, doctest.master = doctest.master, None
        try:
            doctest.testfile(
                'xyz.txt', package=module, module_relative=True,
                globs=locals()
            )
        finally:
            doctest.master = old_master
        self.assertEqual(log, [True])

    def testDoctestFile(self):
        self.runDoctest(self.doDoctestFile)

    def doDoctestSuite(self, module):
        log = []
        doctest.DocFileTest(
            'xyz.txt', package=module, module_relative=True,
            globs=locals()
        ).run()
        self.assertEqual(log, [True])

    def testDoctestSuite(self):
        self.runDoctest(self.doDoctestSuite)

    def doTraceback(self, module):
        # tracebacks for code imported from a zip must show the source lines
        try:
            module.do_raise()
        except:
            tb = sys.exc_info()[2].tb_next

            f, lno, n, line = extract_tb(tb, 1)[0]
            self.assertEqual(line, raise_src.strip())

            f, lno, n, line = extract_stack(tb.tb_frame, 1)[0]
            self.assertEqual(line, raise_src.strip())

            s = StringIO.StringIO()
            print_tb(tb, 1, s)
            self.failUnless(s.getvalue().endswith(raise_src))
        else:
            raise AssertionError("This ought to be impossible")

    def testTraceback(self):
        files = {TESTMOD + ".py": (NOW, raise_src)}
        self.doTest(None, files, TESTMOD, call=self.doTraceback)
class CompressedZipImportTestCase(UncompressedZipImportTestCase):
    """Rerun the whole import suite with deflate-compressed archive members."""
    compression = ZIP_DEFLATED
class BadFileZipImportTestCase(unittest.TestCase):
    """zipimporter must fail cleanly on missing, empty, unreadable or malformed archives."""

    def assertZipFailure(self, filename):
        # constructing a zipimporter for *filename* must raise ZipImportError
        self.assertRaises(zipimport.ZipImportError,
                          zipimport.zipimporter, filename)

    def testNoFile(self):
        self.assertZipFailure('AdfjdkFJKDFJjdklfjs')

    def testEmptyFilename(self):
        self.assertZipFailure('')

    def testBadArgs(self):
        self.assertRaises(TypeError, zipimport.zipimporter, None)
        self.assertRaises(TypeError, zipimport.zipimporter, TESTMOD, kwd=None)

    def testFilenameTooLong(self):
        self.assertZipFailure('A' * 33000)

    def testEmptyFile(self):
        test_support.unlink(TESTMOD)
        open(TESTMOD, 'w+').close()
        self.assertZipFailure(TESTMOD)

    def testFileUnreadable(self):
        test_support.unlink(TESTMOD)
        # create the file with no permission bits at all (Python 2 octal literal)
        fd = os.open(TESTMOD, os.O_CREAT, 000)
        try:
            os.close(fd)
            self.assertZipFailure(TESTMOD)
        finally:
            # If we leave "the read-only bit" set on Windows, nothing can
            # delete TESTMOD, and later tests suffer bogus failures.
            os.chmod(TESTMOD, 0666)
            test_support.unlink(TESTMOD)

    def testNotZipFile(self):
        test_support.unlink(TESTMOD)
        fp = open(TESTMOD, 'w+')
        fp.write('a' * 22)
        fp.close()
        self.assertZipFailure(TESTMOD)

    # XXX: disabled until this works on Big-endian machines
    def _testBogusZipFile(self):
        test_support.unlink(TESTMOD)
        fp = open(TESTMOD, 'w+')
        # write the "end of central directory" magic followed by garbage
        fp.write(struct.pack('=I', 0x06054B50))
        fp.write('a' * 18)
        fp.close()

        z = zipimport.zipimporter(TESTMOD)

        try:
            self.assertRaises(TypeError, z.find_module, None)
            self.assertRaises(TypeError, z.load_module, None)
            self.assertRaises(TypeError, z.is_package, None)
            self.assertRaises(TypeError, z.get_code, None)
            self.assertRaises(TypeError, z.get_data, None)
            self.assertRaises(TypeError, z.get_source, None)

            error = zipimport.ZipImportError
            self.assertEqual(z.find_module('abc'), None)

            self.assertRaises(error, z.load_module, 'abc')
            self.assertRaises(error, z.get_code, 'abc')
            self.assertRaises(IOError, z.get_data, 'abc')
            self.assertRaises(error, z.get_source, 'abc')
            self.assertRaises(error, z.is_package, 'abc')
        finally:
            zipimport._zip_directory_cache.clear()
def cleanup():
    """Reset zipimport state between repeated runs (e.g. while hunting ref leaks)."""
    # this is necessary if test is run repeated (like when finding leaks)
    global test_imported
    if test_imported:
        zipimport._zip_directory_cache.clear()
        # testAFakeZlib may only run once per process; drop it on subsequent runs
        for case in (UncompressedZipImportTestCase, CompressedZipImportTestCase):
            if hasattr(case, 'testAFakeZlib'):
                delattr(case, 'testAFakeZlib')
    test_imported = True
def test_main():
    """Run all zipimport test cases, clearing stale state first and the scratch file last."""
    cleanup()
    try:
        test_support.run_unittest(
            UncompressedZipImportTestCase,
            CompressedZipImportTestCase,
            BadFileZipImportTestCase,
        )
    finally:
        # remove the scratch module file some BadFile tests create
        test_support.unlink(TESTMOD)


if __name__ == "__main__":
    test_main()
| apache-2.0 |
eyohansa/django | django/conf/locale/hu/formats.py | 504 | 1117 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'Y. F j.'
TIME_FORMAT = 'G.i'
DATETIME_FORMAT = 'Y. F j. G.i'
YEAR_MONTH_FORMAT = 'Y. F'
MONTH_DAY_FORMAT = 'F j.'
SHORT_DATE_FORMAT = 'Y.m.d.'
SHORT_DATETIME_FORMAT = 'Y.m.d. G.i'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%Y.%m.%d.',  # '2006.10.25.'
]
TIME_INPUT_FORMATS = [
    '%H.%M.%S',  # '14.30.59'
    '%H.%M',  # '14.30'
]
DATETIME_INPUT_FORMATS = [
    '%Y.%m.%d. %H.%M.%S',  # '2006.10.25. 14.30.59'
    '%Y.%m.%d. %H.%M.%S.%f',  # '2006.10.25. 14.30.59.000200'
    '%Y.%m.%d. %H.%M',  # '2006.10.25. 14.30'
    '%Y.%m.%d.',  # '2006.10.25.'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = ' '  # Non-breaking space
NUMBER_GROUPING = 3
| bsd-3-clause |
reimandlab/ActiveDriverDB | website/imports/mutations/clinvar.py | 1 | 27628 | import re
from collections import defaultdict
from typing import Mapping, Iterable, Dict, TextIO, Union, NamedTuple
from xml.etree import ElementTree
from sqlalchemy.orm.exc import NoResultFound
from models import InheritedMutation, Disease
from models import ClinicalData, or_
from helpers.parsers import tsv_file_iterator
from helpers.parsers import gzip_open_text
from database.bulk import get_highest_id, bulk_orm_insert, restart_autoincrement
from database import db
from .mutation_importer import MutationImporter
from .mutation_importer.helpers import make_metadata_ordered_dict
class MalformedRawError(Exception):
    """Raised when an annotated ClinVar row has an inconsistent number of sub-entries."""
def count_characters(file: TextIO) -> int:
    """Count the characters in *file* by iterating its lines (used for progress estimation)."""
    total = 0
    for line in file:
        total += len(line)
    return total
class ReferenceData(NamedTuple):
    """Pre-filtered pieces of a ClinVar ``ReferenceClinVarAssertion`` XML record."""
    rcv_accession: ElementTree.Element
    variation_id: int
    sample: ElementTree.Element
class ClinVarImporter(MutationImporter):
    """Importer of inherited (ClinVar) mutations.

    Mutations come from an annovar-annotated TSV; disease-association details
    (origin, significance, review status) are later enriched from the ClinVar
    XML release, which must stem from the same ClinVar version.
    """

    name = 'clinvar'
    model = InheritedMutation
    # annovar-annotated ClinVar variants (gzipped TSV)
    default_path = 'data/mutations/clinvar_muts_annotated.txt.gz'
    # full ClinVar XML release used by import_disease_associations()
    default_xml_path = 'data/mutations/ClinVarFullRelease_2020-10.xml.gz'
    # expected column order of the annovar TSV
    header = [
        'Chr', 'Start', 'End', 'Ref', 'Alt', 'Func.refGene', 'Gene.refGene',
        'GeneDetail.refGene', 'ExonicFunc.refGene', 'AAChange.refGene',
        'Otherinfo1', 'Otherinfo2', 'Otherinfo3', 'Otherinfo4', 'Otherinfo5',
        'Otherinfo6', 'Otherinfo7', 'Otherinfo8', 'Otherinfo9', 'Otherinfo10', 'Otherinfo11'
    ]
    # columns inserted in bulk for the InheritedMutation model
    insert_keys = (
        'mutation_id',
        'db_snp_ids',
        'combined_significances',
    )
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # path to the ClinVar XML release; set in load()
        self.xml_path = None
        # unmapped significance strings -> number of occurrences
        self.skipped_significances = defaultdict(int)
        # variation ids present in the database; populated in import_disease_associations()
        self.variants_of_interest = None
        # record skipped categories once, to avoid repeating log messages
        self.skipped_variation_types = set()
        self.skipped_species = set()
    def load(self, path=None, update=False, clinvar_xml_path=None, **ignored_kwargs):
        """Load ClinVar mutations and remember which XML release to enrich them from.

        :param path: path to the annovar-annotated TSV (``default_path`` when None)
        :param update: whether existing records should be updated
        :param clinvar_xml_path: path to the ClinVar XML release (``default_xml_path`` when None)
        """
        print(
            'Please note that the annovar and XML database needs to be based on the same ClinVar release'
            ' to avoid incorrect removal of variants which are missing metadata (i.e. not found in the XML file)'
        )
        self.xml_path = clinvar_xml_path or self.default_xml_path
        super().load(path, update, **ignored_kwargs)
@staticmethod
def _beautify_disease_name(name):
if '___' in name:
# for edge cases use a robust regexpr
name = re.sub('([^_])_([^_])', r'\1 \2', name).replace('___', ' _ ')
else:
# but for 99% of data, simple .replace() is much faster:
name = name.replace('_', ' ')
return name.replace('%3B', ';')
    def iterate_lines(self, path):
        # iterate rows of the gzipped TSV, validated against the expected header
        return tsv_file_iterator(path, self.header, file_opener=gzip_open_text)
def test_line(self, line):
try:
at_least_one_significant_sub_entry, *args = self.parse_metadata(line)
return at_least_one_significant_sub_entry
except MalformedRawError:
return False
accepted_assertions = {
'variation to disease',
'variation in modifier gene to disease',
'variation to included disease',
'confers sensitivity'
}
accepted_species = {'Human', 'human'}
clinvar_keys = (
'RS',
'CLNDISDB',
'CLNDN',
'CLNSIG',
'CLNSIGCONF'
)
disease_id_clinvar_to_db = {
'MedGen': 'medgen_id',
'OMIM': 'omim_id',
'SNOMED_CT': 'snomed_ct_id',
'Orphanet': 'orhpanet_id',
'Human_Phenotype_Ontology': 'hpo_id'
}
inverse_significance_map: Mapping[str, int] = {
name.lower(): code
for code, name in ClinicalData.significance_codes.items()
}
significance_map = {
'pathologic': 'pathogenic',
'probable-pathogenic': 'likely pathogenic',
'cancer': 'pathogenic',
'untested': 'not provided',
'variant of unknown significance': 'uncertain significance',
'uncertain': 'uncertain significance',
'drug-response': 'drug response',
'probable-non-pathogenic': 'likely benign',
'probably not pathogenic': 'likely benign',
'non-pathogenic': 'benign',
}
    def _parse_and_filter_reference(self, reference) -> Union[ReferenceData, None]:
        """Extract the relevant parts of a ``ReferenceClinVarAssertion`` XML element.

        Returns ``None`` (so the record is skipped) for: assertion types not in
        ``accepted_assertions``, genotype sets, non-"Variant" measure sets,
        variations absent from the database, and non-human samples.
        """
        assertion = reference.find('Assertion').attrib['Type']

        # skip over "confers resistance" and "variant to named protein"
        if assertion not in self.accepted_assertions:
            assert assertion in {'confers resistance', 'variant to named protein'}
            return

        # variant-disease accession
        rcv_accession = reference.find('ClinVarAccession')
        assert rcv_accession.attrib['Type'] == 'RCV'
        # rcv_accession = rcv_accession.attrib['Acc']

        # variation or variation set, corresponds to InheritedMutation in our database
        variation_set = reference.find('MeasureSet')

        # skip over minority of records with "GenotypeSet"s
        if not variation_set:
            assert reference.find('GenotypeSet')
            return

        # skip over haplotypes, etc
        variation_set_type = variation_set.attrib['Type']
        if variation_set_type != 'Variant':
            assert variation_set_type in {'Haplotype', 'Distinct chromosomes', 'Phase unknown'}
            return

        # corresponds to InheritedMutation.variation_id
        variation_id = int(variation_set.attrib['ID'])

        if variation_id not in self.variants_of_interest:
            # as we effectively have only a fraction of all variations
            # (non-synonymous SNVs only), this will speed things up
            return

        sample = reference.find('ObservedIn/Sample')
        assert sample

        species = sample.find('Species').text
        if species not in self.accepted_species:
            if species not in self.skipped_species:
                print(f'Skipping non-human species: "{species}"')
                self.skipped_species.add(species)
            return

        variations = variation_set.findall('Measure')
        assert len(variations) == 1
        variation = variations[0]

        variation_type = variation.attrib['Type']
        if variation_type != 'single nucleotide variant':
            if variation_type not in self.skipped_variation_types:
                print(f'Skipping variation type: {variation_type}')
                self.skipped_variation_types.add(variation_type)
            # TODO: it seems that it should include a short-circuiting return,
            # see https://github.com/reimandlab/ActiveDriverDB/issues/169
            # NOTE(review): as written, non-SNV records fall through and ARE returned

        return ReferenceData(
            variation_id=variation_id,
            rcv_accession=rcv_accession,
            sample=sample
        )
def parse_significance(self, significance):
significance = significance.lower()
if significance in self.significance_map:
significance = self.significance_map[significance]
additional_significances = []
if significance not in self.inverse_significance_map:
assign_to = 'other'
first_significance, *additional_significances = significance.split(',')
if first_significance in self.inverse_significance_map:
assign_to = first_significance
if significance not in self.skipped_significances:
print(f'Unmapped significance status: "{significance}", assigning "{assign_to}"')
self.skipped_significances[significance] += 1
significance = assign_to
sig_code = self.inverse_significance_map[significance]
return sig_code, [sig.strip() for sig in additional_significances]
    def import_disease_associations(self):
        """Add disease association details to the already imported mutation-disease associations"""
        from tqdm import tqdm
        import gzip

        # catch-all traits carrying no disease information
        ignored_traits = {
            'not specified',
            'not provided'
        }
        print('Only including assertions: ', self.accepted_assertions)
        print('Ignoring traits: ', ignored_traits)

        # reset per-run bookkeeping
        self.skipped_significances = defaultdict(int)
        self.skipped_species = set()
        self.skipped_variation_types = set()
        conflicting_types = set()
        skipped_diseases = set()

        print('Collecting identifiers of variants to consider...')
        variants_of_interest = {
            variation_id
            for variation_id, in db.session.query(ClinicalData.variation_id)
        }
        print('Identifiers collection done.')

        # otherwise there is no point...
        assert variants_of_interest

        opener = gzip.open if self.xml_path.endswith('.gz') else open

        # first pass over the XML only measures its size for the progress bar
        with opener(self.xml_path) as clinvar_full_release:
            total_size = count_characters(clinvar_full_release)

        step = 0

        self.variants_of_interest = {
            variation_id
            for variation_id, in db.session.query(ClinicalData.variation_id)
        }

        # case-insensitive lookup of known diseases by name
        diseases: Dict[str, Disease] = {
            disease.name.lower(): disease
            for disease in Disease.query.all()
        }

        with opener(self.xml_path) as clinvar_full_release:
            # stream-parse the (large) XML instead of loading it whole
            tree = iter(ElementTree.iterparse(clinvar_full_release, events=('start', 'end')))
            event, root = next(tree)

            progress_bar = tqdm(total=1000)
            last_progress = 0

            for event, element in tree:
                if event != 'end' or element.tag != 'ClinVarSet':
                    continue

                reference = element.find('ReferenceClinVarAssertion')

                reference_data = self._parse_and_filter_reference(reference)

                # skip if filtered out
                if not reference_data:
                    continue

                assert reference.find('RecordStatus').text == 'current'

                origin = reference_data.sample.find('Origin').text

                # Disease or observation, corresponds to Disease
                trait = reference.find('TraitSet')
                trait_type = trait.attrib['Type']
                trait_name = trait.find('Trait/Name/ElementValue').text

                if trait_name in ignored_traits:
                    continue

                if trait_name.lower() in diseases:
                    disease = diseases[trait_name.lower()]
                else:
                    # ClinVar renamed some diseases between releases; try a known fix
                    resolved = False
                    if 'Mucolipidosis, Type' in trait_name:
                        print(f'Working around changed name for {trait_name}')
                        trait_name = trait_name.replace('Mucolipidosis, Type', 'Mucolipidosis')
                        if trait_name.lower() in diseases:
                            disease = diseases[trait_name.lower()]
                            print('Workaround was successful')
                            resolved = True
                        else:
                            print('Workaround did not help')
                    if not resolved:
                        skipped_diseases.add(trait_name)
                        print(f'Disease "{trait_name}" entry not found, skipping')
                        continue

                if disease.clinvar_type:
                    if disease.clinvar_type != trait_type:
                        # report each conflicting disease once; prefer the "Disease" type
                        if disease.name not in conflicting_types:
                            conflicting_types.add(disease.name)
                            action = ''
                            if trait_type == 'Disease':
                                disease.clinvar_type = trait_type
                                action = ': overwriting the old type with "Disease"'
                            print(
                                f'Conflicting trait types for "{disease.name}":'
                                f' "{disease.clinvar_type}" != "{trait_type}"{action}'
                            )
                else:
                    disease.clinvar_type = trait_type

                significance_annotations = reference.findall('ClinicalSignificance')
                assert len(significance_annotations) == 1
                significance_annotation = significance_annotations[0]

                significance = significance_annotation.find('Description').text
                review_status = significance_annotation.find('ReviewStatus').text

                sig_code, additional_significances = self.parse_significance(significance)

                disease_associations: Iterable[ClinicalData] = (
                    ClinicalData.query
                    .filter(ClinicalData.disease_id == disease.id)
                    .filter(ClinicalData.variation_id == reference_data.variation_id)
                )

                association_values = {
                    'sig_code': sig_code,
                    'rev_status': review_status,
                    'origin': origin
                }

                for disease_association in disease_associations:
                    # warn about (but still overwrite) values set by an earlier record
                    for key, value in association_values.items():
                        old_value = getattr(disease_association, key)
                        if old_value and old_value != value:
                            print(
                                f'Warning: {key} was already set to {old_value}'
                                f' for {reference_data.variation_id}/{disease},'
                                f' while the new value is {value}'
                                f' (accession: {reference_data.rcv_accession})'
                            )
                    disease_association.sig_code = sig_code
                    disease_association.rev_status = review_status
                    # IMPORTANT: every "continue" up to this point means that the mutation
                    # will be removed in remove_muts_without_origin(), because origin will not be set
                    disease_association.origin = origin
                    if additional_significances:
                        disease_association.additional_significances = set(additional_significances)

                # free the memory held by the processed element
                element.clear()

                step += 1
                if step % 550 == 0:
                    progress = int(clinvar_full_release.tell() / total_size * 1000)
                    if progress != last_progress:
                        progress_bar.update(progress - last_progress)
                        last_progress = progress
                    # drop references accumulated on the root to bound memory use
                    root.clear()

        print(skipped_diseases)
        print(self.skipped_significances)

        db.session.commit()
    def remove_muts_without_origin(self):
        """Prune associations with blacklisted/missing origin, then orphaned mutations and diseases."""
        origin_exclusion_list = {
            'not applicable',
            'not provided',
            'not-reported',
            'somatic',
            'tested-inconclusive',
            'unknown'
        }
        print('ClinVar mutations origin exclusion list: ', origin_exclusion_list)
        print('Removing ClinVar associations with blacklisted or missing origin; NOTE:')
        print('\torigin is not set also when the mutation was skipped due to other reasons, such as non-human species')

        removed_cnt = ClinicalData.query.filter(
            or_(
                ClinicalData.origin == None,  # noqa: E711
                ClinicalData.origin.in_(origin_exclusion_list)
            )
        ).delete(synchronize_session='fetch')
        db.session.commit()

        print(f'Removed {removed_cnt} associations')

        print('Removing orphaned ClinVar mutations (with no associations)')

        empty_mutations_cnt = InheritedMutation.query.filter(~InheritedMutation.clin_data.any()).delete(
            synchronize_session='fetch'
        )
        db.session.commit()

        print(f'Removed {empty_mutations_cnt} ClinVar mutations without associations')

        print('Removing diseases without associations...')
        removed_diseases = Disease.query.filter(~Disease.associations.any()).delete(synchronize_session='fetch')
        print(f'removing {removed_diseases} diseases')
        db.session.commit()
    def _load(self, path, update, **kwargs):
        """Load raw ClinVar data, then import disease associations and prune.

        Keyword args:
            skip_removal: when True, keep associations with blacklisted or
                missing origin (skips remove_muts_without_origin()).
        """
        skip_removal = kwargs.pop('skip_removal', False)
        super()._load(path, update, **kwargs)
        self.import_disease_associations()
        if not skip_removal:
            self.remove_muts_without_origin()
    def parse_metadata(self, line):
        """Extract disease names/ids and significances from one raw ClinVar row.

        NOTE(review): assumes `line` is an already tokenised row with the
        semicolon-separated INFO-style metadata at index 20 and the ClinVar
        variation id at index 15 — confirm against iterate_lines().

        Returns:
            7-tuple: (at_least_one_meaningful_sub_entry, clinvar_entry,
            sub_entries_cnt, disease_names, diseases_ids,
            combined_significances, variation_id).

        Raises:
            MalformedRawError: when the per-disease sub-entry counts disagree.
        """
        metadata = line[20].split(';')
        clinvar_entry = make_metadata_ordered_dict(self.clinvar_keys, metadata)
        # Pipe-separated multi-value INFO fields; an absent field yields [].
        disease_names, diseases_ids, combined_significances, significances_set = (
            (entry.split('|') if entry else [])
            for entry in
            (
                clinvar_entry[key]
                for key in ('CLNDN', 'CLNDISDB', 'CLNSIG', 'CLNSIGCONF')
            )
        )
        diseases_ids_map = [
            {
                key: ':'.join(values)
                # needed as some ids have colons inside, e.g.:
                # CLNDISDB=Human_Phenotype_Ontology:HP:0002145
                for disease_id in disease_ids.split(',')
                for key, *values in [disease_id.split(':')]
            }
            for disease_ids in diseases_ids
        ]
        # Re-order each id mapping into the fixed key order of
        # disease_id_clinvar_to_db, filling missing sources with None.
        diseases_ids = [
            [
                disease_ids_map.get(disease_id_clinvar, None)
                for disease_id_clinvar in self.disease_id_clinvar_to_db
            ]
            for disease_ids_map in diseases_ids_map
        ]
        combined_significances = [
            significance.replace('_', ' ')
            for significance in combined_significances
        ]
        # sanity checks — note that asserts are skipped under `python -O`
        assert len(combined_significances) <= 1
        assert not significances_set or len(significances_set) == 1
        # those lengths should be always equal
        assert len(diseases_ids) == len(disease_names)
        sub_entries_cnt = len(disease_names)
        at_least_one_meaningful_sub_entry = False
        for i in range(sub_entries_cnt):
            try:
                if disease_names:
                    if disease_names[i] not in ('not_specified', 'not_provided'):
                        disease_names[i] = self._beautify_disease_name(disease_names[i])
                        at_least_one_meaningful_sub_entry = True
            except IndexError:
                raise MalformedRawError(f'Malformed row (wrong count of sub-entries) on {i}-th entry:')
        variation_id = int(line[15])
        return (
            at_least_one_meaningful_sub_entry, clinvar_entry, sub_entries_cnt,
            disease_names, diseases_ids, combined_significances, variation_id
        )
@classmethod
def compare_ids(cls, recorded_ids, recorded_name, disease_ids, name):
"""recorded = old"""
out = None
different_ids = [
id_label
for i, (id_label, id_name) in enumerate(cls.disease_id_clinvar_to_db.items())
if str(recorded_ids[i]) != str(disease_ids[i])
]
if any(different_ids):
new_ids = dict(zip(cls.disease_id_clinvar_to_db, disease_ids))
old_ids = dict(zip(cls.disease_id_clinvar_to_db, recorded_ids))
different_ids_values = ', '.join([
f'{id_label}: {old_ids[id_label]} (old) vs {new_ids[id_label]} (new)'
for id_label in different_ids
])
same_ids_labels = ', '.join(
f'{id_label} ({old_ids[id_label]})'
for id_label in cls.disease_id_clinvar_to_db
if id_label not in different_ids and old_ids[id_label]
)
out = (
f'Note: {name} identifiers differ from {recorded_name} identifiers'
f' {different_ids_values}. The following remain the same: {same_ids_labels}.'
)
# not all ids differ (at least some are the same)
assert len(different_ids) != len(recorded_ids)
return out, different_ids
    def parse(self, path):
        """Parse a raw ClinVar file into mutation, association and disease rows.

        Returns:
            (clinvar_mutations, clinvar_data, new_diseases):
            `clinvar_mutations` rows are [mutation_id, db_snp_ids_set,
            combined_significances_set]; `clinvar_data` rows are
            (1-based index into clinvar_mutations, disease_id, variation_id);
            `new_diseases` are (primary_key, (name, *external_ids)) values for
            diseases not yet present in the database.
        """
        clinvar_mutations = []
        clinvar_data = []
        duplicates = 0
        # lower-cased disease name -> (new primary key, (name, *external ids))
        new_diseases = {}
        highest_disease_id = get_highest_id(Disease)
        def clinvar_parser(line):
            """Process a single raw line, mutating the accumulators above."""
            nonlocal highest_disease_id, duplicates
            try:
                (
                    at_least_one_significant_sub_entry, clinvar_entry, sub_entries_cnt,
                    disease_names, diseases_ids, combined_significances, variation_id
                ) = self.parse_metadata(line)
            except MalformedRawError as e:
                print(str(e) + '\n', line)
                return False
            # following 2 lines are result of issue #47 - we don't import those
            # clinvar mutations that do not have any diseases specified:
            if not at_least_one_significant_sub_entry:
                return
            values = list(clinvar_entry.values())
            # should correspond to insert keys!
            clinvar_mutation_values = [
                {int(rs) for rs in (clinvar_entry['RS'] or '').split('|') if rs},
                set(combined_significances)
            ]
            for mutation_id in self.get_or_make_mutations(line):
                # take care of duplicates
                duplicated = self.look_after_duplicates(mutation_id, clinvar_mutations, values[:1])
                if duplicated:
                    duplicates += 1
                    continue
                # take care of nearly-duplicates
                same_mutation_pointers = self.mutations_details_pointers_grouped_by_unique_mutations[mutation_id]
                assert len(same_mutation_pointers) <= 1
                if same_mutation_pointers:
                    pointer = same_mutation_pointers[0]
                    self.merge_into_old_snv(
                        old_values=clinvar_mutations[pointer],
                        new_values=clinvar_mutation_values,
                        mutation_id=mutation_id
                    )
                else:
                    # only add the protein-level mutation once
                    self.protect_from_duplicates(mutation_id, clinvar_mutations)
                    clinvar_mutations.append([mutation_id, *clinvar_mutation_values])
                # then add the disease-mutation relations;
                # if these are caused by multiple SNVs (and thus have different variant_id),
                # add them for each of SNVs separately as each can have different sig_code:
                for i in range(sub_entries_cnt):
                    # disease names matching is case insensitive;
                    # NB: MySQL uses case-insensitive unique constraint by default
                    name = disease_names[i]
                    disease_ids = diseases_ids[i]
                    key = name.lower()
                    merged = False
                    # we don't want _uninteresting_ data
                    if name in ('not_specified', 'not_provided'):
                        continue
                    if key in new_diseases:
                        # seen earlier in this import run but not yet persisted
                        disease_id, (recorded_name, *recorded_ids) = new_diseases[key]
                        merged = True
                        disease = None
                    else:
                        try:
                            disease = Disease.query.filter(Disease.name.ilike(name)).one()
                            disease_id = disease.id
                            recorded_name = disease.name
                            recorded_ids = [
                                getattr(disease, id_name, None)
                                for id_name in self.disease_id_clinvar_to_db.values()
                            ]
                            merged = True
                        except NoResultFound:
                            # brand new disease: allocate the next primary key
                            highest_disease_id += 1
                            new_diseases[key] = highest_disease_id, (name, *disease_ids)
                            disease_id = highest_disease_id
                    if merged:
                        if recorded_name != name:
                            print(
                                f'Note: {name} and {recorded_name} diseases were merged'
                                f' (identical in case-insensitive comparison)'
                            )
                        notice, different_ids = self.compare_ids(recorded_ids, recorded_name, disease_ids, name)
                        if notice:
                            print(notice)
                        if different_ids:
                            if disease:
                                new_ids = dict(zip(self.disease_id_clinvar_to_db, disease_ids))
                                for id_to_update in different_ids:
                                    setattr(disease, id_to_update, new_ids[id_to_update])
                                db.session.commit()
                                print(f'The ids of the {recorded_name} were updated.')
                            else:
                                print(
                                    'No ids updates were performed as the ids came from newly added disease;'
                                    ' this might be a ClinVar integrity issue.'
                                )
                    clinvar_data.append(
                        (
                            len(clinvar_mutations),
                            disease_id,
                            variation_id
                        )
                    )
        for line in self.iterate_lines(path):
            clinvar_parser(line)
        print(f'{duplicates} duplicates found')
        return clinvar_mutations, clinvar_data, new_diseases.values()
def export_details_headers(self):
return ['disease', 'significance', 'has_significance_conflict']
def export_details(self, mutation):
return [
[d.disease_name, d.significance or '', str(d.has_significance_conflict)]
for d in mutation.clin_data
]
    def insert_details(self, details):
        """Bulk-insert the rows produced by parse().

        Order matters: diseases are inserted first (associations reference
        them), then the ClinVar mutations, then the association rows.
        """
        clinvar_mutations, clinvar_data, new_diseases = details
        disease_columns = ('name', *self.disease_id_clinvar_to_db.values())
        bulk_orm_insert(
            Disease,
            disease_columns,
            [disease_data for pk, disease_data in new_diseases]
        )
        self.insert_list(clinvar_mutations)
        bulk_orm_insert(
            ClinicalData,
            ('inherited_id', 'disease_id', 'variation_id'),
            clinvar_data
        )
def restart_autoincrement(self, model):
assert self.model == model
for model in [self.model, ClinicalData, Disease]:
restart_autoincrement(model)
db.session.commit()
    def raw_delete_all(self, model):
        """Remove all ClinVar rows (associations, diseases, then mutations).

        Returns:
            The number of removed mutations only (the counts of removed
            diseases/clinical data are just printed).
        """
        assert self.model == model
        # remove clinical data
        data_cnt = ClinicalData.query.delete()
        # remove diseases
        disease_cnt = Disease.query.delete()
        print(f'{disease_cnt} diseases and {data_cnt} clinical data entries removed')
        # then mutations
        count = self.model.query.delete()
        # count of removed mutations is more informative
        return count
    def merge_into_old_snv(self, old_values, new_values, mutation_id):
        """Merge the set-valued fields of a new SNV row into an existing row, in place.

        `old_values`/`new_values` are positional rows matching self.insert_keys;
        the dbSNP-id and significance sets of the old row are extended with the
        new row's values (self.data_as_dict maps a row back to key -> value —
        NOTE(review): assumed from usage; confirm its contract).
        """
        old = self.data_as_dict(old_values)
        new = self.data_as_dict(new_values, mutation_id=mutation_id)
        for key in ['db_snp_ids', 'combined_significances']:
            index = self.insert_keys.index(key)
            # sets are mutated in place, so the caller's row is updated too
            old_values[index].update(new[key])
        print(f'Merged SNVs of the same protein mutation ({mutation_id}):\n\t{new}\nand\n\t{old}\n')
| lgpl-2.1 |
sumedhasingla/VTK | IO/Geometry/Testing/Python/Plot3DScalars.py | 20 | 3940 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
#
# Render every Plot3D scalar function in its own renderer, laid out on a
# 5 x 2 grid inside a single render window.
#
renWin = vtk.vtkRenderWindow()
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
scalarLabels = ["Density", "Pressure", "Temperature", "Enthalpy",
                "Internal_Energy", "Kinetic_Energy", "Velocity_Magnitude",
                "Stagnation_Energy", "Entropy", "Swirl"]
scalarFunctions = ["100", "110", "120", "130",
                   "140", "144", "153",
                   "163", "170", "184"]
camera = vtk.vtkCamera()
light = vtk.vtkLight()
math = vtk.vtkMath()
# All text actors will share the same text prop
textProp = vtk.vtkTextProperty()
textProp.SetFontSize(10)
textProp.SetFontFamilyToArial()
textProp.SetColor(0, 0, 0)
# Map each scalar-function number to its renderer, and keep every pipeline
# object alive for the lifetime of the script. This replaces the original
# exec()/eval()-generated variable names (pl3d100, ren100, ...), which were
# hard to read and defeated static analysis.
pipelines = {}
renderers = {}
for label, function_number in zip(scalarLabels, scalarFunctions):
    reader = vtk.vtkMultiBlockPLOT3DReader()
    reader.SetXYZFileName(VTK_DATA_ROOT + "/Data/bluntfinxyz.bin")
    reader.SetQFileName(VTK_DATA_ROOT + "/Data/bluntfinq.bin")
    reader.SetScalarFunctionNumber(int(function_number))
    reader.Update()
    output = reader.GetOutput().GetBlock(0)
    plane = vtk.vtkStructuredGridGeometryFilter()
    plane.SetInputData(output)
    plane.SetExtent(25, 25, 0, 100, 0, 100)
    mapper = vtk.vtkPolyDataMapper()
    mapper.SetInputConnection(plane.GetOutputPort())
    mapper.SetScalarRange(output.GetPointData().GetScalars().GetRange())
    actor = vtk.vtkActor()
    actor.SetMapper(mapper)
    ren = vtk.vtkRenderer()
    ren.SetActiveCamera(camera)
    ren.AddLight(light)
    renWin.AddRenderer(ren)
    # Random background per renderer (the original first set a solid blue and
    # immediately overwrote it with this random color, so the blue is dropped).
    ren.SetBackground(
        math.Random(.5, 1), math.Random(.5, 1), math.Random(.5, 1))
    ren.AddActor(actor)
    textMapper = vtk.vtkTextMapper()
    textMapper.SetInput(label)
    textMapper.SetTextProperty(textProp)
    # text = vtk.vtkActor2D()
    # text.SetMapper(textMapper)
    # text.SetPosition(2, 3)
    # ren.AddActor2D(text)
    pipelines[function_number] = (reader, plane, mapper, actor, textMapper)
    renderers[function_number] = ren
#
# now layout the renderers on the 5-column x 2-row grid
column = 1
row = 1
deltaX = 1.0 / 5.0
deltaY = 1.0 / 2.0
for function_number in scalarFunctions:
    renderers[function_number].SetViewport(
        (column - 1) * deltaX, (row - 1) * deltaY, column * deltaX, row * deltaY)
    column += 1
    if (column > 5):
        column = 1
        row += 1
camera.SetViewUp(0, 1, 0)
camera.SetFocalPoint(0, 0, 0)
camera.SetPosition(1, 0, 0)
# One shared camera: reset it against the first dataset, then only fix up the
# clipping range for every renderer.
renderers["100"].ResetCamera()
camera.Dolly(1.25)
for ren in renderers.values():
    ren.ResetCameraClippingRange()
light.SetPosition(camera.GetPosition())
light.SetFocalPoint(camera.GetFocalPoint())
renWin.SetSize(600, 180)
renWin.Render()
# render the image
#
iren.Initialize()
# iren.Start()
| bsd-3-clause |
tliber/scrapy | tests/test_logformatter.py | 80 | 2131 | import unittest
import six
from scrapy.spiders import Spider
from scrapy.http import Request, Response
from scrapy.item import Item, Field
from scrapy.logformatter import LogFormatter
class CustomItem(Item):
    # Minimal scrapy Item with a single field; used to verify that
    # scraped-item log lines go through the item's __str__.
    name = Field()

    def __str__(self):
        return "name: %s" % self['name']
class LoggingContribTest(unittest.TestCase):
    """Checks the log lines produced by the default LogFormatter."""

    def setUp(self):
        self.formatter = LogFormatter()
        self.spider = Spider('default')

    def test_crawled(self):
        """'Crawled' lines include status, method, URL, referer and flags."""
        req = Request("http://www.example.com")
        res = Response("http://www.example.com")
        logkws = self.formatter.crawled(req, res, self.spider)
        logline = logkws['msg'] % logkws['args']
        self.assertEqual(logline,
            "Crawled (200) <GET http://www.example.com> (referer: None)")
        req = Request("http://www.example.com", headers={'referer': 'http://example.com'})
        res = Response("http://www.example.com", flags=['cached'])
        logkws = self.formatter.crawled(req, res, self.spider)
        logline = logkws['msg'] % logkws['args']
        self.assertEqual(logline,
            "Crawled (200) <GET http://www.example.com> (referer: http://example.com) ['cached']")

    def test_dropped(self):
        """Dropped-item lines must be unicode-safe (non-ASCII exception text)."""
        item = {}
        exception = Exception(u"\u2018")
        response = Response("http://www.example.com")
        logkws = self.formatter.dropped(item, exception, response, self.spider)
        logline = logkws['msg'] % logkws['args']
        lines = logline.splitlines()
        assert all(isinstance(x, six.text_type) for x in lines)
        self.assertEqual(lines, [u"Dropped: \u2018", '{}'])

    def test_scraped(self):
        """Scraped-item lines use the item's __str__ and stay unicode-safe."""
        item = CustomItem()
        item['name'] = u'\xa3'
        response = Response("http://www.example.com")
        logkws = self.formatter.scraped(item, response, self.spider)
        logline = logkws['msg'] % logkws['args']
        lines = logline.splitlines()
        assert all(isinstance(x, six.text_type) for x in lines)
        self.assertEqual(lines, [u"Scraped from <200 http://www.example.com>", u'name: \xa3'])
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
dllsf/odootest | addons/project_issue_sheet/__openerp__.py | 120 | 1851 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Timesheet on Issues',
'version': '1.0',
'category': 'Project Management',
'description': """
This module adds the Timesheet support for the Issues/Bugs Management in Project.
=================================================================================
Worklogs can be maintained to signify number of hours spent by users to handle an issue.
""",
'author': 'OpenERP SA',
'website': 'http://www.openerp.com',
'images': ['images/project_issue_sheet_worklog.jpeg'],
'depends': [
'project_issue',
'hr_timesheet_sheet',
],
'data': [
'project_issue_sheet_view.xml',
'security/ir.model.access.csv',
],
'demo': [],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
gangadhar-kadam/verve_live_erp | erpnext/projects/doctype/time_log/test_time_log.py | 5 | 3118 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
# Deduplicated: `from __future__ import unicode_literals` appeared twice.
# Imports grouped per convention: __future__, stdlib, third-party/app.
from __future__ import unicode_literals

import unittest

import frappe
from erpnext.projects.doctype.time_log.time_log import OverlapError
from erpnext.projects.doctype.time_log.time_log import NotSubmittedError
from erpnext.manufacturing.doctype.workstation.workstation import WorkstationHolidayError
from erpnext.manufacturing.doctype.workstation.workstation import NotInWorkingHoursError
# star import intentionally kept: it provides shared fixtures for these tests
from erpnext.projects.doctype.time_log_batch.test_time_log_batch import *
class TestTimeLog(unittest.TestCase):
	"""Tests for the Time Log doctype: overlap, status, working-hours and
	negative-duration validation. Each test manages its own DB state."""

	def test_duplication(self):
		"""Two identical time logs for the same user must raise OverlapError."""
		frappe.db.sql("delete from `tabTime Log`")
		tl1 = frappe.get_doc(frappe.copy_doc(test_records[0]))
		tl1.user = "test@example.com"
		tl1.insert()
		tl2 = frappe.get_doc(frappe.copy_doc(test_records[0]))
		tl2.user = "test@example.com"
		self.assertRaises(OverlapError, tl2.insert)
		frappe.db.sql("delete from `tabTime Log`")

	def test_production_order_status(self):
		"""Logging against a draft (unsubmitted) production order is rejected."""
		prod_order = make_prod_order(self)
		prod_order.save()
		time_log = frappe.get_doc({
			"doctype": "Time Log",
			"time_log_for": "Manufacturing",
			"production_order": prod_order.name,
			"qty": 1,
			"from_time": "2014-12-26 00:00:00",
			"to_time": "2014-12-26 00:00:00"
		})
		self.assertRaises(NotSubmittedError, time_log.save)

	def test_time_log_on_holiday(self):
		"""Logs on workstation holidays or outside working hours are rejected;
		a log within working hours then saves, submits and cancels cleanly."""
		prod_order = make_prod_order(self)
		prod_order.set_production_order_operations()
		prod_order.save()
		prod_order.submit()
		time_log = frappe.get_doc({
			"doctype": "Time Log",
			"time_log_for": "Manufacturing",
			"production_order": prod_order.name,
			"operation": prod_order.operations[0].operation,
			"operation_id": prod_order.operations[0].name,
			"qty": 1,
			"activity_type": "_Test Activity Type",
			"from_time": "2013-02-01 10:00:00",
			"to_time": "2013-02-01 20:00:00",
			"workstation": "_Test Workstation 1"
		})
		self.assertRaises(WorkstationHolidayError , time_log.save)
		time_log.update({
			"from_time": "2013-02-02 09:00:00",
			"to_time": "2013-02-02 20:00:00"
		})
		self.assertRaises(NotInWorkingHoursError , time_log.save)
		# 10:30 falls within working hours, so this save must succeed
		time_log.from_time= "2013-02-02 10:30:00"
		time_log.save()
		time_log.submit()
		time_log.cancel()

	def test_negative_hours(self):
		"""to_time earlier than from_time must fail validation."""
		frappe.db.sql("delete from `tabTime Log`")
		test_time_log = frappe.new_doc("Time Log")
		test_time_log.activity_type = "Communication"
		test_time_log.from_time = "2013-01-01 11:00:00.000000"
		test_time_log.to_time = "2013-01-01 10:00:00.000000"
		self.assertRaises(frappe.ValidationError, test_time_log.save)
		frappe.db.sql("delete from `tabTime Log`")
def make_prod_order(self):
	"""Return a new, unsaved Production Order document for the tests above.

	`self` is the calling TestCase instance (kept for signature
	compatibility; it is not used).
	"""
	order_fields = {
		"doctype":"Production Order",
		"production_item": "_Test FG Item 2",
		"bom_no": "BOM/_Test FG Item 2/001",
		"qty": 1,
		"wip_warehouse": "_Test Warehouse - _TC",
		"fg_warehouse": "_Test Warehouse 1 - _TC",
		"company": "_Test Company"
	}
	return frappe.get_doc(order_fields)
# Shared fixtures loaded from the doctype's test-records JSON.
test_records = frappe.get_test_records('Time Log')
# Dependent doctypes whose auto-generated tests should not run here.
test_ignore = ["Time Log Batch", "Sales Invoice"]
| agpl-3.0 |
Yukarumya/Yukarum-Redfoxes | testing/mozbase/mozdevice/adb_tests/test_devicemanagerADB.py | 1 | 7913 | """
Info:
This tests DeviceManagerADB with a real device
Requirements:
- You must have a device connected
- It should be listed under 'adb devices'
Notes:
- Not all functions have been covered.
In particular, functions from the parent class
- No testing of properties is done
 - The test cases are very simple; they could be
   made stronger with deeper inspection of the return values
Author(s):
- Armen Zambrano <armenzg@mozilla.com>
Functions that are not being tested:
- launchProcess - DEPRECATED
- getIP
- recordLogcat
- saveScreenshot
- validateDir
- mkDirs
- getDeviceRoot
- shellCheckOutput
- processExist
I assume these functions are only useful for Android
- getAppRoot()
- updateApp()
- uninstallApp()
- uninstallAppAndReboot()
"""
import os
import re
import socket
import sys
import tempfile
import unittest
from StringIO import StringIO
from mozdevice import DeviceManagerADB, DMError
def find_mount_permissions(dm, mount_path):
    """Return 'ro' or 'rw' for the mount containing mount_path, else None.

    Scans the device's `mount` output via dm._runCmd and extracts the first
    'ro'/'rw' flag (which precedes a comma in the options field).
    """
    for mount_point in dm._runCmd(["shell", "mount"]).output:
        # membership test instead of `.find(...) > 0`: the original silently
        # missed a mount path appearing at the start of the line (index 0)
        if mount_path in mount_point:
            match = re.search('(ro|rw)(?=,)', mount_point)
            # guard against lines mentioning the path but carrying no flags
            # (the original raised AttributeError on `None.group(0)`)
            if match:
                return match.group(0)
    return None
class DeviceManagerADBTestCase(unittest.TestCase):
    """Shared fixture: one DeviceManagerADB connection plus matching local and
    remote temp files/dirs, created once per class and cleaned per test."""
    tempLocalDir = "tempDir"
    tempLocalFile = os.path.join(tempLocalDir, "tempfile.txt")
    tempRemoteDir = None
    tempRemoteFile = None
    tempRemoteSystemFile = None

    def setUp(self):
        # /system must start read-only. The original used
        # assertTrue(..., "ro"), where "ro" was only the failure *message*,
        # so a device mounted "rw" passed too — assertEqual pins the value.
        self.assertEqual(find_mount_permissions(self.dm, "/system"), "ro")
        self.assertTrue(os.path.exists(self.tempLocalDir))
        self.assertTrue(os.path.exists(self.tempLocalFile))
        # start each test without the remote temp/system files present
        if self.dm.fileExists(self.tempRemoteFile):
            self.dm.removeFile(self.tempRemoteFile)
        self.assertFalse(self.dm.fileExists(self.tempRemoteFile))
        if self.dm.fileExists(self.tempRemoteSystemFile):
            self.dm.removeFile(self.tempRemoteSystemFile)
        self.assertTrue(self.dm.dirExists(self.tempRemoteDir))

    @classmethod
    def setUpClass(cls):
        # classmethods conventionally take `cls` (the original named it `self`)
        cls.dm = DeviceManagerADB()
        if not os.path.exists(cls.tempLocalDir):
            os.mkdir(cls.tempLocalDir)
        if not os.path.exists(cls.tempLocalFile):
            # Create empty file
            open(cls.tempLocalFile, 'w').close()
        cls.tempRemoteDir = cls.dm.getTempDir()
        cls.tempRemoteFile = os.path.join(cls.tempRemoteDir,
                                          os.path.basename(cls.tempLocalFile))
        cls.tempRemoteSystemFile = \
            os.path.join("/system", os.path.basename(cls.tempLocalFile))

    @classmethod
    def tearDownClass(cls):
        os.remove(cls.tempLocalFile)
        os.rmdir(cls.tempLocalDir)
        if cls.dm.dirExists(cls.tempRemoteDir):
            # cls.tempRemoteFile will get deleted with it
            cls.dm.removeDir(cls.tempRemoteDir)
        if cls.dm.fileExists(cls.tempRemoteSystemFile):
            cls.dm.removeFile(cls.tempRemoteSystemFile)
class TestFileOperations(DeviceManagerADBTestCase):
    """Push/pull/move/remove operations against a real connected device."""

    def test_make_and_remove_directory(self):
        dir1 = os.path.join(self.tempRemoteDir, "dir1")
        self.assertFalse(self.dm.dirExists(dir1))
        self.dm.mkDir(dir1)
        self.assertTrue(self.dm.dirExists(dir1))
        self.dm.removeDir(dir1)
        self.assertFalse(self.dm.dirExists(dir1))

    def test_push_and_remove_file(self):
        self.dm.pushFile(self.tempLocalFile, self.tempRemoteFile)
        self.assertTrue(self.dm.fileExists(self.tempRemoteFile))
        self.dm.removeFile(self.tempRemoteFile)
        self.assertFalse(self.dm.fileExists(self.tempRemoteFile))

    def test_push_and_pull_file(self):
        # writes pulled.txt into the current working directory, then removes it
        self.dm.pushFile(self.tempLocalFile, self.tempRemoteFile)
        self.assertTrue(self.dm.fileExists(self.tempRemoteFile))
        self.assertFalse(os.path.exists("pulled.txt"))
        self.dm.getFile(self.tempRemoteFile, "pulled.txt")
        self.assertTrue(os.path.exists("pulled.txt"))
        os.remove("pulled.txt")

    def test_push_and_pull_directory_and_list_files(self):
        self.dm.removeDir(self.tempRemoteDir)
        self.assertFalse(self.dm.dirExists(self.tempRemoteDir))
        self.dm.pushDir(self.tempLocalDir, self.tempRemoteDir)
        self.assertTrue(self.dm.dirExists(self.tempRemoteDir))
        response = self.dm.listFiles(self.tempRemoteDir)
        # The local dir that was pushed contains the tempLocalFile
        self.assertIn(os.path.basename(self.tempLocalFile), response)
        # Create a temp dir to pull to
        temp_dir = tempfile.mkdtemp()
        self.assertTrue(os.path.exists(temp_dir))
        self.dm.getDirectory(self.tempRemoteDir, temp_dir)
        self.assertTrue(os.path.exists(self.tempLocalFile))

    def test_move_and_remove_directories(self):
        dir1 = os.path.join(self.tempRemoteDir, "dir1")
        dir2 = os.path.join(self.tempRemoteDir, "dir2")
        self.assertFalse(self.dm.dirExists(dir1))
        self.dm.mkDir(dir1)
        self.assertTrue(self.dm.dirExists(dir1))
        self.assertFalse(self.dm.dirExists(dir2))
        self.dm.moveTree(dir1, dir2)
        self.assertTrue(self.dm.dirExists(dir2))
        self.dm.removeDir(dir1)
        self.dm.removeDir(dir2)
        self.assertFalse(self.dm.dirExists(dir1))
        self.assertFalse(self.dm.dirExists(dir2))

    def test_push_and_remove_system_file(self):
        """Remounts /system read-write, pushes/removes a file there, restores ro."""
        out = StringIO()
        self.assertTrue(find_mount_permissions(self.dm, "/system") == "ro")
        self.assertFalse(self.dm.fileExists(self.tempRemoteSystemFile))
        self.assertRaises(DMError, self.dm.pushFile, self.tempLocalFile, self.tempRemoteSystemFile)
        self.dm.shell(['mount', '-w', '-o', 'remount', '/system'], out)
        self.assertTrue(find_mount_permissions(self.dm, "/system") == "rw")
        self.assertFalse(self.dm.fileExists(self.tempRemoteSystemFile))
        self.dm.pushFile(self.tempLocalFile, self.tempRemoteSystemFile)
        self.assertTrue(self.dm.fileExists(self.tempRemoteSystemFile))
        self.dm.removeFile(self.tempRemoteSystemFile)
        self.assertFalse(self.dm.fileExists(self.tempRemoteSystemFile))
        self.dm.shell(['mount', '-r', '-o', 'remount', '/system'], out)
        out.close()
        self.assertTrue(find_mount_permissions(self.dm, "/system") == "ro")
class TestOther(DeviceManagerADBTestCase):
    """Miscellaneous device queries: process list, time, info, shell, forwarding."""

    def test_get_list_of_processes(self):
        self.assertEquals(type(self.dm.getProcessList()), list)

    def test_get_current_time(self):
        self.assertEquals(type(self.dm.getCurrentTime()), int)

    def test_get_info(self):
        self.assertEquals(type(self.dm.getInfo()), dict)

    def test_list_devices(self):
        # assumes exactly one device is attached (enforced in __main__)
        self.assertEquals(len(list(self.dm.devices())), 1)

    def test_shell(self):
        """shell() honours env, cwd, timeout and root flags."""
        out = StringIO()
        self.dm.shell(["echo", "$COMPANY", ";", "pwd"], out,
                      env={"COMPANY": "Mozilla"}, cwd="/", timeout=4, root=True)
        output = str(out.getvalue()).rstrip().splitlines()
        out.close()
        self.assertEquals(output, ['Mozilla', '/'])

    def test_port_forwarding(self):
        # grab a free local port by binding then closing a socket
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(("", 0))
        port = s.getsockname()[1]
        s.close()
        # If successful then no exception is raised
        self.dm.forward("tcp:%s" % port, "tcp:2828")

    def test_port_forwarding_error(self):
        self.assertRaises(DMError, self.dm.forward, "", "")
if __name__ == '__main__':
    # Manual entry point: requires one adb-visible device with /system
    # mounted read-only (see module docstring). Python 2 print syntax is
    # intentional — this module predates the Python 3 migration.
    dm = DeviceManagerADB()
    if not dm.devices():
        print "There are no connected adb devices"
        sys.exit(1)
    if find_mount_permissions(dm, "/system") == "rw":
        print "We've found out that /system is mounted as 'rw'. This is because the command " \
            "'adb remount' has been run before running this test case. Please reboot the device " \
            "and try again."
        sys.exit(1)
    unittest.main()
| mpl-2.0 |
rodrigofaccioli/drugdesign | molecular_dynamics/gromacs/spark/trajectory.py | 1 | 17610 | from pyspark import SparkContext, SparkConf, SparkFiles
from pyspark.sql import SQLContext, Row
from subprocess import Popen, PIPE
from datetime import datetime
import os
import sys
from md_description import md_description
from gromacs_utils import check_file_exists
from os_utils import make_directory, preparing_path, time_execution_log
from basic_analysis import run_basic_analysis
try:
import configparser
except ImportError:
import ConfigParser as configparser
def search_for_ligand_ndx_file(ndx_file):
    """Return True when the GROMACS index file mentions an "Other" group.

    An "Other" group in the index indicates the system contains ligands.
    The check is case-insensitive. Uses `with` so the handle is closed even
    on error (the original left the file open if iteration raised), and
    returns as soon as a match is found instead of counting every occurrence.
    """
    with open(ndx_file, "r") as ndx_handle:
        for line in ndx_handle:
            if "OTHER" in line.upper():
                return True
    return False
def load_md_traj(file_of_md_analysis):
    """Parse an MD-analysis description file into md_description objects.

    Each non-empty line must contain five whitespace-separated fields:
    path, reference prefix, repetition number, output title and the total
    number of runs.

    Args:
        file_of_md_analysis: path of the description file.

    Returns:
        list of md_description instances, one per non-empty line.
    """
    list_ret = []
    # `with` guarantees the handle is closed (the original leaked it)
    with open(file_of_md_analysis, "r") as f_file:
        for line in f_file:
            splited_line = line.split()
            if not splited_line:
                # tolerate blank lines (e.g. a trailing newline at EOF)
                continue
            # split() already strips whitespace, so no extra str()/strip()
            path = splited_line[0]
            prefix_ref = splited_line[1]
            repetion_number = int(splited_line[2])
            title_output = splited_line[3]
            total_running = int(splited_line[4])
            list_ret.append(md_description(path,
                                           prefix_ref,
                                           repetion_number,
                                           title_output,
                                           total_running))
    return list_ret
def main():
sc = SparkContext()
sqlCtx = SQLContext(sc)
config = configparser.ConfigParser()
config.read('config.ini')
# Path for gromacs spark project
path_spark_drugdesign = config.get('DRUGDESIGN', 'path_spark_drugdesign')
# Adding Python Source file
sc.addPyFile(os.path.join(path_spark_drugdesign, "gromacs_utils.py"))
sc.addPyFile(os.path.join(path_spark_drugdesign, "os_utils.py"))
sc.addPyFile(os.path.join(path_spark_drugdesign, "basic_analysis.py"))
sc.addPyFile(os.path.join(path_spark_drugdesign, "md_description.py"))
# Path for gromacs program
gromacs_path = preparing_path(config.get('DRUGDESIGN', 'gromacs_path'))
time_dt = int(config.get('GROMACS_ANALYSIS', 'time_dt'))
time_dt_pdb = int(config.get('GROMACS_ANALYSIS', 'time_dt_pdb'))
water_layer_thickness = int(config.get('GROMACS_ANALYSIS',
'water_layer_thickness'))
# File that contains all md to create the trajectory
file_of_md_analysis = sys.argv[1]
check_file_exists(file_of_md_analysis)
start_time = datetime.now()
# Broadcast
gromacs_path = sc.broadcast(gromacs_path)
time_dt = sc.broadcast(time_dt)
time_dt_pdb = sc.broadcast(time_dt_pdb)
water_layer_thickness = sc.broadcast(water_layer_thickness)
# ********************* STARTING FUNCTION ***************************
def run_trajetory(md_obj):
ana_dir = os.path.join(md_obj.get_path(), "analysis")
make_directory(ana_dir)
# Original file names from the simulation
reference_xtc = os.path.join(md_obj.get_path(),
md_obj.get_simulation_prefix() + ".xtc")
reference_tpr = os.path.join(md_obj.get_path(),
md_obj.get_simulation_prefix() + ".tpr")
# File names after trajectory treatment.
allatom_xtc = os.path.join(ana_dir, "".join([md_obj.get_prefix_ref(),
"_fit.",
str(md_obj.get_repetion_number()),
".xtc"]))
allatom_tpr = reference_tpr
nonwater_xtc = os.path.join(ana_dir,"".join([md_obj.get_prefix_ref(),
"_non-water.",
str(md_obj.get_repetion_number()),
".xtc"]))
nonwater_tpr = os.path.join(ana_dir, "".join([md_obj.get_prefix_ref(),
"_non-water.",
str(md_obj.get_repetion_number()),
".tpr"]))
nonwater_pdb = os.path.join(ana_dir, "".join([md_obj.get_prefix_ref(),
"_non-water.",
str(md_obj.get_repetion_number()),
".pdb"]))
waterlayer_pdb = os.path.join(ana_dir, "".join([md_obj.get_prefix_ref(),
"_water-",
str(water_layer_thickness.value),
"A-layer.",
str(md_obj.get_repetion_number()),
".pdb"]))
# Trajectory treatment to remove PBC artifacts
xtc_whole = os.path.join(ana_dir, "".join([md_obj.get_prefix_ref(),
"_whole.",
str(md_obj.get_repetion_number()),
".xtc"]))
command = "".join(["echo System | ",
gromacs_path.value,
"./gmx trjconv ",
"-f ",
reference_xtc,
" -s ",
reference_tpr,
" -pbc whole",
" -o ",
xtc_whole,
" >/dev/null 2>/dev/null"])
proc = Popen(command, shell=True, stdout=PIPE)
proc.communicate()
# Extracting first frame
gro_first_frame = os.path.join(ana_dir, "".join(["0.",
str(md_obj.get_repetion_number()),
".gro"]))
command = "".join(["echo System | ",
gromacs_path.value,
"./gmx trjconv ",
"-f ",
xtc_whole,
" -s ",
reference_tpr,
" -e 0.1 ",
" -o ",
gro_first_frame,
" >/dev/null 2>/dev/null"])
proc = Popen(command, shell=True, stdout=PIPE)
proc.communicate()
# Removing jumps
xtc_nojump = os.path.join(ana_dir,
"".join([md_obj.get_prefix_ref(),
"_nojump.",
str(md_obj.get_repetion_number()),
".xtc"]))
command = "".join(["echo System | ",
gromacs_path.value,
"./gmx trjconv ",
"-f ",
xtc_whole,
" -s ",
gro_first_frame,
" -pbc nojump ",
" -o ",
xtc_nojump,
" >/dev/null 2>/dev/null"])
proc = Popen(command, shell=True, stdout=PIPE)
proc.communicate()
# Centering the protein
xtc_center_protein = os.path.join(ana_dir, "".join([md_obj.get_prefix_ref(),
"_center.",
str(md_obj.get_repetion_number()),
".xtc"]))
command = "".join(["echo C-alpha System | ",
gromacs_path.value,
"./gmx trjconv ",
"-f ",
xtc_whole,
" -s ",
gro_first_frame,
" -center ",
" -o ",
xtc_center_protein,
" >/dev/null 2>/dev/null"])
proc = Popen(command, shell=True, stdout=PIPE)
proc.communicate()
# Putting all atoms in a compact box
xtc_atoms_box = os.path.join(ana_dir, "".join([md_obj.get_prefix_ref(),
"_atom.",
str(md_obj.get_repetion_number()),
".xtc"]))
command = "".join(["echo System | ",
gromacs_path.value,
"./gmx trjconv ",
"-f ",
xtc_center_protein,
" -s ",
gro_first_frame,
" -ur compact ",
" -pbc atom ",
" -o ",
xtc_atoms_box,
" >/dev/null 2>/dev/null"])
proc = Popen(command, shell=True, stdout=PIPE)
proc.communicate()
# Fitting the protein
command = "".join(["echo C-alpha System | ",
gromacs_path.value,
"./gmx trjconv ",
"-f ",
xtc_atoms_box,
" -s ",
gro_first_frame,
" -fit rot+trans ",
" -o ",
allatom_xtc,
" >/dev/null 2>/dev/null"])
proc = Popen(command, shell=True, stdout=PIPE)
proc.communicate()
# Creating water-free trajectory
command = "".join(["echo non-water | ",
gromacs_path.value,
"./gmx convert-tpr ",
" -s ",
reference_tpr,
" -o ",
nonwater_tpr,
" >/dev/null 2>/dev/null"])
proc = Popen(command, shell=True, stdout=PIPE)
proc.communicate()
command = "".join(["echo non-water | ",
gromacs_path.value,
"./gmx trjconv ",
"-f ",
allatom_xtc,
" -s ",
gro_first_frame,
" -o ",
nonwater_xtc,
" >/dev/null 2>/dev/null"])
proc = Popen(command, shell=True, stdout=PIPE)
proc.communicate()
command = "".join(["echo system | ",
gromacs_path.value,
"./gmx trjconv ",
" -f ",
nonwater_xtc,
" -s ",
nonwater_tpr,
" -o ",
nonwater_pdb,
" -dt ",
str(time_dt_pdb.value),
" >/dev/null 2>/dev/null"])
proc = Popen(command, shell=True, stdout=PIPE)
proc.communicate()
# Creating water_layer_thickness - A water-layer pdb trajectory
t = 0
frame = 0
ndx_water_layer = os.path.join(ana_dir, "".join([md_obj.get_prefix_ref(),
"_water-layer.",
str(md_obj.get_repetion_number()),
".ndx"]))
ndx_temporary = os.path.join(ana_dir, "".join([md_obj.get_prefix_ref(),
"_temporary_",
str(md_obj.get_repetion_number()),
".ndx"]))
if os.path.isfile(waterlayer_pdb):
os.remove(waterlayer_pdb)
if os.path.isfile(ndx_water_layer):
os.remove(ndx_water_layer)
select_string = ('\'"water_layer" (same residue as ((resname SOL and within 0.'"$water_layer_thickness"' of group "Protein"))) or\
(group "Ion" and within 0.'"$water_layer_thickness"' of group "Protein") \
or (group "Protein") \'')
select_string = select_string.replace("$water_layer_thickness",
str(water_layer_thickness.value))
# Running make_ndx
command = "".join(["echo -e ",
"'chain z'\"\\n\"'q'\"\\n\" | ",
gromacs_path.value,
"gmx make_ndx ",
"-f ",
reference_tpr,
" -o ",
ndx_temporary,
" >/dev/null 2>/dev/null"])
proc = Popen(command, shell=True, stdout=PIPE)
proc.communicate()
# Are there ligands?
if search_for_ligand_ndx_file(ndx_temporary) is True:
select_string = (select_string
+ '\'or (same residue as ((resname SOL and within 0.'"$water_layer_thickness"' of group "Other"))) \
or (group "Ion" and within 0.'"$water_layer_thickness"' of group "Other") \
or (group "Other")\'')
select_string = select_string.replace("$water_layer_thickness",
str(water_layer_thickness.value))
command = "".join([gromacs_path.value,
"gmx select -f ",
allatom_xtc,
" -s ",
allatom_tpr,
" -on ",
ndx_water_layer,
" -select ",
select_string,
" -dt ",
str(time_dt_pdb.value),
" >/dev/null 2>/dev/null"])
proc = Popen(command, shell=True, stdout=PIPE)
proc.communicate()
# Creating pdb files
command = "".join(["echo ",
str(frame),
" | ",
gromacs_path.value,
"./gmx trjconv ",
"-f ",
allatom_xtc,
" -s ",
allatom_tpr,
" -n ",
ndx_water_layer,
" -o ",
"frame_",
str(frame),
".pdb ",
"-b ",
str(t),
" -e ",
str(t),
" >/dev/null 2>/dev/null"])
proc = Popen(command, shell=True, stdout=PIPE)
proc.communicate()
command = "".join(["echo MODEL ", str(frame), " >> ", waterlayer_pdb])
proc = Popen(command, shell=True, stdout=PIPE)
proc.communicate()
command = "".join(["grep ATOM ",
"frame_",
str(frame),
".pdb ",
">> ",
waterlayer_pdb])
proc = Popen(command, shell=True, stdout=PIPE)
proc.communicate()
command = "".join(["echo ENDML", ">> ", waterlayer_pdb])
proc = Popen(command, shell=True, stdout=PIPE)
proc.communicate()
# Removing temporary files
command = "".join(["rm frame_", str(frame), ".pdb"])
proc = Popen(command, shell=True, stdout=PIPE)
proc.communicate()
frame = frame + 1
t = t + int(time_dt_pdb.value)
if os.path.isfile(xtc_whole):
os.remove(xtc_whole)
if os.path.isfile(xtc_nojump):
os.remove(xtc_nojump)
if os.path.isfile(xtc_center_protein):
os.remove(xtc_center_protein)
if os.path.isfile(xtc_atoms_box):
os.remove(xtc_atoms_box)
if os.path.isfile(ndx_water_layer):
os.remove(ndx_water_layer)
if os.path.isfile(gro_first_frame):
os.remove(gro_first_frame)
command = "rm \#* 2>/dev/null"
proc = Popen(command, shell=True, stdout=PIPE)
proc.communicate()
# Basic Analysis
basic_an_data = (gromacs_path.value,
nonwater_xtc,
nonwater_tpr,
md_obj.get_simulation_prefix(),
ana_dir,
time_dt.value)
run_basic_analysis(basic_an_data)
# ************************** END FUNCTION **********************************
list_obj_md = load_md_traj(file_of_md_analysis)
md_trajRDD = sc.parallelize(list_obj_md)
md_trajRDD.foreach(run_trajetory)
finish_time = datetime.now()
time_execution_log(finish_time, start_time, "gromacs_trajectory.log")
main()
| apache-2.0 |
zhaque/django-authopenid | django_authopenid/openid_store.py | 20 | 4366 | # -*- coding: utf-8 -*-
# Copyright 2007, 2008, 2009 by Benoît Chesneau <benoitc@e-engura.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
import operator
import time
import urllib
try:
from hashlib import md5 as _md5
except ImportError:
import md5
_md5 = md5.new
from django.db.models.query import Q
from django.conf import settings
from openid.association import Association as OIDAssociation
import openid.store.interface
import openid.store
from django_authopenid.models import Association, Nonce
from django_authopenid.utils import OpenID
class DjangoOpenIDStore(openid.store.interface.OpenIDStore):
    """python-openid store backed by the Django ORM.

    Persists OpenID associations and nonces in the ``Association`` and
    ``Nonce`` models so verification state survives across requests and
    processes.  Written for Python 2 (``base64.encodestring`` and the
    builtin ``reduce`` are Python 2 spellings).
    """

    def __init__(self):
        # NOTE(review): this attribute is not read anywhere in this class;
        # the nonce expiry cutoff below actually uses
        # openid.store.nonce.SKEW -- confirm whether external code uses it.
        self.max_nonce_age = 6 * 60 * 60 # Six hours

    def storeAssociation(self, server_url, association):
        """Persist a python-openid association for ``server_url``."""
        # The binary shared secret is base64-encoded so it can live in a
        # text column.
        assoc = Association(
            server_url = server_url,
            handle = association.handle,
            secret = base64.encodestring(association.secret),
            issued = association.issued,
            lifetime = association.lifetime,
            assoc_type = association.assoc_type
        )
        assoc.save()

    def getAssociation(self, server_url, handle=None):
        """Return the most recently issued valid association, or None.

        When ``handle`` is given, only associations with that handle are
        considered.  Expired rows encountered along the way are deleted
        from the database as a side effect.
        """
        assocs = []
        if handle is not None:
            assocs = Association.objects.filter(
                server_url = server_url, handle = handle
            )
        else:
            assocs = Association.objects.filter(
                server_url = server_url
            )
        if not assocs:
            return None
        associations = []
        expired = []
        for assoc in assocs:
            # Re-hydrate the python-openid association object from the row.
            association = OIDAssociation(
                assoc.handle, base64.decodestring(assoc.secret), assoc.issued,
                assoc.lifetime, assoc.assoc_type
            )
            if association.getExpiresIn() == 0:
                expired.append(assoc)
            else:
                # Keyed by issue time so the sort below puts the newest last.
                associations.append((association.issued, association))
        # Purge expired database rows.
        for assoc in expired:
            assoc.delete()
        if not associations:
            return None
        associations.sort()
        return associations[-1][1]

    def removeAssociation(self, server_url, handle):
        """Delete matching associations; return True if any existed."""
        assocs = list(Association.objects.filter(
            server_url = server_url, handle = handle
        ))
        assocs_exist = len(assocs) > 0
        for assoc in assocs:
            assoc.delete()
        return assocs_exist

    def useNonce(self, server_url, timestamp, salt):
        """Record a nonce; return True only on its first use within skew."""
        # Reject outright when the timestamp falls outside the allowed
        # clock skew window.
        if abs(timestamp - time.time()) > openid.store.nonce.SKEW:
            return False
        query = [
            Q(server_url__exact=server_url),
            Q(timestamp__exact=timestamp),
            Q(salt__exact=salt),
        ]
        try:
            # ``reduce`` is the Python 2 builtin; this ANDs the Q filters.
            ononce = Nonce.objects.get(reduce(operator.and_, query))
        except Nonce.DoesNotExist:
            # First sighting of this nonce: record it and accept.
            ononce = Nonce(
                server_url=server_url,
                timestamp=timestamp,
                salt=salt
            )
            ononce.save()
            return True
        # Nonce already recorded: treat as a replay and refuse it.
        return False

    def cleanupNonces(self, _now=None):
        """Delete nonces older than the skew window; return count removed."""
        if _now is None:
            _now = int(time.time())
        expired = Nonce.objects.filter(timestamp__lt=(_now - openid.store.nonce.SKEW))
        count = expired.count()
        if count:
            expired.delete()
        return count

    def cleanupAssociations(self):
        """Delete associations past their lifetime; return count removed."""
        now = int(time.time())
        # Raw SQL predicate: there is no single column holding the expiry.
        expired = Association.objects.extra(
            where=['issued + lifetime < %d' % now])
        count = expired.count()
        if count:
            expired.delete()
        return count

    def getAuthKey(self):
        """Derive a short, stable auth key from Django's SECRET_KEY."""
        # Use first AUTH_KEY_LEN characters of md5 hash of SECRET_KEY
        # NOTE(review): AUTH_KEY_LEN is not defined in this class --
        # presumably inherited from the python-openid base store; confirm.
        return _md5(settings.SECRET_KEY).hexdigest()[:self.AUTH_KEY_LEN]
def isDumb(self):
return False | apache-2.0 |
renatofb/weblate | weblate/trans/tests/test_checks.py | 11 | 5598 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Helpers for quality checks tests.
"""
from django.test import TestCase
import uuid
class MockLanguage(object):
    """Stand-in for a Weblate language object.

    Only carries the language ``code`` attribute that quality checks read.
    """

    def __init__(self, code='cs'):
        # Czech is the default language used throughout the check tests.
        self.code = code
class MockProject(object):
    """Stand-in for a Weblate project; only an ``id`` field is needed."""

    def __init__(self):
        # Fixed id: checks only use it as an opaque cache/grouping key.
        self.id = 1
class MockSubProject(object):
    """Stand-in for a Weblate subproject, wired to a mock project."""

    def __init__(self):
        # Fixed id plus a parent project, mirroring the real model layout.
        self.id = 1
        self.project = MockProject()
class MockTranslation(object):
    """Stand-in translation tying together the language and subproject mocks."""

    def __init__(self, code='cs'):
        # A translation knows its target language and owning subproject.
        self.language = MockLanguage(code)
        self.subproject = MockSubProject()
class MockUnit(object):
    """Mock translation unit carrying just the fields quality checks read."""

    def __init__(self, checksum=None, flags='', code='cs', source='',
                 comment=''):
        # Generate a unique checksum when the caller does not care about it.
        self.checksum = str(uuid.uuid1()) if checksum is None else checksum
        self.flags = flags
        self.translation = MockTranslation(code)
        self.source = source
        # Units in these tests are always translated and never fuzzy.
        self.fuzzy = False
        self.translated = True
        self.comment = comment

    @property
    def all_flags(self):
        """Flags split into a list, mirroring the real unit API."""
        return self.flags.split(',')

    def get_source_plurals(self):
        """Return the source as a single-element plural list."""
        return [self.source]
class CheckTestCase(TestCase):
    """Generic test, also serves for testing base class.

    Subclasses set ``check`` to a check instance and fill the fixture
    triples in ``setUp``; each fixture is ``(source, target, flags)`` and
    ``None`` disables the corresponding test method.
    """

    check = None

    def setUp(self):
        self.test_empty = ('', '', '')
        self.test_good_matching = ('string', 'string', '')
        self.test_good_none = ('string', 'string', '')
        self.test_good_ignore = None
        self.test_failure_1 = None
        self.test_failure_2 = None
        self.test_failure_3 = None
        # Triple carrying the check's ignore flag, so firing on it is a bug.
        ignore_flag = self.check.ignore_string if self.check else ''
        self.test_ignore_check = ('x', 'x', ignore_flag)

    def do_test(self, expected, data, lang='cs'):
        """Run a single check when fixture data is available."""
        if data is None or self.check is None:
            return
        source, target, flags = data
        result = self.check.check_single(
            source,
            target,
            MockUnit(None, flags, lang),
            0
        )
        if expected:
            self.assertTrue(
                result,
                'Check did not fire for "%s"/"%s" (%s)' % data
            )
        else:
            self.assertFalse(
                result,
                'Check did fire for "%s"/"%s" (%s)' % data
            )

    def test_single_good_matching(self):
        self.do_test(False, self.test_good_matching)

    def test_single_good_none(self):
        self.do_test(False, self.test_good_none)

    def test_single_good_ignore(self):
        self.do_test(False, self.test_good_ignore)

    def test_single_empty(self):
        self.do_test(False, self.test_empty)

    def test_single_failure_1(self):
        self.do_test(True, self.test_failure_1)

    def test_single_failure_2(self):
        self.do_test(True, self.test_failure_2)

    def test_single_failure_3(self):
        self.do_test(True, self.test_failure_3)

    def test_check_good_matching_singular(self):
        if self.check is None:
            return
        source, target, flags = self.test_good_matching
        self.assertFalse(
            self.check.check_target(
                [source],
                [target],
                MockUnit(None, flags)
            )
        )

    def test_check_good_matching_plural(self):
        if self.check is None:
            return
        source, target, flags = self.test_good_matching
        self.assertFalse(
            self.check.check_target(
                [source] * 2,
                [target] * 3,
                MockUnit(None, flags)
            )
        )

    def test_check_failure_1_singular(self):
        if self.test_failure_1 is None or self.check is None:
            return
        source, target, flags = self.test_failure_1
        self.assertTrue(
            self.check.check_target(
                [source],
                [target],
                MockUnit(None, flags)
            )
        )

    def test_check_failure_1_plural(self):
        if self.test_failure_1 is None or self.check is None:
            return
        source, target, flags = self.test_failure_1
        self.assertTrue(
            self.check.check_target(
                [source] * 2,
                [target] * 3,
                MockUnit(None, flags)
            )
        )

    def test_check_ignore_check(self):
        if self.check is None:
            return
        source, target, flags = self.test_ignore_check
        self.assertFalse(
            self.check.check_target(
                [source] * 2,
                [target] * 3,
                MockUnit(None, flags)
            )
        )
| gpl-3.0 |
Ryanglambert/pybrain | pybrain/optimization/finitedifference/fd.py | 31 | 1941 | __author__ = 'Thomas Rueckstiess, ruecksti@in.tum.de, Tom Schaul'
from scipy import ones, zeros, dot, ravel, random
from scipy.linalg import pinv
from pybrain.auxiliary import GradientDescent
from pybrain.optimization.optimizer import ContinuousOptimizer
class FiniteDifferences(ContinuousOptimizer):
    """ Basic finite difference method. """

    # Initial perturbation magnitude; shrunk by `gamma` on every call to
    # perturbation(), so exploration narrows over time.
    epsilon = 1.0
    gamma = 0.999
    # Number of perturbed evaluations used per gradient estimate.
    batchSize = 10

    # gradient descent parameters
    learningRate = 0.1
    learningRateDecay = None
    momentum = 0.0
    rprop = False

    def _setInitEvaluable(self, evaluable):
        # Configure the internal GradientDescent helper from the class
        # parameters, then seed it with the initial parameter vector.
        ContinuousOptimizer._setInitEvaluable(self, evaluable)
        self.current = self._initEvaluable
        self.gd = GradientDescent()
        self.gd.alpha = self.learningRate
        if self.learningRateDecay is not None:
            self.gd.alphadecay = self.learningRateDecay
        self.gd.momentum = self.momentum
        self.gd.rprop = self.rprop
        self.gd.init(self._initEvaluable)

    def perturbation(self):
        """ produce a parameter perturbation """
        # Uniform noise in [-epsilon, epsilon] per parameter dimension.
        deltas = random.uniform(-self.epsilon, self.epsilon, self.numParameters)
        # reduce epsilon by factor gamma
        self.epsilon *= self.gamma
        return deltas

    def _learnStep(self):
        """ calls the gradient calculation function and executes a step in direction
        of the gradient, scaled with a small learning rate alpha. """
        # initialize matrix D and vector R
        # D holds one perturbation per row, R the matching evaluations.
        D = ones((self.batchSize, self.numParameters))
        R = zeros((self.batchSize, 1))

        # calculate the gradient with pseudo inverse
        for i in range(self.batchSize):
            deltas = self.perturbation()
            x = self.current + deltas
            D[i, :] = deltas
            R[i, :] = self._oneEvaluation(x)
        # Least-squares solution of D * beta ~= R via the pseudo-inverse.
        beta = dot(pinv(D), R)
        gradient = ravel(beta)

        # update the weights
        self.current = self.gd(gradient)
| bsd-3-clause |
veronicagg/autorest | src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/BodyDictionary/setup.py | 28 | 1145 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# coding: utf-8
from setuptools import setup, find_packages

# Package identity of the generated AutoRest test client.
NAME = "autorestswaggerbatdictionaryservice"
VERSION = "1.0.0"

# To install the library, run `python setup.py install`.
# Prerequisite: setuptools (http://pypi.python.org/pypi/setuptools).
REQUIRES = ["msrest>=0.2.0"]

setup(
    name=NAME,
    version=VERSION,
    description="AutoRestSwaggerBATdictionaryService",
    author_email="",
    url="",
    keywords=["Swagger", "AutoRestSwaggerBATdictionaryService"],
    install_requires=REQUIRES,
    packages=find_packages(),
    include_package_data=True,
    long_description="""\
Test Infrastructure for AutoRest Swagger BAT
"""
)
| mit |
jamessy/select-sc | node_modules/npm-shrinkwrap/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/ninja.py | 372 | 89149 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import hashlib
import json
import multiprocessing
import os.path
import re
import signal
import subprocess
import sys
import gyp
import gyp.common
import gyp.msvs_emulation
import gyp.MSVSUtil as MSVSUtil
import gyp.xcode_emulation
from cStringIO import StringIO
from gyp.common import GetEnvironFallback
import gyp.ninja_syntax as ninja_syntax
# Expansions gyp performs while evaluating input files; values are either
# literal strings or the special $!/$| placeholders that ExpandSpecial
# rewrites before the .ninja files are written out.
generator_default_variables = {
  'EXECUTABLE_PREFIX': '',
  'EXECUTABLE_SUFFIX': '',
  'STATIC_LIB_PREFIX': 'lib',
  'STATIC_LIB_SUFFIX': '.a',
  'SHARED_LIB_PREFIX': 'lib',

  # Gyp expects the following variables to be expandable by the build
  # system to the appropriate locations.  Ninja prefers paths to be
  # known at gyp time.  To resolve this, introduce special
  # variables starting with $! and $| (which begin with a $ so gyp knows it
  # should be treated specially, but is otherwise an invalid
  # ninja/shell variable) that are passed to gyp here but expanded
  # before writing out into the target .ninja files; see
  # ExpandSpecial.
  # $! is used for variables that represent a path and that can only appear at
  # the start of a string, while $| is used for variables that can appear
  # anywhere in a string.
  'INTERMEDIATE_DIR': '$!INTERMEDIATE_DIR',
  'SHARED_INTERMEDIATE_DIR': '$!PRODUCT_DIR/gen',
  'PRODUCT_DIR': '$!PRODUCT_DIR',
  'CONFIGURATION_NAME': '$|CONFIGURATION_NAME',

  # Special variables that may be used by gyp 'rule' targets.
  # We generate definitions for these variables on the fly when processing a
  # rule.
  'RULE_INPUT_ROOT': '${root}',
  'RULE_INPUT_DIRNAME': '${dirname}',
  'RULE_INPUT_PATH': '${source}',
  'RULE_INPUT_EXT': '${ext}',
  'RULE_INPUT_NAME': '${name}',
}
# Hooks the gyp generator framework looks up on every generator module.
# Placates pylint.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None

# TODO: figure out how to not build extra host objects in the non-cross-compile
# case when this is enabled, and enable unconditionally.
# Truthy when any cross-compile override is present in the environment.
generator_supports_multiple_toolsets = (
  os.environ.get('GYP_CROSSCOMPILE') or
  os.environ.get('AR_host') or
  os.environ.get('CC_host') or
  os.environ.get('CXX_host') or
  os.environ.get('AR_target') or
  os.environ.get('CC_target') or
  os.environ.get('CXX_target'))
def StripPrefix(arg, prefix):
  """Return arg with a leading prefix removed, or arg unchanged."""
  return arg[len(prefix):] if arg.startswith(prefix) else arg
def QuoteShellArgument(arg, flavor):
  """Quote a string such that it will be interpreted as a single argument
  by the shell."""
  # Rather than enumerating the dangerous shell characters, whitelist the
  # common benign ones and quote everything else.
  if re.match(r'^[a-zA-Z0-9_=.\\/-]+$', arg):
    return arg  # No quoting necessary.
  if flavor == 'win':
    return gyp.msvs_emulation.QuoteForRspFile(arg)
  # POSIX: single-quote, with each embedded quote spliced in as '"'"'.
  escaped = arg.replace("'", "'" + '"\'"' + "'")
  return "'%s'" % escaped
def Define(d, flavor):
  """Return a ninja- and shell-escaped '-D' parameter for a preprocessor
  define."""
  if flavor == 'win':
    # cl.exe replaces literal '#' characters with '=' in preprocessor
    # definitions for some reason; octal-encode the '#' to work around it.
    d = d.replace('#', '\\%03o' % ord('#'))
  escaped = ninja_syntax.escape('-D' + d)
  return QuoteShellArgument(escaped, flavor)
def AddArch(output, arch):
  """Insert an arch string between an output path's root and extension."""
  root, ext = os.path.splitext(output)
  return '%s.%s%s' % (root, arch, ext)
class Target:
  """Records the paths produced while building a single gyp target.

  Conceptually, building a target is a series of optional steps:

    1) actions/rules/copies generate sources/resources/etc.,
    2) compiles generate .o files,
    3) the link step produces a binary (library/executable),
    4) a mac bundle step merges the above.

  A dependent target usually only needs the last output of this pipeline,
  but some commands must reach inside the box -- e.g. linking against the
  static library produced by step 3.  So each member stores a concrete
  path to a single file, and methods compute derived values such as "the
  last output of the target".
  """

  def __init__(self, type):
    # Gyp type ("static_library", etc.) of this target.
    self.type = type
    # Stamp written once every input needed by dependent actions is ready.
    self.preaction_stamp = None
    # Stamp written once every input needed by dependent compiles is ready.
    self.precompile_stamp = None
    # Completion marker for actions/rules/copies, if any.
    self.actions_stamp = None
    # Output of the link step, if any.
    self.binary = None
    # Completion marker for building the mac bundle, if any.
    self.bundle = None
    # Windows incremental linking links against the .objs composing a .lib
    # rather than the .lib itself; that list is kept here.
    self.component_objs = None
    # Windows only: dependents link against the import .lib (not the dll),
    # so the import library is tracked separately.
    self.import_lib = None

  def Linkable(self):
    """True when other targets can link against this one."""
    return self.type in ('static_library', 'shared_library')

  def UsesToc(self, flavor):
    """True when the target should produce a restat rule based on a .TOC
    file."""
    # The .TOC should belong to the binary rather than FinalOutput(), but
    # the naive approach would put the TOC inside a bundle -- so bundles
    # (and Windows) are excluded for now.
    if flavor == 'win' or self.bundle:
      return False
    return self.type in ('shared_library', 'loadable_module')

  def PreActionInput(self, flavor):
    """Path dependent action steps should depend on, if any."""
    if self.UsesToc(flavor):
      return self.FinalOutput() + '.TOC'
    return self.FinalOutput() or self.preaction_stamp

  def PreCompileInput(self):
    """Path dependent compile steps should depend on, if any."""
    return self.actions_stamp or self.precompile_stamp

  def FinalOutput(self):
    """The last output of the target, which depends on all prior steps."""
    return self.bundle or self.binary or self.actions_stamp
# A small discourse on paths as used within the Ninja build:
# All files we produce (both at gyp and at build time) appear in the
# build directory (e.g. out/Debug).
#
# Paths within a given .gyp file are always relative to the directory
# containing the .gyp file. Call these "gyp paths". This includes
# sources as well as the starting directory a given gyp rule/action
# expects to be run from. We call the path from the source root to
# the gyp file the "base directory" within the per-.gyp-file
# NinjaWriter code.
#
# All paths as written into the .ninja files are relative to the build
# directory. Call these paths "ninja paths".
#
# We translate between these two notions of paths with two helper
# functions:
#
# - GypPathToNinja translates a gyp path (i.e. relative to the .gyp file)
# into the equivalent ninja path.
#
# - GypPathToUniqueOutput translates a gyp path into a ninja path to write
# an output file; the result can be namespaced such that it is unique
# to the input file name as well as the output target name.
class NinjaWriter:
  def __init__(self, qualified_target, target_outputs, base_dir, build_dir,
               output_file, toplevel_build, output_file_name, flavor,
               toplevel_dir=None):
    """
    base_dir: path from source root to directory containing this gyp file,
              by gyp semantics, all input paths are relative to this
    build_dir: path from source root to build output
    toplevel_dir: path to the toplevel directory
    """

    # qualified_target: presumably the fully-qualified gyp target name
    # ("path/to/file.gyp:target#toolset") -- TODO confirm with callers.
    self.qualified_target = qualified_target
    # Maps qualified dependency names to their already-written Target
    # objects (WriteSpec reads PreActionInput/PreCompileInput off these).
    self.target_outputs = target_outputs
    self.base_dir = base_dir
    self.build_dir = build_dir
    # All ninja rules for this target are emitted through this writer.
    self.ninja = ninja_syntax.Writer(output_file)
    self.toplevel_build = toplevel_build
    self.output_file_name = output_file_name

    self.flavor = flavor
    self.abs_build_dir = None
    if toplevel_dir is not None:
      self.abs_build_dir = os.path.abspath(os.path.join(toplevel_dir,
                                                        build_dir))
    self.obj_ext = '.obj' if flavor == 'win' else '.o'
    if flavor == 'win':
      # See docstring of msvs_emulation.GenerateEnvironmentFiles().
      # Maps arch name -> environment file referenced from ninja rules.
      self.win_env = {}
      for arch in ('x86', 'x64'):
        self.win_env[arch] = 'environment.' + arch

    # Relative path from build output dir to base dir.
    build_to_top = gyp.common.InvertRelativePath(build_dir, toplevel_dir)
    self.build_to_base = os.path.join(build_to_top, base_dir)
    # Relative path from base dir to build dir.
    base_to_top = gyp.common.InvertRelativePath(base_dir, toplevel_dir)
    self.base_to_build = os.path.join(base_to_top, build_dir)
def ExpandSpecial(self, path, product_dir=None):
"""Expand specials like $!PRODUCT_DIR in |path|.
If |product_dir| is None, assumes the cwd is already the product
dir. Otherwise, |product_dir| is the relative path to the product
dir.
"""
PRODUCT_DIR = '$!PRODUCT_DIR'
if PRODUCT_DIR in path:
if product_dir:
path = path.replace(PRODUCT_DIR, product_dir)
else:
path = path.replace(PRODUCT_DIR + '/', '')
path = path.replace(PRODUCT_DIR + '\\', '')
path = path.replace(PRODUCT_DIR, '.')
INTERMEDIATE_DIR = '$!INTERMEDIATE_DIR'
if INTERMEDIATE_DIR in path:
int_dir = self.GypPathToUniqueOutput('gen')
# GypPathToUniqueOutput generates a path relative to the product dir,
# so insert product_dir in front if it is provided.
path = path.replace(INTERMEDIATE_DIR,
os.path.join(product_dir or '', int_dir))
CONFIGURATION_NAME = '$|CONFIGURATION_NAME'
path = path.replace(CONFIGURATION_NAME, self.config_name)
return path
def ExpandRuleVariables(self, path, root, dirname, source, ext, name):
if self.flavor == 'win':
path = self.msvs_settings.ConvertVSMacros(
path, config=self.config_name)
path = path.replace(generator_default_variables['RULE_INPUT_ROOT'], root)
path = path.replace(generator_default_variables['RULE_INPUT_DIRNAME'],
dirname)
path = path.replace(generator_default_variables['RULE_INPUT_PATH'], source)
path = path.replace(generator_default_variables['RULE_INPUT_EXT'], ext)
path = path.replace(generator_default_variables['RULE_INPUT_NAME'], name)
return path
def GypPathToNinja(self, path, env=None):
"""Translate a gyp path to a ninja path, optionally expanding environment
variable references in |path| with |env|.
See the above discourse on path conversions."""
if env:
if self.flavor == 'mac':
path = gyp.xcode_emulation.ExpandEnvVars(path, env)
elif self.flavor == 'win':
path = gyp.msvs_emulation.ExpandMacros(path, env)
if path.startswith('$!'):
expanded = self.ExpandSpecial(path)
if self.flavor == 'win':
expanded = os.path.normpath(expanded)
return expanded
if '$|' in path:
path = self.ExpandSpecial(path)
assert '$' not in path, path
return os.path.normpath(os.path.join(self.build_to_base, path))
def GypPathToUniqueOutput(self, path, qualified=True):
"""Translate a gyp path to a ninja path for writing output.
If qualified is True, qualify the resulting filename with the name
of the target. This is necessary when e.g. compiling the same
path twice for two separate output targets.
See the above discourse on path conversions."""
path = self.ExpandSpecial(path)
assert not path.startswith('$'), path
# Translate the path following this scheme:
# Input: foo/bar.gyp, target targ, references baz/out.o
# Output: obj/foo/baz/targ.out.o (if qualified)
# obj/foo/baz/out.o (otherwise)
# (and obj.host instead of obj for cross-compiles)
#
# Why this scheme and not some other one?
# 1) for a given input, you can compute all derived outputs by matching
# its path, even if the input is brought via a gyp file with '..'.
# 2) simple files like libraries and stamps have a simple filename.
obj = 'obj'
if self.toolset != 'target':
obj += '.' + self.toolset
path_dir, path_basename = os.path.split(path)
if qualified:
path_basename = self.name + '.' + path_basename
return os.path.normpath(os.path.join(obj, self.base_dir, path_dir,
path_basename))
def WriteCollapsedDependencies(self, name, targets):
"""Given a list of targets, return a path for a single file
representing the result of building all the targets or None.
Uses a stamp file if necessary."""
assert targets == filter(None, targets), targets
if len(targets) == 0:
return None
if len(targets) > 1:
stamp = self.GypPathToUniqueOutput(name + '.stamp')
targets = self.ninja.build(stamp, 'stamp', targets)
self.ninja.newline()
return targets[0]
def _SubninjaNameForArch(self, arch):
output_file_base = os.path.splitext(self.output_file_name)[0]
return '%s.%s.ninja' % (output_file_base, arch)
  def WriteSpec(self, spec, config_name, generator_flags):
    """The main entry point for NinjaWriter: write the build rules for a spec.

    Returns a Target object, which represents the output paths for this spec.
    Returns None if there are no outputs (e.g. a settings-only 'none' type
    target)."""

    self.config_name = config_name
    self.name = spec['target_name']
    self.toolset = spec['toolset']
    config = spec['configurations'][config_name]
    self.target = Target(spec['type'])
    self.is_standalone_static_library = bool(
        spec.get('standalone_static_library', 0))
    # Track if this target contains any C++ files, to decide if gcc or g++
    # should be used for linking.
    self.uses_cpp = False

    self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
    self.xcode_settings = self.msvs_settings = None
    if self.flavor == 'mac':
      self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
    if self.flavor == 'win':
      self.msvs_settings = gyp.msvs_emulation.MsvsSettings(spec,
                                                           generator_flags)
      arch = self.msvs_settings.GetArch(config_name)
      # Point $cc/$cxx at the per-arch compiler variables and select the
      # matching environment file (see win_env set up in __init__).
      self.ninja.variable('arch', self.win_env[arch])
      self.ninja.variable('cc', '$cl_' + arch)
      self.ninja.variable('cxx', '$cl_' + arch)

    if self.flavor == 'mac':
      self.archs = self.xcode_settings.GetActiveArchs(config_name)
      if len(self.archs) > 1:
        # Fat binaries: one subninja writer per architecture.
        self.arch_subninjas = dict(
            (arch, ninja_syntax.Writer(
                OpenOutput(os.path.join(self.toplevel_build,
                                        self._SubninjaNameForArch(arch)),
                           'w')))
            for arch in self.archs)

    # Compute predepends for all rules.
    # actions_depends is the dependencies this target depends on before running
    # any of its action/rule/copy steps.
    # compile_depends is the dependencies this target depends on before running
    # any of its compile steps.
    actions_depends = []
    compile_depends = []
    # TODO(evan): it is rather confusing which things are lists and which
    # are strings. Fix these.
    if 'dependencies' in spec:
      for dep in spec['dependencies']:
        if dep in self.target_outputs:
          target = self.target_outputs[dep]
          actions_depends.append(target.PreActionInput(self.flavor))
          compile_depends.append(target.PreCompileInput())
    actions_depends = filter(None, actions_depends)
    compile_depends = filter(None, compile_depends)
    actions_depends = self.WriteCollapsedDependencies('actions_depends',
                                                      actions_depends)
    compile_depends = self.WriteCollapsedDependencies('compile_depends',
                                                      compile_depends)
    self.target.preaction_stamp = actions_depends
    self.target.precompile_stamp = compile_depends

    # Write out actions, rules, and copies.  These must happen before we
    # compile any sources, so compute a list of predependencies for sources
    # while we do it.
    extra_sources = []
    mac_bundle_depends = []
    self.target.actions_stamp = self.WriteActionsRulesCopies(
        spec, extra_sources, actions_depends, mac_bundle_depends)

    # If we have actions/rules/copies, we depend directly on those, but
    # otherwise we depend on dependent target's actions/rules/copies etc.
    # We never need to explicitly depend on previous target's link steps,
    # because no compile ever depends on them.
    compile_depends_stamp = (self.target.actions_stamp or compile_depends)

    # Write out the compilation steps, if any.
    link_deps = []
    sources = extra_sources + spec.get('sources', [])
    if sources:
      if self.flavor == 'mac' and len(self.archs) > 1:
        # Write subninja file containing compile and link commands scoped to
        # a single arch if a fat binary is being built.
        for arch in self.archs:
          self.ninja.subninja(self._SubninjaNameForArch(arch))

      pch = None
      if self.flavor == 'win':
        gyp.msvs_emulation.VerifyMissingSources(
            sources, self.abs_build_dir, generator_flags, self.GypPathToNinja)
        pch = gyp.msvs_emulation.PrecompiledHeader(
            self.msvs_settings, config_name, self.GypPathToNinja,
            self.GypPathToUniqueOutput, self.obj_ext)
      else:
        pch = gyp.xcode_emulation.MacPrefixHeader(
            self.xcode_settings, self.GypPathToNinja,
            lambda path, lang: self.GypPathToUniqueOutput(path + '-' + lang))
      link_deps = self.WriteSources(
          self.ninja, config_name, config, sources, compile_depends_stamp, pch,
          spec)
      # Some actions/rules output 'sources' that are already object files.
      obj_outputs = [f for f in sources if f.endswith(self.obj_ext)]
      if obj_outputs:
        if self.flavor != 'mac' or len(self.archs) == 1:
          link_deps += [self.GypPathToNinja(o) for o in obj_outputs]
        else:
          print "Warning: Actions/rules writing object files don't work with " \
              "multiarch targets, dropping. (target %s)" % spec['target_name']

    if self.flavor == 'win' and self.target.type == 'static_library':
      self.target.component_objs = link_deps

    # Write out a link step, if needed.
    output = None
    is_empty_bundle = not link_deps and not mac_bundle_depends
    if link_deps or self.target.actions_stamp or actions_depends:
      output = self.WriteTarget(spec, config_name, config, link_deps,
                                self.target.actions_stamp or actions_depends)
      if self.is_mac_bundle:
        mac_bundle_depends.append(output)

    # Bundle all of the above together, if needed.
    if self.is_mac_bundle:
      output = self.WriteMacBundle(spec, mac_bundle_depends, is_empty_bundle)

    if not output:
      return None

    assert self.target.FinalOutput(), output
    return self.target
def _WinIdlRule(self, source, prebuild, outputs):
    """Handle the implicit VS .idl rule for one source file. Fills |outputs|
    with files that are generated."""
    # GetIdlBuildData returns the output directory, the output file list and
    # the variable bindings/flags MSVS would use for this .idl source.
    outdir, output, vars, flags = self.msvs_settings.GetIdlBuildData(
        source, self.config_name)
    outdir = self.GypPathToNinja(outdir)
    def fix_path(path, rel=None):
        # Rebase |path| onto the output directory and expand the MSVS rule
        # variables ($(InputName) etc.) derived from |source|; optionally
        # re-relativize against |rel|.
        path = os.path.join(outdir, path)
        dirname, basename = os.path.split(source)
        root, ext = os.path.splitext(basename)
        path = self.ExpandRuleVariables(
            path, root, dirname, source, ext, basename)
        if rel:
            path = os.path.relpath(path, rel)
        return path
    # Variable values are made relative to outdir so the 'idl' rule can cd.
    vars = [(name, fix_path(value, outdir)) for name, value in vars]
    output = [fix_path(p) for p in output]
    vars.append(('outdir', outdir))
    vars.append(('idlflags', flags))
    input = self.GypPathToNinja(source)
    self.ninja.build(output, 'idl', input,
                     variables=vars, order_only=prebuild)
    outputs.extend(output)
def WriteWinIdlFiles(self, spec, prebuild):
    """Writes rules to match MSVS's implicit idl handling.

    Returns the list of generated files, or an empty list when the target
    declares its own explicit rules for .idl sources."""
    assert self.flavor == 'win'
    if self.msvs_settings.HasExplicitIdlRules(spec):
        return []
    generated = []
    idl_sources = [s for s in spec['sources'] if s.endswith('.idl')]
    for idl_source in idl_sources:
        self._WinIdlRule(idl_source, prebuild, generated)
    return generated
def WriteActionsRulesCopies(self, spec, extra_sources, prebuild,
                            mac_bundle_depends):
    """Write out the Actions, Rules, and Copies steps. Return a path
    representing the outputs of these steps."""
    outputs = []
    if self.is_mac_bundle:
        # Copy the list: WriteRules may remove entries that a rule consumes.
        mac_bundle_resources = spec.get('mac_bundle_resources', [])[:]
    else:
        mac_bundle_resources = []
    extra_mac_bundle_resources = []
    if 'actions' in spec:
        outputs += self.WriteActions(spec['actions'], extra_sources, prebuild,
                                     extra_mac_bundle_resources)
    if 'rules' in spec:
        outputs += self.WriteRules(spec['rules'], extra_sources, prebuild,
                                   mac_bundle_resources,
                                   extra_mac_bundle_resources)
    if 'copies' in spec:
        outputs += self.WriteCopies(spec['copies'], prebuild, mac_bundle_depends)
    if 'sources' in spec and self.flavor == 'win':
        outputs += self.WriteWinIdlFiles(spec, prebuild)
    # Collapse all step outputs behind a single stamp so later steps can
    # depend on one node.
    stamp = self.WriteCollapsedDependencies('actions_rules_copies', outputs)
    if self.is_mac_bundle:
        self.WriteMacBundleResources(
            extra_mac_bundle_resources + mac_bundle_resources, mac_bundle_depends)
        self.WriteMacInfoPlist(mac_bundle_depends)
    return stamp
def GenerateDescription(self, verb, message, fallback):
    """Generate and return a description of a build step.

    |verb| is the short summary, e.g. ACTION or RULE.
    |message| is a hand-written description, or None if not available.
    |fallback| is the gyp-level name of the step, usable as a fallback.
    """
    tag = verb if self.toolset == 'target' else '%s(%s)' % (verb, self.toolset)
    if message:
        return '%s %s' % (tag, self.ExpandSpecial(message))
    return '%s %s: %s' % (tag, self.name, fallback)
def WriteActions(self, actions, extra_sources, prebuild,
                 extra_mac_bundle_resources):
    """Write a ninja rule + build edge for each gyp 'action'.

    Appends action outputs to |extra_sources| / |extra_mac_bundle_resources|
    when the action requests it, and returns the list of all outputs."""
    # Actions cd into the base directory.
    env = self.GetSortedXcodeEnv()
    if self.flavor == 'win':
        env = self.msvs_settings.GetVSMacroEnv(
            '$!PRODUCT_DIR', config=self.config_name)
    all_outputs = []
    for action in actions:
        # First write out a rule for the action.
        # The md5 of the qualified target disambiguates equally-named actions
        # in different targets (ninja rule names are global).
        name = '%s_%s' % (action['action_name'],
                          hashlib.md5(self.qualified_target).hexdigest())
        description = self.GenerateDescription('ACTION',
                                               action.get('message', None),
                                               name)
        is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(action)
                     if self.flavor == 'win' else False)
        args = action['action']
        rule_name, _ = self.WriteNewNinjaRule(name, args, description,
                                              is_cygwin, env=env)

        inputs = [self.GypPathToNinja(i, env) for i in action['inputs']]
        if int(action.get('process_outputs_as_sources', False)):
            extra_sources += action['outputs']
        if int(action.get('process_outputs_as_mac_bundle_resources', False)):
            extra_mac_bundle_resources += action['outputs']
        outputs = [self.GypPathToNinja(o, env) for o in action['outputs']]

        # Then write out an edge using the rule.
        self.ninja.build(outputs, rule_name, inputs,
                         order_only=prebuild)
        all_outputs += outputs

        self.ninja.newline()

    return all_outputs
def WriteRules(self, rules, extra_sources, prebuild,
               mac_bundle_resources, extra_mac_bundle_resources):
    """Write a ninja rule plus one build edge per rule source for each gyp
    'rule'.

    Generated files are appended to |extra_sources| and
    |extra_mac_bundle_resources| when the rule asks for it; sources consumed
    as bundle resources are removed from |mac_bundle_resources|. Returns the
    list of all outputs."""
    env = self.GetSortedXcodeEnv()
    all_outputs = []
    for rule in rules:
        # First write out a rule for the rule action.
        name = '%s_%s' % (rule['rule_name'],
                          hashlib.md5(self.qualified_target).hexdigest())
        # Skip a rule with no action and no inputs.
        if 'action' not in rule and not rule.get('rule_sources', []):
            continue
        args = rule['action']
        description = self.GenerateDescription(
            'RULE',
            rule.get('message', None),
            ('%s ' + generator_default_variables['RULE_INPUT_PATH']) % name)
        is_cygwin = (self.msvs_settings.IsRuleRunUnderCygwin(rule)
                     if self.flavor == 'win' else False)
        rule_name, args = self.WriteNewNinjaRule(
            name, args, description, is_cygwin, env=env)

        # TODO: if the command references the outputs directly, we should
        # simplify it to just use $out.

        # Rules can potentially make use of some special variables which
        # must vary per source file.
        # Compute the list of variables we'll need to provide.
        special_locals = ('source', 'root', 'dirname', 'ext', 'name')
        needed_variables = set(['source'])
        for argument in args:
            for var in special_locals:
                if ('${%s}' % var) in argument:
                    needed_variables.add(var)

        def cygwin_munge(path):
            # Commands run under the cygwin shell want forward slashes.
            if is_cygwin:
                return path.replace('\\', '/')
            return path

        # For each source file, write an edge that generates all the outputs.
        for source in rule.get('rule_sources', []):
            source = os.path.normpath(source)
            dirname, basename = os.path.split(source)
            root, ext = os.path.splitext(basename)

            # Gather the list of inputs and outputs, expanding $vars if
            # possible.
            outputs = [self.ExpandRuleVariables(o, root, dirname,
                                                source, ext, basename)
                       for o in rule['outputs']]
            inputs = [self.ExpandRuleVariables(i, root, dirname,
                                               source, ext, basename)
                      for i in rule.get('inputs', [])]

            if int(rule.get('process_outputs_as_sources', False)):
                extra_sources += outputs

            was_mac_bundle_resource = source in mac_bundle_resources
            if was_mac_bundle_resource or \
                int(rule.get('process_outputs_as_mac_bundle_resources', False)):
                extra_mac_bundle_resources += outputs
                # Note: This is n_resources * n_outputs_in_rule. Put to-be-removed
                # items in a set and remove them all in a single pass if this becomes
                # a performance issue.
                if was_mac_bundle_resource:
                    mac_bundle_resources.remove(source)

            extra_bindings = []
            for var in needed_variables:
                if var == 'root':
                    extra_bindings.append(('root', cygwin_munge(root)))
                elif var == 'dirname':
                    # '$dirname' is a parameter to the rule action, which means
                    # it shouldn't be converted to a Ninja path. But we don't
                    # want $!PRODUCT_DIR in there either.
                    dirname_expanded = self.ExpandSpecial(dirname, self.base_to_build)
                    extra_bindings.append(('dirname', cygwin_munge(dirname_expanded)))
                elif var == 'source':
                    # '$source' is a parameter to the rule action, which means
                    # it shouldn't be converted to a Ninja path. But we don't
                    # want $!PRODUCT_DIR in there either.
                    source_expanded = self.ExpandSpecial(source, self.base_to_build)
                    extra_bindings.append(('source', cygwin_munge(source_expanded)))
                elif var == 'ext':
                    extra_bindings.append(('ext', ext))
                elif var == 'name':
                    extra_bindings.append(('name', cygwin_munge(basename)))
                else:
                    # Unreachable: every entry of special_locals is handled above.
                    assert var == None, repr(var)

            inputs = [self.GypPathToNinja(i, env) for i in inputs]
            outputs = [self.GypPathToNinja(o, env) for o in outputs]
            # unique_name feeds the per-edge rsp file name on Windows.
            extra_bindings.append(('unique_name',
                                   hashlib.md5(outputs[0]).hexdigest()))
            self.ninja.build(outputs, rule_name, self.GypPathToNinja(source),
                             implicit=inputs,
                             order_only=prebuild,
                             variables=extra_bindings)

            all_outputs.extend(outputs)

    return all_outputs
def WriteCopies(self, copies, prebuild, mac_bundle_depends):
    """Write one 'copy' build edge per file in each gyp 'copies' entry.

    Returns the list of copy destinations."""
    outputs = []
    env = self.GetSortedXcodeEnv()
    for copy in copies:
        for path in copy['files']:
            # Normalize the path so trailing slashes don't confuse us.
            path = os.path.normpath(path)
            basename = os.path.split(path)[1]
            src = self.GypPathToNinja(path, env)
            dst = self.GypPathToNinja(os.path.join(copy['destination'], basename),
                                      env)
            outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild)
            if self.is_mac_bundle:
                # gyp has mac_bundle_resources to copy things into a bundle's
                # Resources folder, but there's no built-in way to copy files to other
                # places in the bundle. Hence, some targets use copies for this. Check
                # if this file is copied into the current bundle, and if so add it to
                # the bundle depends so that dependent targets get rebuilt if the copy
                # input changes.
                if dst.startswith(self.xcode_settings.GetBundleContentsFolderPath()):
                    mac_bundle_depends.append(dst)
    return outputs
def WriteMacBundleResources(self, resources, bundle_depends):
    """Writes ninja edges for 'mac_bundle_resources'.

    Each resource is copied into the bundle via the mac_tool rule; every
    produced output is appended to |bundle_depends|."""
    ninja_resources = map(self.GypPathToNinja, resources)
    product_dir = generator_default_variables['PRODUCT_DIR']
    pairs = gyp.xcode_emulation.GetMacBundleResources(
        product_dir, self.xcode_settings, ninja_resources)
    for dst, res in pairs:
        dst = self.ExpandSpecial(dst)
        self.ninja.build(dst, 'mac_tool', res,
                         variables=[('mactool_cmd', 'copy-bundle-resource')])
        bundle_depends.append(dst)
def WriteMacInfoPlist(self, bundle_depends):
    """Write build rules for bundle Info.plist files."""
    info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
        generator_default_variables['PRODUCT_DIR'],
        self.xcode_settings, self.GypPathToNinja)
    if not info_plist:
        return
    out = self.ExpandSpecial(out)
    if defines:
        # Create an intermediate file to store preprocessed results.
        intermediate_plist = self.GypPathToUniqueOutput(
            os.path.basename(info_plist))
        defines = ' '.join([Define(d, self.flavor) for d in defines])
        info_plist = self.ninja.build(
            intermediate_plist, 'preprocess_infoplist', info_plist,
            variables=[('defines',defines)])
    # The copy_infoplist tool runs with the Xcode env exported, plus any
    # extra plist keys (JSON-encoded, shell-quoted) to merge in.
    env = self.GetSortedXcodeEnv(additional_settings=extra_env)
    env = self.ComputeExportEnvString(env)
    keys = self.xcode_settings.GetExtraPlistItems(self.config_name)
    keys = QuoteShellArgument(json.dumps(keys), self.flavor)
    self.ninja.build(out, 'copy_infoplist', info_plist,
                     variables=[('env', env), ('keys', keys)])
    bundle_depends.append(out)
def WriteSources(self, ninja_file, config_name, config, sources, predepends,
                 precompiled_header, spec):
    """Write build rules to compile all of |sources|.

    Returns the object files (or, for multi-arch mac builds, a dict mapping
    arch to that arch's object files)."""
    if self.toolset == 'host':
        # Point the generic tool variables at their host-toolchain variants.
        for tool in ('ar', 'cc', 'cxx', 'ld', 'ldxx'):
            self.ninja.variable(tool, '$%s_host' % tool)

    if self.flavor != 'mac' or len(self.archs) == 1:
        return self.WriteSourcesForArch(
            self.ninja, config_name, config, sources, predepends,
            precompiled_header, spec)
    # Fat mac binary: compile each arch into its own subninja and collect
    # the per-arch object lists.
    return dict((arch, self.WriteSourcesForArch(
        self.arch_subninjas[arch], config_name, config, sources, predepends,
        precompiled_header, spec, arch=arch))
        for arch in self.archs)
def WriteSourcesForArch(self, ninja_file, config_name, config, sources,
                        predepends, precompiled_header, spec, arch=None):
    """Write build rules to compile all of |sources|."""
    extra_defines = []
    if self.flavor == 'mac':
        cflags = self.xcode_settings.GetCflags(config_name, arch=arch)
        cflags_c = self.xcode_settings.GetCflagsC(config_name)
        cflags_cc = self.xcode_settings.GetCflagsCC(config_name)
        # ObjC/ObjC++ flags reference the plain C/C++ flag variables rather
        # than duplicating their contents.
        cflags_objc = ['$cflags_c'] + \
            self.xcode_settings.GetCflagsObjC(config_name)
        cflags_objcc = ['$cflags_cc'] + \
            self.xcode_settings.GetCflagsObjCC(config_name)
    elif self.flavor == 'win':
        cflags = self.msvs_settings.GetCflags(config_name)
        cflags_c = self.msvs_settings.GetCflagsC(config_name)
        cflags_cc = self.msvs_settings.GetCflagsCC(config_name)
        extra_defines = self.msvs_settings.GetComputedDefines(config_name)
        # See comment at cc_command for why there's two .pdb files.
        pdbpath_c = pdbpath_cc = self.msvs_settings.GetCompilerPdbName(
            config_name, self.ExpandSpecial)
        if not pdbpath_c:
            obj = 'obj'
            if self.toolset != 'target':
                obj += '.' + self.toolset
            pdbpath = os.path.normpath(os.path.join(obj, self.base_dir, self.name))
            pdbpath_c = pdbpath + '.c.pdb'
            pdbpath_cc = pdbpath + '.cc.pdb'
        self.WriteVariableList(ninja_file, 'pdbname_c', [pdbpath_c])
        self.WriteVariableList(ninja_file, 'pdbname_cc', [pdbpath_cc])
        self.WriteVariableList(ninja_file, 'pchprefix', [self.name])
    else:
        cflags = config.get('cflags', [])
        cflags_c = config.get('cflags_c', [])
        cflags_cc = config.get('cflags_cc', [])

        # Respect environment variables related to build, but target-specific
        # flags can still override them.
        if self.toolset == 'target':
            cflags_c = (os.environ.get('CPPFLAGS', '').split() +
                        os.environ.get('CFLAGS', '').split() + cflags_c)
            cflags_cc = (os.environ.get('CPPFLAGS', '').split() +
                         os.environ.get('CXXFLAGS', '').split() + cflags_cc)

    defines = config.get('defines', []) + extra_defines
    self.WriteVariableList(ninja_file, 'defines',
                           [Define(d, self.flavor) for d in defines])
    if self.flavor == 'win':
        self.WriteVariableList(ninja_file, 'rcflags',
            [QuoteShellArgument(self.ExpandSpecial(f), self.flavor)
             for f in self.msvs_settings.GetRcflags(config_name,
                                                    self.GypPathToNinja)])

    include_dirs = config.get('include_dirs', [])

    env = self.GetSortedXcodeEnv()
    if self.flavor == 'win':
        env = self.msvs_settings.GetVSMacroEnv('$!PRODUCT_DIR',
                                               config=config_name)
        include_dirs = self.msvs_settings.AdjustIncludeDirs(include_dirs,
                                                            config_name)
    self.WriteVariableList(ninja_file, 'includes',
        [QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
         for i in include_dirs])

    pch_commands = precompiled_header.GetPchBuildCommands(arch)
    if self.flavor == 'mac':
        # Most targets use no precompiled headers, so only write these if needed.
        for ext, var in [('c', 'cflags_pch_c'), ('cc', 'cflags_pch_cc'),
                         ('m', 'cflags_pch_objc'), ('mm', 'cflags_pch_objcc')]:
            include = precompiled_header.GetInclude(ext, arch)
            if include: ninja_file.variable(var, include)

    self.WriteVariableList(ninja_file, 'cflags',
                           map(self.ExpandSpecial, cflags))
    self.WriteVariableList(ninja_file, 'cflags_c',
                           map(self.ExpandSpecial, cflags_c))
    self.WriteVariableList(ninja_file, 'cflags_cc',
                           map(self.ExpandSpecial, cflags_cc))
    if self.flavor == 'mac':
        self.WriteVariableList(ninja_file, 'cflags_objc',
                               map(self.ExpandSpecial, cflags_objc))
        self.WriteVariableList(ninja_file, 'cflags_objcc',
                               map(self.ExpandSpecial, cflags_objcc))
    ninja_file.newline()
    outputs = []
    has_rc_source = False
    for source in sources:
        # Pick the compile rule (and object extension) from the extension.
        filename, ext = os.path.splitext(source)
        ext = ext[1:]
        obj_ext = self.obj_ext
        if ext in ('cc', 'cpp', 'cxx'):
            command = 'cxx'
            self.uses_cpp = True
        elif ext == 'c' or (ext == 'S' and self.flavor != 'win'):
            command = 'cc'
        elif ext == 's' and self.flavor != 'win':  # Doesn't generate .o.d files.
            command = 'cc_s'
        elif (self.flavor == 'win' and ext == 'asm' and
              self.msvs_settings.GetArch(config_name) == 'x86' and
              not self.msvs_settings.HasExplicitAsmRules(spec)):
            # Asm files only get auto assembled for x86 (not x64).
            command = 'asm'
            # Add the _asm suffix as msvs is capable of handling .cc and
            # .asm files of the same name without collision.
            obj_ext = '_asm.obj'
        elif self.flavor == 'mac' and ext == 'm':
            command = 'objc'
        elif self.flavor == 'mac' and ext == 'mm':
            command = 'objcxx'
            self.uses_cpp = True
        elif self.flavor == 'win' and ext == 'rc':
            command = 'rc'
            obj_ext = '.res'
            has_rc_source = True
        else:
            # Ignore unhandled extensions.
            continue
        input = self.GypPathToNinja(source)
        output = self.GypPathToUniqueOutput(filename + obj_ext)
        if arch is not None:
            output = AddArch(output, arch)
        implicit = precompiled_header.GetObjDependencies([input], [output], arch)
        variables = []
        if self.flavor == 'win':
            variables, output, implicit = precompiled_header.GetFlagsModifications(
                input, output, implicit, command, cflags_c, cflags_cc,
                self.ExpandSpecial)
        ninja_file.build(output, command, input,
                         implicit=[gch for _, _, gch in implicit],
                         order_only=predepends, variables=variables)
        outputs.append(output)

    if has_rc_source:
        resource_include_dirs = config.get('resource_include_dirs', include_dirs)
        self.WriteVariableList(ninja_file, 'resource_includes',
            [QuoteShellArgument('-I' + self.GypPathToNinja(i, env), self.flavor)
             for i in resource_include_dirs])

    self.WritePchTargets(ninja_file, pch_commands)

    ninja_file.newline()
    return outputs
def WritePchTargets(self, ninja_file, pch_commands):
    """Writes ninja rules to compile prefix headers.

    |pch_commands| is a list of (gch, lang_flag, lang, input) tuples where
    |lang| is one of 'c', 'cc', 'm' or 'mm'. Emits one build edge per tuple,
    binding the matching per-language pch flags variable.
    """
    if not pch_commands:
        return
    # Dispatch tables hoisted out of the loop (the original rebuilt them per
    # iteration) and renamed so they no longer shadow the builtins `map`
    # and `input`.
    flag_var_for_lang = {
        'c': 'cflags_pch_c',
        'cc': 'cflags_pch_cc',
        'm': 'cflags_pch_objc',
        'mm': 'cflags_pch_objcc',
    }
    rule_for_lang = {'c': 'cc', 'cc': 'cxx', 'm': 'objc', 'mm': 'objcxx'}
    for gch, lang_flag, lang, src in pch_commands:
        var_name = flag_var_for_lang[lang]
        cmd = rule_for_lang.get(lang)
        ninja_file.build(gch, cmd, src, variables=[(var_name, lang_flag)])
def WriteLink(self, spec, config_name, config, link_deps):
    """Write out a link step. Fills out target.binary.

    For multi-arch mac builds, links each arch separately and then merges
    the per-arch binaries into a fat binary with lipo."""
    single_arch = self.flavor != 'mac' or len(self.archs) == 1
    if single_arch:
        return self.WriteLinkForArch(
            self.ninja, spec, config_name, config, link_deps)

    fat_output = self.ComputeOutput(spec)
    thin_binaries = []
    for arch in self.archs:
        thin = self.WriteLinkForArch(
            self.arch_subninjas[arch], spec, config_name, config,
            link_deps[arch], arch=arch)
        thin_binaries.append(thin)
    bindings = []
    if not self.is_mac_bundle:
        self.AppendPostbuildVariable(bindings, spec, fat_output, fat_output)
    self.ninja.build(fat_output, 'lipo', thin_binaries, variables=bindings)
    return fat_output
def WriteLinkForArch(self, ninja_file, spec, config_name, config,
                     link_deps, arch=None):
    """Write out a link step. Fills out target.binary. """
    command = {
        'executable': 'link',
        'loadable_module': 'solink_module',
        'shared_library': 'solink',
    }[spec['type']]
    command_suffix = ''

    implicit_deps = set()
    solibs = set()

    if 'dependencies' in spec:
        # Two kinds of dependencies:
        # - Linkable dependencies (like a .a or a .so): add them to the link line.
        # - Non-linkable dependencies (like a rule that generates a file
        #   and writes a stamp file): add them to implicit_deps
        extra_link_deps = set()
        for dep in spec['dependencies']:
            target = self.target_outputs.get(dep)
            if not target:
                continue
            linkable = target.Linkable()
            if linkable:
                new_deps = []
                if (self.flavor == 'win' and
                        target.component_objs and
                        self.msvs_settings.IsUseLibraryDependencyInputs(config_name)):
                    # Link against the dependency's object files directly.
                    new_deps = target.component_objs
                elif self.flavor == 'win' and target.import_lib:
                    new_deps = [target.import_lib]
                elif target.UsesToc(self.flavor):
                    # Link against the TOC so relinks only happen when the
                    # exported interface changes, not on every rebuild.
                    solibs.add(target.binary)
                    implicit_deps.add(target.binary + '.TOC')
                else:
                    new_deps = [target.binary]
                for new_dep in new_deps:
                    if new_dep not in extra_link_deps:
                        extra_link_deps.add(new_dep)
                        link_deps.append(new_dep)

            final_output = target.FinalOutput()
            if not linkable or final_output != target.binary:
                implicit_deps.add(final_output)

    extra_bindings = []
    if self.uses_cpp and self.flavor != 'win':
        extra_bindings.append(('ld', '$ldxx'))

    output = self.ComputeOutput(spec, arch)
    if arch is None and not self.is_mac_bundle:
        self.AppendPostbuildVariable(extra_bindings, spec, output, output)

    is_executable = spec['type'] == 'executable'
    # The ldflags config key is not used on mac or win. On those platforms
    # linker flags are set via xcode_settings and msvs_settings, respectively.
    env_ldflags = os.environ.get('LDFLAGS', '').split()
    if self.flavor == 'mac':
        ldflags = self.xcode_settings.GetLdflags(config_name,
            self.ExpandSpecial(generator_default_variables['PRODUCT_DIR']),
            self.GypPathToNinja, arch)
        ldflags = env_ldflags + ldflags
    elif self.flavor == 'win':
        manifest_base_name = self.GypPathToUniqueOutput(
            self.ComputeOutputFileName(spec))
        ldflags, intermediate_manifest, manifest_files = \
            self.msvs_settings.GetLdflags(config_name, self.GypPathToNinja,
                                          self.ExpandSpecial, manifest_base_name,
                                          output, is_executable,
                                          self.toplevel_build)
        ldflags = env_ldflags + ldflags
        self.WriteVariableList(ninja_file, 'manifests', manifest_files)
        implicit_deps = implicit_deps.union(manifest_files)
        if intermediate_manifest:
            self.WriteVariableList(
                ninja_file, 'intermediatemanifest', [intermediate_manifest])
        command_suffix = _GetWinLinkRuleNameSuffix(
            self.msvs_settings.IsEmbedManifest(config_name))
        def_file = self.msvs_settings.GetDefFile(self.GypPathToNinja)
        if def_file:
            implicit_deps.add(def_file)
    else:
        # Respect environment variables related to build, but target-specific
        # flags can still override them.
        ldflags = env_ldflags + config.get('ldflags', [])
        if is_executable and len(solibs):
            # Executables that link against shared libraries get an rpath so
            # they can find them at runtime next to the build output.
            rpath = 'lib/'
            if self.toolset != 'target':
                rpath += self.toolset
            ldflags.append('-Wl,-rpath=\$$ORIGIN/%s' % rpath)
            ldflags.append('-Wl,-rpath-link=%s' % rpath)
    self.WriteVariableList(ninja_file, 'ldflags',
                           gyp.common.uniquer(map(self.ExpandSpecial, ldflags)))

    library_dirs = config.get('library_dirs', [])
    if self.flavor == 'win':
        library_dirs = [self.msvs_settings.ConvertVSMacros(l, config_name)
                        for l in library_dirs]
        library_dirs = ['/LIBPATH:' + QuoteShellArgument(self.GypPathToNinja(l),
                                                         self.flavor)
                        for l in library_dirs]
    else:
        library_dirs = [QuoteShellArgument('-L' + self.GypPathToNinja(l),
                                           self.flavor)
                        for l in library_dirs]

    libraries = gyp.common.uniquer(map(self.ExpandSpecial,
                                       spec.get('libraries', [])))
    if self.flavor == 'mac':
        libraries = self.xcode_settings.AdjustLibraries(libraries, config_name)
    elif self.flavor == 'win':
        libraries = self.msvs_settings.AdjustLibraries(libraries)
    self.WriteVariableList(ninja_file, 'libs', library_dirs + libraries)

    linked_binary = output

    if command in ('solink', 'solink_module'):
        extra_bindings.append(('soname', os.path.split(output)[1]))
        extra_bindings.append(('lib',
                              gyp.common.EncodePOSIXShellArgument(output)))
        if self.flavor == 'win':
            extra_bindings.append(('binary', output))
            if '/NOENTRY' not in ldflags:
                # Linking with an entry point also produces an import library.
                self.target.import_lib = output + '.lib'
                extra_bindings.append(('implibflag',
                                       '/IMPLIB:%s' % self.target.import_lib))
            pdbname = self.msvs_settings.GetPDBName(
                config_name, self.ExpandSpecial, output + '.pdb')
            output = [output, self.target.import_lib]
            if pdbname:
                output.append(pdbname)
        elif not self.is_mac_bundle:
            output = [output, output + '.TOC']
        else:
            command = command + '_notoc'
    elif self.flavor == 'win':
        extra_bindings.append(('binary', output))
        pdbname = self.msvs_settings.GetPDBName(
            config_name, self.ExpandSpecial, output + '.pdb')
        if pdbname:
            output = [output, pdbname]

    if len(solibs):
        extra_bindings.append(('solibs', gyp.common.EncodePOSIXShellList(solibs)))

    ninja_file.build(output, command + command_suffix, link_deps,
                     implicit=list(implicit_deps),
                     variables=extra_bindings)
    return linked_binary
def WriteTarget(self, spec, config_name, config, link_deps, compile_deps):
    """Write the final step for the target (link/archive/stamp).

    Returns self.target.binary, the node dependent targets should use."""
    extra_link_deps = any(self.target_outputs.get(dep).Linkable()
                          for dep in spec.get('dependencies', [])
                          if dep in self.target_outputs)
    if spec['type'] == 'none' or (not link_deps and not extra_link_deps):
        # TODO(evan): don't call this function for 'none' target types, as
        # it doesn't do anything, and we fake out a 'binary' with a stamp file.
        self.target.binary = compile_deps
        self.target.type = 'none'
    elif spec['type'] == 'static_library':
        self.target.binary = self.ComputeOutput(spec)
        if (self.flavor not in ('mac', 'openbsd', 'win') and not
            self.is_standalone_static_library):
            # Thin archives are cheaper to produce; standalone libraries
            # must be real archives since they are shipped.
            self.ninja.build(self.target.binary, 'alink_thin', link_deps,
                             order_only=compile_deps)
        else:
            variables = []
            if self.xcode_settings:
                libtool_flags = self.xcode_settings.GetLibtoolflags(config_name)
                if libtool_flags:
                    variables.append(('libtool_flags', libtool_flags))
            if self.msvs_settings:
                libflags = self.msvs_settings.GetLibFlags(config_name,
                                                          self.GypPathToNinja)
                variables.append(('libflags', libflags))

            if self.flavor != 'mac' or len(self.archs) == 1:
                self.AppendPostbuildVariable(variables, spec,
                                             self.target.binary, self.target.binary)
                self.ninja.build(self.target.binary, 'alink', link_deps,
                                 order_only=compile_deps, variables=variables)
            else:
                # Multi-arch mac: archive each arch, then combine into a fat .a.
                inputs = []
                for arch in self.archs:
                    output = self.ComputeOutput(spec, arch)
                    self.arch_subninjas[arch].build(output, 'alink', link_deps[arch],
                                                    order_only=compile_deps,
                                                    variables=variables)
                    inputs.append(output)
                # TODO: It's not clear if libtool_flags should be passed to the alink
                # call that combines single-arch .a files into a fat .a file.
                self.AppendPostbuildVariable(variables, spec,
                                             self.target.binary, self.target.binary)
                self.ninja.build(self.target.binary, 'alink', inputs,
                                 # FIXME: test proving order_only=compile_deps isn't
                                 # needed.
                                 variables=variables)
    else:
        self.target.binary = self.WriteLink(spec, config_name, config, link_deps)
    return self.target.binary
def WriteMacBundle(self, spec, mac_bundle_depends, is_empty):
    """Write the step that assembles the mac bundle from its dependencies.

    Returns the bundle output path (a .stamp path when |is_empty|)."""
    assert self.is_mac_bundle
    package_framework = spec['type'] in ('shared_library', 'loadable_module')
    output = self.ComputeMacBundleOutput()
    if is_empty:
        output += '.stamp'
    variables = []
    self.AppendPostbuildVariable(variables, spec, output, self.target.binary,
                                 is_command_start=not package_framework)
    if package_framework and not is_empty:
        # Frameworks need their Versions/ symlink structure set up.
        variables.append(('version', self.xcode_settings.GetFrameworkVersion()))
        self.ninja.build(output, 'package_framework', mac_bundle_depends,
                         variables=variables)
    else:
        self.ninja.build(output, 'stamp', mac_bundle_depends,
                         variables=variables)
    self.target.bundle = output
    return output
def GetSortedXcodeEnv(self, additional_settings=None):
    """Returns the variables Xcode would set for build steps."""
    assert self.abs_build_dir
    build_dir = self.abs_build_dir
    base_dir = os.path.join(build_dir, self.build_to_base)
    return gyp.xcode_emulation.GetSortedXcodeEnv(
        self.xcode_settings, build_dir, base_dir, self.config_name,
        additional_settings)
def GetSortedXcodePostbuildEnv(self):
    """Returns the variables Xcode would set for postbuild steps."""
    # CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
    # TODO(thakis): It would be nice to have some general mechanism instead.
    extra_settings = {}
    strip_save_file = self.xcode_settings.GetPerTargetSetting(
        'CHROMIUM_STRIP_SAVE_FILE')
    if strip_save_file:
        extra_settings['CHROMIUM_STRIP_SAVE_FILE'] = strip_save_file
    return self.GetSortedXcodeEnv(additional_settings=extra_settings)
def AppendPostbuildVariable(self, variables, spec, output, binary,
                            is_command_start=False):
    """Adds a 'postbuilds' variable if there is a postbuild for |output|."""
    command = self.GetPostbuildCommand(spec, output, binary, is_command_start)
    if not command:
        return
    variables.append(('postbuilds', command))
def GetPostbuildCommand(self, spec, output, output_binary, is_command_start):
    """Returns a shell command that runs all the postbuilds, and removes
    |output| if any of them fails. If |is_command_start| is False, then the
    returned string will start with ' && '."""
    if not self.xcode_settings or spec['type'] == 'none' or not output:
        return ''
    output = QuoteShellArgument(output, self.flavor)
    postbuilds = gyp.xcode_emulation.GetSpecPostbuildCommands(spec, quiet=True)
    if output_binary is not None:
        postbuilds = self.xcode_settings.AddImplicitPostbuilds(
            self.config_name,
            os.path.normpath(os.path.join(self.base_to_build, output)),
            QuoteShellArgument(
                os.path.normpath(os.path.join(self.base_to_build, output_binary)),
                self.flavor),
            postbuilds, quiet=True)
    if not postbuilds:
        return ''
    # Postbuilds expect to be run in the gyp file's directory, so insert an
    # implicit postbuild to cd to there.
    postbuilds.insert(0, gyp.common.EncodePOSIXShellList(
        ['cd', self.build_to_base]))
    env = self.ComputeExportEnvString(self.GetSortedXcodePostbuildEnv())
    # G will be non-null if any postbuild fails. Run all postbuilds in a
    # subshell.
    commands = env + ' (' + \
        ' && '.join([ninja_syntax.escape(command) for command in postbuilds])
    command_string = (commands + '); G=$$?; '
                      # Remove the final output if any postbuild failed.
                      '((exit $$G) || rm -rf %s) ' % output + '&& exit $$G)')
    if is_command_start:
        return '(' + command_string + ' && '
    else:
        # The leading '$ ' is ninja's escape for a literal space, producing
        # the documented ' && ' prefix after ninja evaluation.
        return '$ && (' + command_string
def ComputeExportEnvString(self, env):
    """Given an environment, returns a string looking like
    'export FOO=foo; export BAR="${FOO} bar;'
    that exports |env| to the shell."""
    exports = []
    for name, value in env:
        quoted = ninja_syntax.escape(gyp.common.EncodePOSIXShellArgument(value))
        exports.append('export %s=%s;' % (name, quoted))
    return ' '.join(exports)
def ComputeMacBundleOutput(self):
    """Return the 'output' (full output path) to a bundle output directory."""
    assert self.is_mac_bundle
    wrapper = self.xcode_settings.GetWrapperName()
    bundle_path = os.path.join(
        generator_default_variables['PRODUCT_DIR'], wrapper)
    return self.ExpandSpecial(bundle_path)
def ComputeOutputFileName(self, spec, type=None):
    """Compute the filename of the final output for the current target.

    |type| overrides spec['type'] when given (used when naming the binary
    inside a differently-typed wrapper)."""
    if not type:
        type = spec['type']

    default_variables = copy.copy(generator_default_variables)
    CalculateVariables(default_variables, {'flavor': self.flavor})

    # Compute filename prefix: the product prefix, or a default for
    # the product type.
    DEFAULT_PREFIX = {
        'loadable_module': default_variables['SHARED_LIB_PREFIX'],
        'shared_library': default_variables['SHARED_LIB_PREFIX'],
        'static_library': default_variables['STATIC_LIB_PREFIX'],
        'executable': default_variables['EXECUTABLE_PREFIX'],
    }
    prefix = spec.get('product_prefix', DEFAULT_PREFIX.get(type, ''))

    # Compute filename extension: the product extension, or a default
    # for the product type.
    DEFAULT_EXTENSION = {
        'loadable_module': default_variables['SHARED_LIB_SUFFIX'],
        'shared_library': default_variables['SHARED_LIB_SUFFIX'],
        'static_library': default_variables['STATIC_LIB_SUFFIX'],
        'executable': default_variables['EXECUTABLE_SUFFIX'],
    }
    if spec.get('product_extension'):
        extension = '.' + spec['product_extension']
    else:
        extension = DEFAULT_EXTENSION.get(type, '')

    if 'product_name' in spec:
        # If we were given an explicit name, use that.
        target = spec['product_name']
    else:
        # Otherwise, derive a name from the target name; snip out an extra
        # 'lib' so the prefix doesn't produce 'liblibfoo'.
        target = spec['target_name']
        if prefix == 'lib':
            target = StripPrefix(target, 'lib')

    if type == 'none':
        return '%s.stamp' % target
    if type in ('static_library', 'loadable_module', 'shared_library',
                'executable'):
        return '%s%s%s' % (prefix, target, extension)
    raise Exception('Unhandled output type %s' % type)
def ComputeOutput(self, spec, arch=None):
    """Compute the path for the final output of the spec."""
    type = spec['type']

    if self.flavor == 'win':
        # MSVS settings may fully override the output name.
        override = self.msvs_settings.GetOutputName(self.config_name,
                                                    self.ExpandSpecial)
        if override:
            return override

    if arch is None and self.flavor == 'mac' and type in (
        'static_library', 'executable', 'shared_library', 'loadable_module'):
        filename = self.xcode_settings.GetExecutablePath()
    else:
        filename = self.ComputeOutputFileName(spec, type)

    if arch is None and 'product_dir' in spec:
        path = os.path.join(spec['product_dir'], filename)
        return self.ExpandSpecial(path)

    # Some products go into the output root, libraries go into shared library
    # dir, and everything else goes into the normal place.
    type_in_output_root = ['executable', 'loadable_module']
    if self.flavor == 'mac' and self.toolset == 'target':
        type_in_output_root += ['shared_library', 'static_library']
    elif self.flavor == 'win' and self.toolset == 'target':
        type_in_output_root += ['shared_library']

    if arch is not None:
        # Make sure partial executables don't end up in a bundle or the regular
        # output directory.
        archdir = 'arch'
        if self.toolset != 'target':
            archdir = os.path.join('arch', '%s' % self.toolset)
        return os.path.join(archdir, AddArch(filename, arch))
    elif type in type_in_output_root or self.is_standalone_static_library:
        return filename
    elif type == 'shared_library':
        libdir = 'lib'
        if self.toolset != 'target':
            libdir = os.path.join('lib', '%s' % self.toolset)
        return os.path.join(libdir, filename)
    else:
        return self.GypPathToUniqueOutput(filename, qualified=False)
def WriteVariableList(self, ninja_file, var, values):
    """Emit |var| = space-joined |values| into |ninja_file|.

    |values| may be None (treated as empty); a bare string is rejected since
    joining it would silently splice it character by character."""
    assert not isinstance(values, str)
    ninja_file.variable(var, ' '.join(values or []))
  def WriteNewNinjaRule(self, name, args, description, is_cygwin, env):
    """Write out a new ninja "rule" statement for a given command.

    Returns the name of the new rule, and a copy of |args| with variables
    expanded."""
    # On Windows, VS macros ($(OutDir) etc.) in args/description must be
    # resolved first; on mac, environment variables are expanded instead.
    if self.flavor == 'win':
      args = [self.msvs_settings.ConvertVSMacros(
                  arg, self.base_to_build, config=self.config_name)
              for arg in args]
      description = self.msvs_settings.ConvertVSMacros(
          description, config=self.config_name)
    elif self.flavor == 'mac':
      # |env| is an empty list on non-mac.
      args = [gyp.xcode_emulation.ExpandEnvVars(arg, env) for arg in args]
      description = gyp.xcode_emulation.ExpandEnvVars(description, env)
    # TODO: we shouldn't need to qualify names; we do it because
    # currently the ninja rule namespace is global, but it really
    # should be scoped to the subninja.
    rule_name = self.name
    # NOTE(review): qualifying with the toolset when it *is* 'target' (rather
    # than when it differs) is the opposite of the obj/lib-dir qualification
    # used elsewhere in this file; names remain unique either way, but confirm
    # the intended direction against upstream gyp.
    if self.toolset == 'target':
      rule_name += '.' + self.toolset
    rule_name += '.' + name
    # Ninja rule names may only contain [a-zA-Z0-9_]; mangle everything else.
    rule_name = re.sub('[^a-zA-Z0-9_]', '_', rule_name)
    # Remove variable references, but not if they refer to the magic rule
    # variables. This is not quite right, as it also protects these for
    # actions, not just for rules where they are valid. Good enough.
    protect = [ '${root}', '${dirname}', '${source}', '${ext}', '${name}' ]
    protect = '(?!' + '|'.join(map(re.escape, protect)) + ')'
    description = re.sub(protect + r'\$', '_', description)
    # gyp dictates that commands are run from the base directory.
    # cd into the directory before running, and adjust paths in
    # the arguments to point to the proper locations.
    rspfile = None
    rspfile_content = None
    args = [self.ExpandSpecial(arg, self.base_to_build) for arg in args]
    if self.flavor == 'win':
      # Windows command lines can exceed the length limit, so the expanded
      # command goes into a per-invocation response file instead.
      rspfile = rule_name + '.$unique_name.rsp'
      # The cygwin case handles this inside the bash sub-shell.
      run_in = '' if is_cygwin else ' ' + self.build_to_base
      if is_cygwin:
        rspfile_content = self.msvs_settings.BuildCygwinBashCommandLine(
            args, self.build_to_base)
      else:
        rspfile_content = gyp.msvs_emulation.EncodeRspFileList(args)
      command = ('%s gyp-win-tool action-wrapper $arch ' % sys.executable +
                 rspfile + run_in)
    else:
      env = self.ComputeExportEnvString(env)
      command = gyp.common.EncodePOSIXShellList(args)
      command = 'cd %s; ' % self.build_to_base + env + command
    # GYP rules/actions express being no-ops by not touching their outputs.
    # Avoid executing downstream dependencies in this case by specifying
    # restat=1 to ninja.
    self.ninja.rule(rule_name, command, description, restat=True,
                    rspfile=rspfile, rspfile_content=rspfile_content)
    self.ninja.newline()
    return rule_name, args
def CalculateVariables(default_variables, params):
  """Calculate additional variables for use in the build (called by gyp).

  Fills in flavor-specific defaults (OS name, shared-library suffix and
  directories) and, on mac/win, copies shared generator configuration from
  the Xcode / MSVS generators so the ninja generator agrees with them.
  """
  global generator_additional_non_configuration_keys
  global generator_additional_path_sections
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'mac':
    default_variables.setdefault('OS', 'mac')
    default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
    default_variables.setdefault('SHARED_LIB_DIR',
                                 generator_default_variables['PRODUCT_DIR'])
    default_variables.setdefault('LIB_DIR',
                                 generator_default_variables['PRODUCT_DIR'])
    # Copy additional generator configuration data from Xcode, which is shared
    # by the Mac Ninja generator.
    import gyp.generator.xcode as xcode_generator
    generator_additional_non_configuration_keys = getattr(xcode_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(xcode_generator,
        'generator_additional_path_sections', [])
    global generator_extra_sources_for_rules
    generator_extra_sources_for_rules = getattr(xcode_generator,
        'generator_extra_sources_for_rules', [])
  elif flavor == 'win':
    default_variables.setdefault('OS', 'win')
    default_variables['EXECUTABLE_SUFFIX'] = '.exe'
    default_variables['STATIC_LIB_PREFIX'] = ''
    default_variables['STATIC_LIB_SUFFIX'] = '.lib'
    default_variables['SHARED_LIB_PREFIX'] = ''
    default_variables['SHARED_LIB_SUFFIX'] = '.dll'
    # Copy additional generator configuration data from VS, which is shared
    # by the Windows Ninja generator.
    import gyp.generator.msvs as msvs_generator
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])
    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
  else:
    operating_system = flavor
    if flavor == 'android':
      operating_system = 'linux'  # Keep this legacy behavior for now.
    default_variables.setdefault('OS', operating_system)
    default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
    default_variables.setdefault('SHARED_LIB_DIR',
                                 os.path.join('$!PRODUCT_DIR', 'lib'))
    default_variables.setdefault('LIB_DIR',
                                 os.path.join('$!PRODUCT_DIR', 'obj'))
def ComputeOutputDir(params):
  """Returns the path from the toplevel_dir to the build output directory."""
  # Where make-style generators drop their files (relative to pwd); ninja
  # puts nothing there itself, but honoring it eases make -> ninja migration.
  options = params['options']
  generator_dir = os.path.relpath(options.generator_output or '.')
  # The build directory relative to generator_dir (generator flag; "out" by
  # default).
  flags = params.get('generator_flags', {})
  output_dir = flags.get('output_dir', 'out')
  # Combined: relative path from source root to our output files, e.g. "out".
  return os.path.normpath(os.path.join(generator_dir, output_dir))
def CalculateGeneratorInputInfo(params):
  """Called by __init__ to initialize generator values based on params."""
  toplevel = params['options'].toplevel_dir
  # Where gyp-generated filelists live, e.g. "out/gypfiles".
  out_dir = os.path.join(toplevel, ComputeOutputDir(params), 'gypfiles')
  global generator_filelist_paths
  generator_filelist_paths = {
      'toplevel': toplevel,
      'qualified_out_dir': os.path.normpath(out_dir),
  }
def OpenOutput(path, mode='w'):
  """Open |path| for writing, first creating any missing parent directories."""
  gyp.common.EnsureDirExists(path)
  output_file = open(path, mode)
  return output_file
def CommandWithWrapper(cmd, wrappers, prog):
  """Return |prog| prefixed by the wrapper registered for |cmd|, if any."""
  wrapper = wrappers.get(cmd, '')
  return '%s %s' % (wrapper, prog) if wrapper else prog
def GetDefaultConcurrentLinks():
  """Returns a best-guess for a number of concurrent links.

  Linking is memory-bound, so the guess is total RAM divided by a
  per-platform per-link budget, never less than 1. Falls back to 1 when the
  platform's memory size cannot be determined.
  """
  if sys.platform in ('win32', 'cygwin'):
    import ctypes

    class MEMORYSTATUSEX(ctypes.Structure):
      _fields_ = [
        ("dwLength", ctypes.c_ulong),
        ("dwMemoryLoad", ctypes.c_ulong),
        ("ullTotalPhys", ctypes.c_ulonglong),
        ("ullAvailPhys", ctypes.c_ulonglong),
        ("ullTotalPageFile", ctypes.c_ulonglong),
        ("ullAvailPageFile", ctypes.c_ulonglong),
        ("ullTotalVirtual", ctypes.c_ulonglong),
        ("ullAvailVirtual", ctypes.c_ulonglong),
        ("sullAvailExtendedVirtual", ctypes.c_ulonglong),
      ]

    stat = MEMORYSTATUSEX()
    stat.dwLength = ctypes.sizeof(stat)
    ctypes.windll.kernel32.GlobalMemoryStatusEx(ctypes.byref(stat))

    # '//' keeps the result an int on both Python 2 and 3.
    mem_limit = max(1, stat.ullTotalPhys // (4 * (2 ** 30)))  # total / 4GB
    hard_cap = max(1, int(os.getenv('GYP_LINK_CONCURRENCY_MAX', 2**32)))
    # return min(mem_limit, hard_cap)
    # TODO(scottmg): Temporary speculative fix for OOM on builders
    # See http://crbug.com/333000.
    return 2
  elif sys.platform.startswith('linux'):
    with open("/proc/meminfo") as meminfo:
      memtotal_re = re.compile(r'^MemTotal:\s*(\d*)\s*kB')
      for line in meminfo:
        match = memtotal_re.match(line)
        if not match:
          continue
        # Allow 8Gb per link on Linux because Gold is quite memory hungry
        return max(1, int(match.group(1)) // (8 * (2 ** 20)))
    return 1
  elif sys.platform == 'darwin':
    try:
      avail_bytes = int(subprocess.check_output(['sysctl', '-n', 'hw.memsize']))
      # A static library debug build of Chromium's unit_tests takes ~2.7GB, so
      # 4GB per ld process allows for some more bloat.
      return max(1, avail_bytes // (4 * (2 ** 30)))  # total / 4GB
    except Exception:
      # BUG FIX: was a bare 'except:', which also swallowed KeyboardInterrupt
      # and SystemExit. A failing/missing sysctl now still yields 1.
      return 1
  else:
    # TODO(scottmg): Implement this for other platforms.
    return 1
def _GetWinLinkRuleNameSuffix(embed_manifest):
"""Returns the suffix used to select an appropriate linking rule depending on
whether the manifest embedding is enabled."""
return '_embed' if embed_manifest else ''
def _AddWinLinkRules(master_ninja, embed_manifest):
  """Adds link rules for Windows platform to |master_ninja|."""
  def FullLinkCommand(ldcmd, out, binary_type):
    # Resource id 1 selects .exe manifests, 2 selects .dll manifests.
    resource_name = {'exe': '1', 'dll': '2'}[binary_type]
    return ('%(python)s gyp-win-tool link-with-manifests $arch %(embed)s '
            '%(out)s "%(ldcmd)s" %(resname)s $mt $rc "$intermediatemanifest" '
            '$manifests' % {'python': sys.executable,
                            'out': out,
                            'ldcmd': ldcmd,
                            'resname': resource_name,
                            'embed': embed_manifest})

  rule_name_suffix = _GetWinLinkRuleNameSuffix(embed_manifest)
  use_separate_mspdbsrv = (
      int(os.environ.get('GYP_USE_SEPARATE_MSPDBSRV', '0')) != 0)
  dlldesc = 'LINK%s(DLL) $binary' % rule_name_suffix.upper()
  dllcmd = FullLinkCommand(
      '%s gyp-win-tool link-wrapper $arch %s '
      '$ld /nologo $implibflag /DLL /OUT:$binary '
      '@$binary.rsp' % (sys.executable, use_separate_mspdbsrv),
      '$binary', 'dll')
  # 'solink' and 'solink_module' are identical on Windows.
  for dll_rule in ('solink', 'solink_module'):
    master_ninja.rule(dll_rule + rule_name_suffix,
                      description=dlldesc, command=dllcmd,
                      rspfile='$binary.rsp',
                      rspfile_content='$libs $in_newline $ldflags',
                      restat=True,
                      pool='link_pool')
  # Note that ldflags goes at the end so that it has the option of
  # overriding default settings earlier in the command line.
  exe_cmd = FullLinkCommand(
      '%s gyp-win-tool link-wrapper $arch %s '
      '$ld /nologo /OUT:$binary @$binary.rsp' %
      (sys.executable, use_separate_mspdbsrv),
      '$binary', 'exe')
  master_ninja.rule('link' + rule_name_suffix,
                    description='LINK%s $binary' % rule_name_suffix.upper(),
                    command=exe_cmd,
                    rspfile='$binary.rsp',
                    rspfile_content='$in_newline $libs $ldflags',
                    pool='link_pool')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
                            config_name):
  """Write build.ninja plus one subninja per target for |config_name|.

  Generator core for a single configuration: creates the output directory,
  emits toolchain variables, declares the compile/link rules for the current
  flavor (win / mac / other posix), then writes each target's .ninja file
  and the aggregate phony targets ('all' and per-target short names).
  """
  options = params['options']
  flavor = gyp.common.GetFlavor(params)
  generator_flags = params.get('generator_flags', {})
  # build_dir: relative path from source root to our output files.
  # e.g. "out/Debug"
  build_dir = os.path.normpath(
      os.path.join(ComputeOutputDir(params), config_name))
  toplevel_build = os.path.join(options.toplevel_dir, build_dir)
  master_ninja_file = OpenOutput(os.path.join(toplevel_build, 'build.ninja'))
  master_ninja = ninja_syntax.Writer(master_ninja_file, width=120)
  # Put build-time support tools in out/{config_name}.
  gyp.common.CopyTool(flavor, toplevel_build)
  # Grab make settings for CC/CXX.
  # The rules are
  # - The priority from low to high is gcc/g++, the 'make_global_settings' in
  #   gyp, the environment variable.
  # - If there is no 'make_global_settings' for CC.host/CXX.host or
  #   'CC_host'/'CXX_host' environment variable, cc_host/cxx_host should be set
  #   to cc/cxx.
  if flavor == 'win':
    # Overridden by local arch choice in the use_deps case.
    # Chromium's ffmpeg c99conv.py currently looks for a 'cc =' line in
    # build.ninja so needs something valid here. http://crbug.com/233985
    cc = 'cl.exe'
    cxx = 'cl.exe'
    ld = 'link.exe'
    ld_host = '$ld'
  else:
    cc = 'cc'
    cxx = 'c++'
    ld = '$cc'
    ldxx = '$cxx'
    ld_host = '$cc_host'
    ldxx_host = '$cxx_host'
  cc_host = None
  cxx_host = None
  cc_host_global_setting = None
  cxx_host_global_setting = None
  build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
  make_global_settings = data[build_file].get('make_global_settings', [])
  build_to_root = gyp.common.InvertRelativePath(build_dir,
                                                options.toplevel_dir)
  wrappers = {}
  # Apply 'make_global_settings' overrides for compilers and *_wrapper tools.
  for key, value in make_global_settings:
    if key == 'CC':
      cc = os.path.join(build_to_root, value)
    if key == 'CXX':
      cxx = os.path.join(build_to_root, value)
    if key == 'CC.host':
      cc_host = os.path.join(build_to_root, value)
      cc_host_global_setting = value
    if key == 'CXX.host':
      cxx_host = os.path.join(build_to_root, value)
      cxx_host_global_setting = value
    if key.endswith('_wrapper'):
      wrappers[key[:-len('_wrapper')]] = os.path.join(build_to_root, value)
  # Support wrappers from environment variables too.
  for key, value in os.environ.iteritems():
    if key.lower().endswith('_wrapper'):
      key_prefix = key[:-len('_wrapper')]
      key_prefix = re.sub(r'\.HOST$', '.host', key_prefix)
      wrappers[key_prefix] = os.path.join(build_to_root, value)
  if flavor == 'win':
    # One environment file (and $cl_<arch> variable) per target architecture.
    cl_paths = gyp.msvs_emulation.GenerateEnvironmentFiles(
        toplevel_build, generator_flags, OpenOutput)
    for arch, path in cl_paths.iteritems():
      master_ninja.variable(
          'cl_' + arch, CommandWithWrapper('CC', wrappers,
                                           QuoteShellArgument(path, flavor)))
  cc = GetEnvironFallback(['CC_target', 'CC'], cc)
  master_ninja.variable('cc', CommandWithWrapper('CC', wrappers, cc))
  cxx = GetEnvironFallback(['CXX_target', 'CXX'], cxx)
  master_ninja.variable('cxx', CommandWithWrapper('CXX', wrappers, cxx))
  if flavor == 'win':
    master_ninja.variable('ld', ld)
    master_ninja.variable('idl', 'midl.exe')
    master_ninja.variable('ar', 'lib.exe')
    master_ninja.variable('rc', 'rc.exe')
    master_ninja.variable('asm', 'ml.exe')
    master_ninja.variable('mt', 'mt.exe')
  else:
    master_ninja.variable('ld', CommandWithWrapper('LINK', wrappers, ld))
    master_ninja.variable('ldxx', CommandWithWrapper('LINK', wrappers, ldxx))
    master_ninja.variable('ar', GetEnvironFallback(['AR_target', 'AR'], 'ar'))
  if generator_supports_multiple_toolsets:
    # Host-toolset tools default to the target tools unless overridden.
    if not cc_host:
      cc_host = cc
    if not cxx_host:
      cxx_host = cxx
    master_ninja.variable('ar_host', GetEnvironFallback(['AR_host'], 'ar'))
    cc_host = GetEnvironFallback(['CC_host'], cc_host)
    cxx_host = GetEnvironFallback(['CXX_host'], cxx_host)
    # The environment variable could be used in 'make_global_settings', like
    # ['CC.host', '$(CC)'] or ['CXX.host', '$(CXX)'], transform them here.
    if '$(CC)' in cc_host and cc_host_global_setting:
      cc_host = cc_host_global_setting.replace('$(CC)', cc)
    if '$(CXX)' in cxx_host and cxx_host_global_setting:
      cxx_host = cxx_host_global_setting.replace('$(CXX)', cxx)
    master_ninja.variable('cc_host',
                          CommandWithWrapper('CC.host', wrappers, cc_host))
    master_ninja.variable('cxx_host',
                          CommandWithWrapper('CXX.host', wrappers, cxx_host))
    if flavor == 'win':
      master_ninja.variable('ld_host', ld_host)
    else:
      master_ninja.variable('ld_host', CommandWithWrapper(
          'LINK', wrappers, ld_host))
      master_ninja.variable('ldxx_host', CommandWithWrapper(
          'LINK', wrappers, ldxx_host))
  master_ninja.newline()
  # Bound concurrent link steps: linking is memory hungry.
  master_ninja.pool('link_pool', depth=GetDefaultConcurrentLinks())
  master_ninja.newline()
  deps = 'msvc' if flavor == 'win' else 'gcc'
  # Compile rules.
  if flavor != 'win':
    master_ninja.rule(
      'cc',
      description='CC $out',
      command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_c '
              '$cflags_pch_c -c $in -o $out'),
      depfile='$out.d',
      deps=deps)
    master_ninja.rule(
      'cc_s',
      description='CC $out',
      command=('$cc $defines $includes $cflags $cflags_c '
              '$cflags_pch_c -c $in -o $out'))
    master_ninja.rule(
      'cxx',
      description='CXX $out',
      command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_cc '
              '$cflags_pch_cc -c $in -o $out'),
      depfile='$out.d',
      deps=deps)
  else:
    # TODO(scottmg) Separate pdb names is a test to see if it works around
    # http://crbug.com/142362. It seems there's a race between the creation of
    # the .pdb by the precompiled header step for .cc and the compilation of
    # .c files. This should be handled by mspdbsrv, but rarely errors out with
    #   c1xx : fatal error C1033: cannot open program database
    # By making the rules target separate pdb files this might be avoided.
    cc_command = ('ninja -t msvc -e $arch ' +
                  '-- '
                  '$cc /nologo /showIncludes /FC '
                  '@$out.rsp /c $in /Fo$out /Fd$pdbname_c ')
    cxx_command = ('ninja -t msvc -e $arch ' +
                   '-- '
                   '$cxx /nologo /showIncludes /FC '
                   '@$out.rsp /c $in /Fo$out /Fd$pdbname_cc ')
    master_ninja.rule(
      'cc',
      description='CC $out',
      command=cc_command,
      rspfile='$out.rsp',
      rspfile_content='$defines $includes $cflags $cflags_c',
      deps=deps)
    master_ninja.rule(
      'cxx',
      description='CXX $out',
      command=cxx_command,
      rspfile='$out.rsp',
      rspfile_content='$defines $includes $cflags $cflags_cc',
      deps=deps)
    master_ninja.rule(
      'idl',
      description='IDL $in',
      command=('%s gyp-win-tool midl-wrapper $arch $outdir '
               '$tlb $h $dlldata $iid $proxy $in '
               '$idlflags' % sys.executable))
    master_ninja.rule(
      'rc',
      description='RC $in',
      # Note: $in must be last otherwise rc.exe complains.
      command=('%s gyp-win-tool rc-wrapper '
               '$arch $rc $defines $resource_includes $rcflags /fo$out $in' %
               sys.executable))
    master_ninja.rule(
      'asm',
      description='ASM $in',
      command=('%s gyp-win-tool asm-wrapper '
               '$arch $asm $defines $includes /c /Fo $out $in' %
               sys.executable))
  # Link/archive rules, per flavor: generic posix, then win, then mac.
  if flavor != 'mac' and flavor != 'win':
    master_ninja.rule(
      'alink',
      description='AR $out',
      command='rm -f $out && $ar rcs $out $in')
    master_ninja.rule(
      'alink_thin',
      description='AR $out',
      command='rm -f $out && $ar rcsT $out $in')
    # This allows targets that only need to depend on $lib's API to declare an
    # order-only dependency on $lib.TOC and avoid relinking such downstream
    # dependencies when $lib changes only in non-public ways.
    # The resulting string leaves an uninterpolated %{suffix} which
    # is used in the final substitution below.
    mtime_preserving_solink_base = (
        'if [ ! -e $lib -o ! -e ${lib}.TOC ]; then '
        '%(solink)s && %(extract_toc)s > ${lib}.TOC; else '
        '%(solink)s && %(extract_toc)s > ${lib}.tmp && '
        'if ! cmp -s ${lib}.tmp ${lib}.TOC; then mv ${lib}.tmp ${lib}.TOC ; '
        'fi; fi'
        % { 'solink':
              '$ld -shared $ldflags -o $lib -Wl,-soname=$soname %(suffix)s',
            'extract_toc':
              ('{ readelf -d ${lib} | grep SONAME ; '
               'nm -gD -f p ${lib} | cut -f1-2 -d\' \'; }')})
    master_ninja.rule(
      'solink',
      description='SOLINK $lib',
      restat=True,
      command=(mtime_preserving_solink_base % {
          'suffix': '-Wl,--whole-archive $in $solibs -Wl,--no-whole-archive '
          '$libs'}),
      pool='link_pool')
    master_ninja.rule(
      'solink_module',
      description='SOLINK(module) $lib',
      restat=True,
      command=(mtime_preserving_solink_base % {
          'suffix': '-Wl,--start-group $in $solibs -Wl,--end-group '
          '$libs'}),
      pool='link_pool')
    master_ninja.rule(
      'link',
      description='LINK $out',
      command=('$ld $ldflags -o $out '
               '-Wl,--start-group $in $solibs -Wl,--end-group $libs'),
      pool='link_pool')
  elif flavor == 'win':
    master_ninja.rule(
      'alink',
      description='LIB $out',
      command=('%s gyp-win-tool link-wrapper $arch False '
               '$ar /nologo /ignore:4221 /OUT:$out @$out.rsp' %
               sys.executable),
      rspfile='$out.rsp',
      rspfile_content='$in_newline $libflags')
    _AddWinLinkRules(master_ninja, embed_manifest=True)
    _AddWinLinkRules(master_ninja, embed_manifest=False)
  else:
    master_ninja.rule(
      'objc',
      description='OBJC $out',
      command=('$cc -MMD -MF $out.d $defines $includes $cflags $cflags_objc '
               '$cflags_pch_objc -c $in -o $out'),
      depfile='$out.d',
      deps=deps)
    master_ninja.rule(
      'objcxx',
      description='OBJCXX $out',
      command=('$cxx -MMD -MF $out.d $defines $includes $cflags $cflags_objcc '
               '$cflags_pch_objcc -c $in -o $out'),
      depfile='$out.d',
      deps=deps)
    master_ninja.rule(
      'alink',
      description='LIBTOOL-STATIC $out, POSTBUILDS',
      command='rm -f $out && '
              './gyp-mac-tool filter-libtool libtool $libtool_flags '
              '-static -o $out $in'
              '$postbuilds')
    master_ninja.rule(
      'lipo',
      description='LIPO $out, POSTBUILDS',
      command='rm -f $out && lipo -create $in -output $out$postbuilds')
    # Record the public interface of $lib in $lib.TOC. See the corresponding
    # comment in the posix section above for details.
    solink_base = '$ld %(type)s $ldflags -o $lib %(suffix)s'
    mtime_preserving_solink_base = (
        'if [ ! -e $lib -o ! -e ${lib}.TOC ] || '
        # Always force dependent targets to relink if this library
        # reexports something. Handling this correctly would require
        # recursive TOC dumping but this is rare in practice, so punt.
        'otool -l $lib | grep -q LC_REEXPORT_DYLIB ; then '
        '%(solink)s && %(extract_toc)s > ${lib}.TOC; '
        'else '
        '%(solink)s && %(extract_toc)s > ${lib}.tmp && '
        'if ! cmp -s ${lib}.tmp ${lib}.TOC; then '
        'mv ${lib}.tmp ${lib}.TOC ; '
        'fi; '
        'fi'
        % { 'solink': solink_base,
            'extract_toc':
              '{ otool -l $lib | grep LC_ID_DYLIB -A 5; '
              'nm -gP $lib | cut -f1-2 -d\' \' | grep -v U$$; true; }'})
    solink_suffix = '$in $solibs $libs$postbuilds'
    master_ninja.rule(
      'solink',
      description='SOLINK $lib, POSTBUILDS',
      restat=True,
      command=mtime_preserving_solink_base % {'suffix': solink_suffix,
                                              'type': '-shared'},
      pool='link_pool')
    master_ninja.rule(
      'solink_notoc',
      description='SOLINK $lib, POSTBUILDS',
      restat=True,
      command=solink_base % {'suffix':solink_suffix, 'type': '-shared'},
      pool='link_pool')
    solink_module_suffix = '$in $solibs $libs$postbuilds'
    master_ninja.rule(
      'solink_module',
      description='SOLINK(module) $lib, POSTBUILDS',
      restat=True,
      command=mtime_preserving_solink_base % {'suffix': solink_module_suffix,
                                              'type': '-bundle'},
      pool='link_pool')
    master_ninja.rule(
      'solink_module_notoc',
      description='SOLINK(module) $lib, POSTBUILDS',
      restat=True,
      command=solink_base % {'suffix': solink_module_suffix, 'type': '-bundle'},
      pool='link_pool')
    master_ninja.rule(
      'link',
      description='LINK $out, POSTBUILDS',
      command=('$ld $ldflags -o $out '
               '$in $solibs $libs$postbuilds'),
      pool='link_pool')
    master_ninja.rule(
      'preprocess_infoplist',
      description='PREPROCESS INFOPLIST $out',
      command=('$cc -E -P -Wno-trigraphs -x c $defines $in -o $out && '
               'plutil -convert xml1 $out $out'))
    master_ninja.rule(
      'copy_infoplist',
      description='COPY INFOPLIST $in',
      command='$env ./gyp-mac-tool copy-info-plist $in $out $keys')
    master_ninja.rule(
      'mac_tool',
      description='MACTOOL $mactool_cmd $in',
      command='$env ./gyp-mac-tool $mactool_cmd $in $out')
    master_ninja.rule(
      'package_framework',
      description='PACKAGE FRAMEWORK $out, POSTBUILDS',
      command='./gyp-mac-tool package-framework $out $version$postbuilds '
              '&& touch $out')
  # stamp/copy rules are flavor-specific only in the tool they shell out to.
  if flavor == 'win':
    master_ninja.rule(
      'stamp',
      description='STAMP $out',
      command='%s gyp-win-tool stamp $out' % sys.executable)
    master_ninja.rule(
      'copy',
      description='COPY $in $out',
      command='%s gyp-win-tool recursive-mirror $in $out' % sys.executable)
  else:
    master_ninja.rule(
      'stamp',
      description='STAMP $out',
      command='${postbuilds}touch $out')
    master_ninja.rule(
      'copy',
      description='COPY $in $out',
      command='rm -rf $out && cp -af $in $out')
  master_ninja.newline()
  # Collect the targets named by the requested build files; their final
  # outputs feed the aggregate 'all' phony target below.
  all_targets = set()
  for build_file in params['build_files']:
    for target in gyp.common.AllTargets(target_list,
                                        target_dicts,
                                        os.path.normpath(build_file)):
      all_targets.add(target)
  all_outputs = set()
  # target_outputs is a map from qualified target name to a Target object.
  target_outputs = {}
  # target_short_names is a map from target short name to a list of Target
  # objects.
  target_short_names = {}
  for qualified_target in target_list:
    # qualified_target is like: third_party/icu/icu.gyp:icui18n#target
    build_file, name, toolset = \
        gyp.common.ParseQualifiedTarget(qualified_target)
    this_make_global_settings = data[build_file].get('make_global_settings', [])
    assert make_global_settings == this_make_global_settings, (
        "make_global_settings needs to be the same for all targets. %s vs. %s" %
        (this_make_global_settings, make_global_settings))
    spec = target_dicts[qualified_target]
    if flavor == 'mac':
      gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)
    build_file = gyp.common.RelativePath(build_file, options.toplevel_dir)
    base_path = os.path.dirname(build_file)
    obj = 'obj'
    if toolset != 'target':
      obj += '.' + toolset
    output_file = os.path.join(obj, base_path, name + '.ninja')
    # Write the target's ninja text to memory first so empty files are never
    # created on disk.
    ninja_output = StringIO()
    writer = NinjaWriter(qualified_target, target_outputs, base_path, build_dir,
                         ninja_output,
                         toplevel_build, output_file,
                         flavor, toplevel_dir=options.toplevel_dir)
    target = writer.WriteSpec(spec, config_name, generator_flags)
    if ninja_output.tell() > 0:
      # Only create files for ninja files that actually have contents.
      with OpenOutput(os.path.join(toplevel_build, output_file)) as ninja_file:
        ninja_file.write(ninja_output.getvalue())
      ninja_output.close()
      master_ninja.subninja(output_file)
    if target:
      if name != target.FinalOutput() and spec['toolset'] == 'target':
        target_short_names.setdefault(name, []).append(target)
      target_outputs[qualified_target] = target
      if qualified_target in all_targets:
        all_outputs.add(target.FinalOutput())
  if target_short_names:
    # Write a short name to build this target. This benefits both the
    # "build chrome" case as well as the gyp tests, which expect to be
    # able to run actions and build libraries by their short name.
    master_ninja.newline()
    master_ninja.comment('Short names for targets.')
    for short_name in target_short_names:
      master_ninja.build(short_name, 'phony', [x.FinalOutput() for x in
                                               target_short_names[short_name]])
  if all_outputs:
    master_ninja.newline()
    master_ninja.build('all', 'phony', list(all_outputs))
    master_ninja.default(generator_flags.get('default_target', 'all'))
  master_ninja_file.close()
def PerformBuild(data, configurations, params):
  """Build every requested configuration by running ninja on its out dir."""
  options = params['options']
  for config in configurations:
    # The generator placed this config's files in <toplevel>/out/<config>.
    builddir = os.path.join(options.toplevel_dir, 'out', config)
    arguments = ['ninja', '-C', builddir]
    print 'Building [%s]: %s' % (config, arguments)
    subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
  """Multiprocessing worker entry: unpack |arglist| and emit one config."""
  # Ignore SIGINT in the worker so the parent process receives it and can
  # terminate all of the multiprocessing children itself.
  signal.signal(signal.SIGINT, signal.SIG_IGN)
  target_list, target_dicts, data, params, config_name = arglist
  GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
def GenerateOutput(target_list, target_dicts, data, params):
  """Main generator entry point: emit ninja files for every configuration.

  Honors the 'config' generator flag (generate just one configuration) and
  params['parallel'] (one worker process per configuration).
  """
  # Update target_dicts for iOS device builds.
  target_dicts = gyp.xcode_emulation.CloneConfigurationForDeviceAndEmulator(
      target_dicts)
  user_config = params.get('generator_flags', {}).get('config', None)
  if gyp.common.GetFlavor(params) == 'win':
    # Split over-large targets and insert large-PDB shims before generating.
    target_list, target_dicts = MSVSUtil.ShardTargets(target_list, target_dicts)
    target_list, target_dicts = MSVSUtil.InsertLargePdbShims(
        target_list, target_dicts, generator_default_variables)
  if user_config:
    GenerateOutputForConfig(target_list, target_dicts, data, params,
                            user_config)
  else:
    config_names = target_dicts[target_list[0]]['configurations'].keys()
    if params['parallel']:
      # One worker per configuration; workers ignore SIGINT so the parent
      # can tear the pool down on KeyboardInterrupt.
      try:
        pool = multiprocessing.Pool(len(config_names))
        arglists = []
        for config_name in config_names:
          arglists.append(
              (target_list, target_dicts, data, params, config_name))
        pool.map(CallGenerateOutputForConfig, arglists)
      except KeyboardInterrupt, e:
        pool.terminate()
        raise e
    else:
      for config_name in config_names:
        GenerateOutputForConfig(target_list, target_dicts, data, params,
                                config_name)
| mit |
sinbazhou/odoo | addons/l10n_uk/__openerp__.py | 260 | 1999 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011 Smartmode LTD (<http://www.smartmode.co.uk>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'UK - Accounting',
    'version': '1.0',
    'category': 'Localization/Account Charts',
    'description': """
This is the latest UK OpenERP localisation necessary to run OpenERP accounting for UK SME's with:
=================================================================================================
    - a CT600-ready chart of accounts
    - VAT100-ready tax structure
    - InfoLogic UK counties listing
    - a few other adaptations""",
    'author': 'SmartMode LTD',
    'website': 'http://www.smartmode.co.uk',
    'depends': ['base_iban', 'base_vat', 'account_chart', 'account_anglo_saxon'],
    'data': [
        'data/account.account.type.csv',
        'data/account.account.template.csv',
        'data/account.tax.code.template.csv',
        'data/account.chart.template.csv',
        'data/account.tax.template.csv',
        'data/res.country.state.csv',
        'l10n_uk_wizard.xml',
    ],
    'demo' : ['demo/demo.xml'],
    # BUG FIX: 'installable' is a boolean flag in Odoo manifests. The previous
    # string 'True' only worked because any non-empty string is truthy (the
    # string 'False' would have been treated as installable too).
    'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
magic2du/contact_matrix | Contact_maps/DeepLearning/DeepLearningTool/DL_contact_matrix_load2-new10fold_04_27_2015_parallel_for_final.py | 1 | 43408 |
# coding: utf-8
# In[5]:
import sys, os
sys.path.append('../../../libs/')
import os.path
import IO_class
from IO_class import FileOperator
from sklearn import cross_validation
import sklearn
import csv
from dateutil import parser
from datetime import timedelta
from sklearn import svm
import numpy as np
import pandas as pd
import pdb
import pickle
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import KFold
from sklearn import preprocessing
import sklearn
import scipy.stats as ss
from sklearn.svm import LinearSVC
import random
from DL_libs import *
from itertools import izip #new
import math
from sklearn.svm import SVC
# In[6]:
#filename = 'SUCCESS_log_CrossValidation_load_DL_remoteFisherM1_DL_RE_US_DL_RE_US_1_1_19MAY2014.txt'
#filename = 'listOfDDIsHaveOver2InterfacesHave40-75_Examples_2010_real_selected.txt' #for testing
# set settings for this script
# Experiment configuration. The flag entries (SVM, DL, SAE_SVM, ...) select
# which classifiers to run (1 = enabled, 0 = disabled); the remaining entries
# are data-selection options and deep-learning hyper-parameters.
settings = {}
settings['filename'] = 'ddi_examples_40_60_over2top_diff_name_2014.txt'
settings['fisher_mode'] = 'FisherM1ONLY'# settings['fisher_mode'] = 'FisherM1ONLY'
settings['with_auc_score'] = False
settings['reduce_ratio'] = 1
settings['SVM'] = 1
settings['DL'] = 1
settings['SAE_SVM'] = 1
settings['SAE_SVM_COMBO'] = 1
settings['SVM_RBF'] = 1
settings['SAE_SVM_RBF'] = 1
settings['SAE_SVM_RBF_COMBO'] = 1
settings['SVM_POLY'] = 0
settings['DL_S'] = 1
settings['DL_U'] = 0
settings['finetune_lr'] = 1
settings['batch_size'] = 100
settings['pretraining_interations'] = 5002
settings['pretrain_lr'] = 0.001
settings['training_epochs'] = 20000 # change epochs for split net
settings['hidden_layers_sizes'] = [100, 100]
settings['corruption_levels'] = [0, 0]
# Read the list of DDI family names (one per line) from the input file.
filename = settings['filename']
file_obj = FileOperator(filename)
ddis = file_obj.readStripLines()
import logging
import time
# Log to a per-run file named after today's date, and record the full
# settings dict so each run is reproducible from its log.
current_date = time.strftime("%m_%d_%Y")
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logname = 'log_DL_contact_matrix_load' + current_date + '.log'
handler = logging.FileHandler(logname)
handler.setLevel(logging.DEBUG)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
logger.info('Input DDI file: ' + filename)
#logger.debug('This message should go to the log file')
for key, value in settings.items():
    logger.info(key +': '+ str(value))
# In[6]:
# In[7]:
class DDI_family_base(object):
#def __init__(self, ddi, Vectors_Fishers_aaIndex_raw_folder = '/home/du/Documents/Vectors_Fishers_aaIndex_raw_2014/'):
#def __init__(self, ddi, Vectors_Fishers_aaIndex_raw_folder = '/home/sun/Downloads/contactmatrix/contactmatrixanddeeplearningcode/data_test/'):
    def __init__(self, ddi, Vectors_Fishers_aaIndex_raw_folder = '/big/du/Protein_Protein_Interaction_Project/Contact_Matrix_Project/Vectors_Fishers_aaIndex_raw_2014_paper/'):
        """Load every sequence of one DDI (domain-domain interaction) family.

        Args:
            ddi: string, DDI family name (also the sub-folder name).
            Vectors_Fishers_aaIndex_raw_folder: string, root folder with one
                sub-folder of raw vector files per DDI family.

        Attributes populated:
            total_number_of_sequences: int, sequences found for this family.
            raw_data: dict seq_no -> numpy array (last column is the label).
            positve_negative_number: dict seq_no -> {'numPos': .., 'numNeg': ..}
                (note: attribute name keeps the original 'positve' typo, which
                other methods reference).
            equal_size_data: dict seq_no -> class-balanced numpy array (all
                positives plus an equal-sized random sample of negatives).
        """
        self.ddi = ddi
        self.Vectors_Fishers_aaIndex_raw_folder = Vectors_Fishers_aaIndex_raw_folder
        self.ddi_folder = self.Vectors_Fishers_aaIndex_raw_folder + ddi + '/'
        self.total_number_of_sequences = self.get_total_number_of_sequences()
        self.raw_data = {}
        self.positve_negative_number = {}
        self.equal_size_data = {}
        for seq_no in range(1, self.total_number_of_sequences+1):
            self.raw_data[seq_no] = self.get_raw_data_for_selected_seq(seq_no)
            try:
                #positive_file = self.ddi_folder + 'numPos_'+ str(seq_no) + '.txt'
                #file_obj = FileOperator(positive_file)
                #lines = file_obj.readStripLines()
                #import pdb; pdb.set_trace()
                # Positives are counted from the label column (last column is
                # 1 for a positive example, 0 for a negative one); rows are
                # ordered positives first, negatives after.
                count_pos = int(np.sum(self.raw_data[seq_no][:, -1]))
                count_neg = self.raw_data[seq_no].shape[0] - count_pos
                #self.positve_negative_number[seq_no] = {'numPos': int(float(lines[0]))}
                #assert int(float(lines[0])) == count_pos
                self.positve_negative_number[seq_no] = {'numPos': count_pos}
                #negative_file = self.ddi_folder + 'numNeg_'+ str(seq_no) + '.txt'
                #file_obj = FileOperator(negative_file)
                #lines = file_obj.readStripLines()
                #self.positve_negative_number[seq_no]['numNeg'] = int(float(lines[0]))
                self.positve_negative_number[seq_no]['numNeg'] = count_neg
            except Exception,e:
                # Log (and print) the failing family/sequence but keep going;
                # note the counts below will then raise KeyError for this
                # seq_no if the dict entry was never created.
                print ddi, seq_no
                print str(e)
                logger.info(ddi + str(seq_no))
                logger.info(str(e))
            # get data for equal positive and negative
            # Balance classes: keep all positives and randomly sample an equal
            # number of negatives (negatives occupy rows n_pos..n_pos+n_neg-1).
            n_pos = self.positve_negative_number[seq_no]['numPos']
            n_neg = self.positve_negative_number[seq_no]['numNeg']
            index_neg = range(n_pos, n_pos + n_neg)
            random.shuffle(index_neg)
            index_neg = index_neg[: n_pos]
            positive_examples = self.raw_data[seq_no][ : n_pos, :]
            negative_examples = self.raw_data[seq_no][index_neg, :]
            self.equal_size_data[seq_no] = np.vstack((positive_examples, negative_examples))
def get_LOO_training_and_reduced_traing(self, seq_no, fisher_mode = 'FisherM1ONLY' , reduce_ratio = 4):
""" get the leave one out traing data, reduced traing
Parameters:
seq_no:
fisher_mode: default 'FisherM1ONLY'
Returns:
(train_X_LOO, train_y_LOO),(train_X_reduced, train_y_reduced), (test_X, test_y)
"""
train_X_LOO = np.array([])
train_y_LOO = np.array([])
train_X_reduced = np.array([])
train_y_reduced = np.array([])
total_number_of_sequences = self.total_number_of_sequences
equal_size_data_selected_sequence = self.equal_size_data[seq_no]
#get test data for selected sequence
test_X, test_y = self.select_X_y(equal_size_data_selected_sequence, fisher_mode = fisher_mode)
total_sequences = range(1, total_number_of_sequences+1)
loo_sequences = [i for i in total_sequences if i != seq_no]
number_of_reduced = len(loo_sequences)/reduce_ratio if len(loo_sequences)/reduce_ratio !=0 else 1
random.shuffle(loo_sequences)
reduced_sequences = loo_sequences[:number_of_reduced]
#for loo data
for current_no in loo_sequences:
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if train_X_LOO.ndim ==1:
train_X_LOO = current_X
else:
train_X_LOO = np.vstack((train_X_LOO, current_X))
train_y_LOO = np.concatenate((train_y_LOO, current_y))
#for reduced data
for current_no in reduced_sequences:
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if train_X_reduced.ndim ==1:
train_X_reduced = current_X
else:
train_X_reduced = np.vstack((train_X_reduced, current_X))
train_y_reduced = np.concatenate((train_y_reduced, current_y))
return (train_X_LOO, train_y_LOO),(train_X_reduced, train_y_reduced), (test_X, test_y)
#def get_ten_fold_crossvalid_one_subset(self, start_subset, end_subset, fisher_mode = 'FisherM1ONLY' , reduce_ratio = 4):
def get_ten_fold_crossvalid_one_subset(self, train_index, test_index, fisher_mode = 'FisherM1ONLY' , reduce_ratio = 4):
""" get traing data, reduced traing data for 10-fold crossvalidation
Parameters:
start_subset: index of start of the testing data
end_subset: index of end of the testing data
fisher_mode: default 'FisherM1ONLY'
Returns:
(train_X_10fold, train_y_10fold),(train_X_reduced, train_y_reduced), (test_X, test_y)
"""
train_X_10fold = np.array([])
train_y_10fold = np.array([])
train_X_reduced = np.array([])
train_y_reduced = np.array([])
test_X = np.array([])
test_y = np.array([])
total_number_of_sequences = self.total_number_of_sequences
#get test data for selected sequence
#for current_no in range(start_subset, end_subset):
for num in test_index:
current_no = num + 1
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if test_X.ndim ==1:
test_X = current_X
else:
test_X = np.vstack((test_X, current_X))
test_y = np.concatenate((test_y, current_y))
#total_sequences = range(1, total_number_of_sequences+1)
#ten_fold_sequences = [i for i in total_sequences if not(i in range(start_subset, end_subset))]
#number_of_reduced = len(ten_fold_sequences)/reduce_ratio if len(ten_fold_sequences)/reduce_ratio !=0 else 1
#random.shuffle(ten_fold_sequences)
#reduced_sequences = ten_fold_sequences[:number_of_reduced]
number_of_reduced = len(train_index)/reduce_ratio if len(train_index)/reduce_ratio !=0 else 1
random.shuffle(train_index)
reduced_sequences = train_index[:number_of_reduced]
#for 10-fold cross-validation data
#for current_no in ten_fold_sequences:
for num in train_index:
current_no = num + 1
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if train_X_10fold.ndim ==1:
train_X_10fold = current_X
else:
train_X_10fold = np.vstack((train_X_10fold, current_X))
train_y_10fold = np.concatenate((train_y_10fold, current_y))
#for reduced data
for num in reduced_sequences:
current_no = num + 1
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if train_X_reduced.ndim ==1:
train_X_reduced = current_X
else:
train_X_reduced = np.vstack((train_X_reduced, current_X))
train_y_reduced = np.concatenate((train_y_reduced, current_y))
return (train_X_10fold, train_y_10fold),(train_X_reduced, train_y_reduced), (test_X, test_y)
def get_total_number_of_sequences(self):
""" get total number of sequences in a ddi familgy
Parameters:
ddi: string
Vectors_Fishers_aaIndex_raw_folder: string
Returns:
n: int
"""
folder_path = self.Vectors_Fishers_aaIndex_raw_folder + self.ddi + '/'
filename = folder_path +'allPairs.txt'
all_pairs = np.loadtxt(filename, 'float32')
return len(all_pairs)
def get_raw_data_for_selected_seq(self, seq_no):
""" get raw data for selected seq no in a family
Parameters:
ddi:
seq_no:
Returns:
data: raw data in the sequence file
"""
folder_path = self.Vectors_Fishers_aaIndex_raw_folder + self.ddi + '/'
filename = folder_path + 'F0_20_F1_20_Sliding_17_11_F0_20_F1_20_Sliding_17_11_ouput_'+ str(seq_no) + '.txt'
data = np.loadtxt(filename, 'float32')
return data
def select_X_y(self, data, fisher_mode = ''):
""" select subset from the raw input data set
Parameters:
data: data from matlab txt file
fisher_mode: subset base on this Fisher of AAONLY...
Returns:
selected X, y
"""
y = data[:,-1] # get lable
if fisher_mode == 'FisherM1': # fisher m1 plus AA index
a = data[:, 20:227]
b = data[:, 247:454]
X = np.hstack((a,b))
elif fisher_mode == 'FisherM1ONLY':
a = data[:, 20:40]
b = data[:, 247:267]
X = np.hstack((a,b))
elif fisher_mode == 'AAONLY':
a = data[:, 40:227]
b = data[:, 267:454]
X = np.hstack((a,b))
else:
raise('there is an error in mode')
return X, y
# In[7]:
# In[7]:
# In[8]:
import sklearn.preprocessing
def saveAsCsv(with_auc_score, fname, score_dict, arguments): #new
    """Append per-run analysis rows to 'report_<fname>.csv'.

    A header row is written first when the report file does not exist yet.

    Parameters:
        with_auc_score: bool, kept for signature compatibility only -- both
            branches of the original wrote an identical header row.
        fname: str, report name; output goes to 'report_' + fname + '.csv'.
        score_dict: dict, its keys become the trailing header column names.
        arguments: iterable of row tuples/lists, written one CSV row each.
    """
    report_path = 'report_' + fname + '.csv'
    # Write the header only when creating the file for the first time.
    newfile = not os.path.isfile(report_path)
    # 'with' guarantees the handle is closed even if a write fails
    # (the original leaked the handle on error).
    with open(report_path, 'a+') as csvfile:
        writer = csv.writer(csvfile)
        if newfile:
            # list() keeps this working on Python 3 too, where dict.keys()
            # is a view and cannot be concatenated to a list.
            writer.writerow(['DDI', 'no.', 'FisherMode', 'method', 'isTest'] + list(score_dict.keys()))
        for arg in arguments:
            writer.writerow(list(arg))
def LOO_out_performance_for_all(ddis):
    """Run leave-one-out evaluation for every DDI family in *ddis*.

    A failure in one family is printed and logged, then the loop moves on
    to the next family instead of aborting the batch.
    """
    for family_name in ddis:
        try:
            runner = LOO_out_performance_for_one_ddi(family_name)
            runner.get_LOO_perfermance(settings = settings)
        except Exception as err:
            print(str(err))
            logger.info("There is a error in this ddi: %s" % family_name)
            logger.info(str(err))
class LOO_out_performance_for_one_ddi(object):
    """ get the performance of ddi families
    Attributes:
        ddi: string ddi name
        Vectors_Fishers_aaIndex_raw_folder: string, folder
        total_number_of_sequences: int
        raw_data: dict raw_data[2]
    """
    def __init__(self, ddi):
        # Constructing the family object also loads all its sequence data.
        self.ddi_obj = DDI_family_base(ddi)
        self.ddi = ddi
    def get_LOO_perfermance(self, settings = None):
        # Leave-one-out evaluation over every sequence of this family.
        # Each enabled model appends score rows to analysis_scr, which is
        # written to CSV once after the loop.
        fisher_mode = settings['fisher_mode']
        analysis_scr = []
        with_auc_score = settings['with_auc_score']
        reduce_ratio = settings['reduce_ratio']
        for seq_no in range(1, self.ddi_obj.total_number_of_sequences+1):
            print seq_no
            logger.info('sequence number: ' + str(seq_no))
            if settings['SVM']:
                print "SVM"
                (train_X_LOO, train_y_LOO),(train_X_reduced, train_y_reduced), (test_X, test_y) = self.ddi_obj.get_LOO_training_and_reduced_traing(seq_no,fisher_mode = fisher_mode, reduce_ratio = reduce_ratio)
                standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
                scaled_train_X = standard_scaler.transform(train_X_reduced)
                scaled_test_X = standard_scaler.transform(test_X)
                Linear_SVC = LinearSVC(C=1, penalty="l2")
                Linear_SVC.fit(scaled_train_X, train_y_reduced)
                predicted_test_y = Linear_SVC.predict(scaled_test_X)
                isTest = True; #new
                analysis_scr.append((self.ddi, seq_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
                predicted_train_y = Linear_SVC.predict(scaled_train_X)
                isTest = False; #new
                analysis_scr.append((self.ddi, seq_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
            # Deep learning part
            # NOTE(review): original indentation was lost; the section below
            # reads train_X_reduced / train_X_LOO / test_X that are bound in
            # the SVM branch above, so it fails with NameError when
            # settings['SVM'] is falsy -- confirm intended nesting.
            min_max_scaler = Preprocessing_Scaler_with_mean_point5()
            X_train_pre_validation_minmax = min_max_scaler.fit(train_X_reduced)
            X_train_pre_validation_minmax = min_max_scaler.transform(train_X_reduced)
            x_test_minmax = min_max_scaler.transform(test_X)
            pretraining_X_minmax = min_max_scaler.transform(train_X_LOO)
            x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax = train_test_split(X_train_pre_validation_minmax,
                                                                                                        train_y_reduced
                                                                                                        , test_size=0.4, random_state=42)
            finetune_lr = settings['finetune_lr']
            batch_size = settings['batch_size']
            pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
            #pretrain_lr=0.001
            pretrain_lr = settings['pretrain_lr']
            training_epochs = cal_epochs(settings['training_epochs'], x_train_minmax, batch_size = batch_size)
            hidden_layers_sizes= settings['hidden_layers_sizes']
            corruption_levels = settings['corruption_levels']
            if settings['DL']:
                print "direct deep learning"
                # direct deep learning
                sda = trainSda(x_train_minmax, y_train_minmax,
                             x_validation_minmax, y_validation_minmax ,
                             x_test_minmax, test_y,
                             hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
                             training_epochs = 1500, pretraining_epochs = pretraining_epochs,
                             pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
                             )
                print 'hidden_layers_sizes:', hidden_layers_sizes
                print 'corruption_levels:', corruption_levels
                training_predicted = sda.predict(x_train_minmax)
                y_train = y_train_minmax
                isTest = False; #new
                analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_train, training_predicted).values()))
                test_predicted = sda.predict(x_test_minmax)
                y_test = test_y
                isTest = True; #new
                analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_test, test_predicted).values()))
            if 0:
                # Disabled experiment: deep learning pretrained on the
                # unlabeled LOO data.
                # deep learning using unlabeled data for pretraining
                print 'deep learning with unlabel data'
                pretraining_epochs_for_reduced = cal_epochs(1500, pretraining_X_minmax, batch_size = batch_size)
                sda_unlabel = trainSda(x_train_minmax, y_train_minmax,
                                     x_validation_minmax, y_validation_minmax ,
                                     x_test_minmax, test_y,
                                     pretraining_X_minmax = pretraining_X_minmax,
                                     hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
                                     training_epochs = training_epochs, pretraining_epochs = pretraining_epochs_for_reduced,
                                     pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
                                     )
                print 'hidden_layers_sizes:', hidden_layers_sizes
                print 'corruption_levels:', corruption_levels
                training_predicted = sda_unlabel.predict(x_train_minmax)
                y_train = y_train_minmax
                isTest = False; #new
                analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
                test_predicted = sda_unlabel.predict(x_test_minmax)
                y_test = test_y
                isTest = True; #new
                analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_test, test_predicted, with_auc_score).values()))
            if settings['DL_S']:
                # deep learning using split network
                print 'deep learning using split network'
                # get the new representation for A set. first 784-D
                pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
                hidden_layers_sizes= settings['hidden_layers_sizes']
                corruption_levels = settings['corruption_levels']
                # First half of the columns = domain A, second half = domain B;
                # each half gets its own autoencoder.
                x = x_train_minmax[:, :x_train_minmax.shape[1]/2]
                print "original shape for A", x.shape
                a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
                                              hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
                new_x_train_minmax_A = a_MAE_A.transform(x_train_minmax[:, :x_train_minmax.shape[1]/2])
                x = x_train_minmax[:, x_train_minmax.shape[1]/2:]
                print "original shape for B", x.shape
                a_MAE_B = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
                                              hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
                new_x_train_minmax_B = a_MAE_B.transform(x_train_minmax[:, x_train_minmax.shape[1]/2:])
                new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax[:, :x_test_minmax.shape[1]/2])
                new_x_test_minmax_B = a_MAE_B.transform(x_test_minmax[:, x_test_minmax.shape[1]/2:])
                new_x_validation_minmax_A = a_MAE_A.transform(x_validation_minmax[:, :x_validation_minmax.shape[1]/2])
                new_x_validation_minmax_B = a_MAE_B.transform(x_validation_minmax[:, x_validation_minmax.shape[1]/2:])
                # Re-join the two learned representations for the final net.
                new_x_train_minmax_whole = np.hstack((new_x_train_minmax_A, new_x_train_minmax_B))
                new_x_test_minmax_whole = np.hstack((new_x_test_minmax_A, new_x_test_minmax_B))
                new_x_validationt_minmax_whole = np.hstack((new_x_validation_minmax_A, new_x_validation_minmax_B))
                finetune_lr = settings['finetune_lr']
                batch_size = settings['batch_size']
                pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
                #pretrain_lr=0.001
                pretrain_lr = settings['pretrain_lr']
                training_epochs = cal_epochs(settings['training_epochs'], x_train_minmax, batch_size = batch_size)
                hidden_layers_sizes= settings['hidden_layers_sizes']
                corruption_levels = settings['corruption_levels']
                sda_transformed = trainSda(new_x_train_minmax_whole, y_train_minmax,
                                         new_x_validationt_minmax_whole, y_validation_minmax ,
                                         new_x_test_minmax_whole, y_test,
                                         hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
                                         training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
                                         pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
                                         )
                print 'hidden_layers_sizes:', hidden_layers_sizes
                print 'corruption_levels:', corruption_levels
                training_predicted = sda_transformed.predict(new_x_train_minmax_whole)
                y_train = y_train_minmax
                isTest = False; #new
                analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
                test_predicted = sda_transformed.predict(new_x_test_minmax_whole)
                y_test = test_y
                isTest = True; #new
                analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_test, test_predicted, with_auc_score).values()))
        # Write all accumulated score rows once per family.
        report_name = filename + '_' + '_'.join(map(str, hidden_layers_sizes)) + '_' + str(pretrain_lr) + '_' + str(finetune_lr) + '_' + str(reduce_ratio)+ '_' +str(training_epochs) + '_' + current_date
        saveAsCsv(with_auc_score, report_name, performance_score(y_test, test_predicted, with_auc_score), analysis_scr)
# In[9]:
#for 10-fold cross validation
def ten_fold_crossvalid_performance_for_all(ddis):
    """Run 10-fold cross-validation for every DDI family in *ddis*.

    A failure in one family is printed and logged, then the loop continues
    with the next family instead of aborting the batch.
    """
    for family_name in ddis:
        try:
            process_one_ddi_tenfold(family_name)
        except Exception as err:
            print(str(err))
            logger.debug("There is a error in this ddi: %s" % family_name)
            logger.info(str(err))
def process_one_ddi_tenfold(ddi):
    """Run 10-fold cross-validation for a single DDI family.

    Designed as a multiprocessing.Pool worker: it traps and logs any
    exception so one failing family does not kill the pool, and always
    returns None. (The original docstring -- "A function to waste CPU
    cycles" -- was a leftover joke and did not describe the function.)

    Parameters:
        ddi: str, the DDI family name.
    """
    logger.info('DDI: %s' % ddi)
    try:
        # The one-entry dict wrapper of the original added nothing; a plain
        # local is equivalent.
        evaluator = Ten_fold_crossvalid_performance_for_one_ddi(ddi)
        evaluator.get_ten_fold_crossvalid_perfermance(settings=settings)
    except Exception as e:
        print(str(e))
        logger.debug("There is a error in this ddi: %s" % ddi)
        logger.info(str(e))
    return None
class Ten_fold_crossvalid_performance_for_one_ddi(object):
    """ get the performance of ddi families
    Attributes:
        ddi: string ddi name
        Vectors_Fishers_aaIndex_raw_folder: string, folder
        total_number_of_sequences: int
        raw_data: dict raw_data[2]
    """
    def __init__(self, ddi):
        # Constructing the family object also loads all its sequence data.
        self.ddi_obj = DDI_family_base(ddi)
        self.ddi = ddi
    def get_ten_fold_crossvalid_perfermance(self, settings = None):
        # 10-fold cross-validation over this family's sequences. Every
        # enabled model appends score rows to analysis_scr, written to CSV
        # once after the fold loop.
        fisher_mode = settings['fisher_mode']
        analysis_scr = []
        with_auc_score = settings['with_auc_score']
        reduce_ratio = settings['reduce_ratio']
        #for seq_no in range(1, self.ddi_obj.total_number_of_sequences+1):
        #subset_size = math.floor(self.ddi_obj.total_number_of_sequences / 10.0)
        kf = KFold(self.ddi_obj.total_number_of_sequences, n_folds = 10, shuffle = True)
        #for subset_no in range(1, 11):
        for ((train_index, test_index),subset_no) in izip(kf,range(1,11)):
            #for train_index, test_index in kf;
            print("Subset:", subset_no)
            print("Train index: ", train_index)
            print("Test index: ", test_index)
            #logger.info('subset number: ' + str(subset_no))
            (train_X_10fold, train_y_10fold),(train_X_reduced, train_y_reduced), (test_X, test_y) = self.ddi_obj.get_ten_fold_crossvalid_one_subset(train_index, test_index, fisher_mode = fisher_mode, reduce_ratio = reduce_ratio)
            # Shared standardization for all SVM variants of this fold.
            standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
            scaled_train_X = standard_scaler.transform(train_X_reduced)
            scaled_test_X = standard_scaler.transform(test_X)
            if settings['SVM']:
                print "SVM"
                Linear_SVC = LinearSVC(C=1, penalty="l2")
                Linear_SVC.fit(scaled_train_X, train_y_reduced)
                predicted_test_y = Linear_SVC.predict(scaled_test_X)
                isTest = True; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
                predicted_train_y = Linear_SVC.predict(scaled_train_X)
                isTest = False; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
            if settings['SVM_RBF']:
                print "SVM_RBF"
                L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(scaled_train_X, train_y_reduced)
                predicted_test_y = L1_SVC_RBF_Selector.predict(scaled_test_X)
                isTest = True; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
                predicted_train_y = L1_SVC_RBF_Selector.predict(scaled_train_X)
                isTest = False; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
            if settings['SVM_POLY']:
                print "SVM_POLY"
                L1_SVC_POLY_Selector = SVC(C=1, kernel='poly').fit(scaled_train_X, train_y_reduced)
                predicted_test_y = L1_SVC_POLY_Selector.predict(scaled_test_X)
                isTest = True; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM_POLY', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
                predicted_train_y = L1_SVC_POLY_Selector.predict(scaled_train_X)
                isTest = False; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM_POLY', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
            # direct deep learning
            min_max_scaler = Preprocessing_Scaler_with_mean_point5()
            X_train_pre_validation_minmax = min_max_scaler.fit(train_X_reduced)
            X_train_pre_validation_minmax = min_max_scaler.transform(train_X_reduced)
            x_test_minmax = min_max_scaler.transform(test_X)
            x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax = train_test_split(X_train_pre_validation_minmax,
                                                                                                        train_y_reduced
                                                                                                        , test_size=0.4, random_state=42)
            finetune_lr = settings['finetune_lr']
            batch_size = settings['batch_size']
            pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
            #pretrain_lr=0.001
            pretrain_lr = settings['pretrain_lr']
            training_epochs = settings['training_epochs']
            hidden_layers_sizes= settings['hidden_layers_sizes']
            corruption_levels = settings['corruption_levels']
            #### new prepresentation
            # Stacked-autoencoder features learned on the full training set,
            # then standardized; also concatenated with the raw scaled
            # features to form the "combo" variants below.
            x = X_train_pre_validation_minmax
            a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
                                          hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
            new_x_train_minmax_A = a_MAE_A.transform(X_train_pre_validation_minmax)
            new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax)
            standard_scaler = preprocessing.StandardScaler().fit(new_x_train_minmax_A)
            new_x_train_scaled = standard_scaler.transform(new_x_train_minmax_A)
            new_x_test_scaled = standard_scaler.transform(new_x_test_minmax_A)
            new_x_train_combo = np.hstack((scaled_train_X, new_x_train_scaled))
            new_x_test_combo = np.hstack((scaled_test_X, new_x_test_scaled))
            if settings['SAE_SVM']:
                print 'SAE followed by SVM'
                Linear_SVC = LinearSVC(C=1, penalty="l2")
                Linear_SVC.fit(new_x_train_scaled, train_y_reduced)
                predicted_test_y = Linear_SVC.predict(new_x_test_scaled)
                isTest = True; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
                predicted_train_y = Linear_SVC.predict(new_x_train_scaled)
                isTest = False; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
            if settings['SAE_SVM_RBF']:
                print 'SAE followed by SVM RBF'
                x = X_train_pre_validation_minmax
                L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(new_x_train_scaled, train_y_reduced)
                predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_scaled)
                isTest = True; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
                predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_scaled)
                isTest = False; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
            if settings['SAE_SVM_COMBO']:
                print 'SAE followed by SVM with combo feature'
                Linear_SVC = LinearSVC(C=1, penalty="l2")
                Linear_SVC.fit(new_x_train_combo, train_y_reduced)
                predicted_test_y = Linear_SVC.predict(new_x_test_combo)
                isTest = True; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM_COMBO', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
                predicted_train_y = Linear_SVC.predict(new_x_train_combo)
                isTest = False; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM_COMBO', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
            if settings['SAE_SVM_RBF_COMBO']:
                print 'SAE followed by SVM RBF with combo feature'
                L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(new_x_train_combo, train_y_reduced)
                predicted_test_y = L1_SVC_RBF_Selector.predict(new_x_test_combo)
                isTest = True; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM_RBF_COMBO', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
                predicted_train_y = L1_SVC_RBF_Selector.predict(new_x_train_combo)
                isTest = False; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM_RBF_COMBO', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
            if settings['DL']:
                print "direct deep learning"
                sda = trainSda(x_train_minmax, y_train_minmax,
                             x_validation_minmax, y_validation_minmax ,
                             x_test_minmax, test_y,
                             hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
                             training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
                             pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
                             )
                print 'hidden_layers_sizes:', hidden_layers_sizes
                print 'corruption_levels:', corruption_levels
                training_predicted = sda.predict(x_train_minmax)
                y_train = y_train_minmax
                isTest = False; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_train, training_predicted).values()))
                test_predicted = sda.predict(x_test_minmax)
                y_test = test_y
                isTest = True; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_test, test_predicted).values()))
            if settings['DL_U']:
                # deep learning using unlabeled data for pretraining
                print 'deep learning with unlabel data'
                pretraining_X_minmax = min_max_scaler.transform(train_X_10fold)
                pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
                sda_unlabel = trainSda(x_train_minmax, y_train_minmax,
                                     x_validation_minmax, y_validation_minmax ,
                                     x_test_minmax, test_y,
                                     pretraining_X_minmax = pretraining_X_minmax,
                                     hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
                                     training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
                                     pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
                                     )
                print 'hidden_layers_sizes:', hidden_layers_sizes
                print 'corruption_levels:', corruption_levels
                training_predicted = sda_unlabel.predict(x_train_minmax)
                y_train = y_train_minmax
                isTest = False; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
                test_predicted = sda_unlabel.predict(x_test_minmax)
                y_test = test_y
                isTest = True; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_test, test_predicted, with_auc_score).values()))
            if settings['DL_S']:
                # deep learning using split network
                y_test = test_y
                print 'deep learning using split network'
                # get the new representation for A set. first 784-D
                pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
                # First half of the columns = domain A, second half = domain B;
                # each half gets its own autoencoder.
                x = x_train_minmax[:, :x_train_minmax.shape[1]/2]
                print "original shape for A", x.shape
                a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
                                              hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
                new_x_train_minmax_A = a_MAE_A.transform(x_train_minmax[:, :x_train_minmax.shape[1]/2])
                x = x_train_minmax[:, x_train_minmax.shape[1]/2:]
                print "original shape for B", x.shape
                a_MAE_B = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
                                              hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
                new_x_train_minmax_B = a_MAE_B.transform(x_train_minmax[:, x_train_minmax.shape[1]/2:])
                new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax[:, :x_test_minmax.shape[1]/2])
                new_x_test_minmax_B = a_MAE_B.transform(x_test_minmax[:, x_test_minmax.shape[1]/2:])
                new_x_validation_minmax_A = a_MAE_A.transform(x_validation_minmax[:, :x_validation_minmax.shape[1]/2])
                new_x_validation_minmax_B = a_MAE_B.transform(x_validation_minmax[:, x_validation_minmax.shape[1]/2:])
                # Re-join the two learned representations for the final net.
                new_x_train_minmax_whole = np.hstack((new_x_train_minmax_A, new_x_train_minmax_B))
                new_x_test_minmax_whole = np.hstack((new_x_test_minmax_A, new_x_test_minmax_B))
                new_x_validationt_minmax_whole = np.hstack((new_x_validation_minmax_A, new_x_validation_minmax_B))
                # NOTE(review): hard-coded override of settings['training_epochs']
                # for the split network -- confirm this is intentional.
                training_epochs = 20001
                sda_transformed = trainSda(new_x_train_minmax_whole, y_train_minmax,
                                         new_x_validationt_minmax_whole, y_validation_minmax ,
                                         new_x_test_minmax_whole, y_test,
                                         hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
                                         training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
                                         pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
                                         )
                print 'hidden_layers_sizes:', hidden_layers_sizes
                print 'corruption_levels:', corruption_levels
                training_predicted = sda_transformed.predict(new_x_train_minmax_whole)
                y_train = y_train_minmax
                isTest = False; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_train, training_predicted, with_auc_score).values()))
                test_predicted = sda_transformed.predict(new_x_test_minmax_whole)
                y_test = test_y
                isTest = True; #new
                analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_test, test_predicted, with_auc_score).values()))
        # Write all accumulated score rows once per family.
        report_name = filename + '_' + '_test10fold_'.join(map(str, hidden_layers_sizes)) + '_' + str(pretrain_lr) + '_' + str(finetune_lr) + '_' + str(reduce_ratio)+ '_' + str(training_epochs) + '_' + current_date
        saveAsCsv(with_auc_score, report_name, performance_score(test_y, predicted_test_y, with_auc_score), analysis_scr)
# In[10]:
#LOO_out_performance_for_all(ddis)
#LOO_out_performance_for_all(ddis)
# Fan the per-family 10-fold evaluation out over 8 worker processes.
# process_one_ddi_tenfold traps its own exceptions, so one failing family
# does not abort the whole pool.
from multiprocessing import Pool
pool = Pool(8)
pool.map(process_one_ddi_tenfold, ddis[:])
pool.close()
pool.join()
# In[25]:
# Shut logging down cleanly: detach every handler from our logger, then
# flush and close it. The original iterated the private `logging._handlers`
# registry and called removeHandler on an undefined name `log` (NameError);
# the public `logger.handlers` list is the supported way to do this.
for _handler in list(logger.handlers):
    logger.removeHandler(_handler)
    _handler.flush()
    _handler.close()
| gpl-2.0 |
shepdelacreme/ansible | lib/ansible/modules/network/nxos/nxos_vxlan_vtep.py | 25 | 14074 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Standard Ansible module metadata: maturity level and owning support team.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_vxlan_vtep
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages VXLAN Network Virtualization Endpoint (NVE).
description:
- Manages VXLAN Network Virtualization Endpoint (NVE) overlay interface
that terminates VXLAN tunnels.
author: Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- The module is used to manage NVE properties, not to create NVE
interfaces. Use M(nxos_interface) if you wish to do so.
- C(state=absent) removes the interface.
- Default, where supported, restores params default value.
options:
interface:
description:
- Interface name for the VXLAN Network Virtualization Endpoint.
required: true
description:
description:
- Description of the NVE interface.
host_reachability:
description:
- Specify mechanism for host reachability advertisement.
type: bool
shutdown:
description:
- Administratively shutdown the NVE interface.
type: bool
source_interface:
description:
- Specify the loopback interface whose IP address should be
used for the NVE interface.
source_interface_hold_down_time:
description:
- Suppresses advertisement of the NVE loopback address until
the overlay has converged.
global_mcast_group_L3:
description:
- Global multicast ip prefix for L3 VNIs or the keyword 'default'
This is available on NX-OS 9K series running 9.2.x or higher.
version_added: "2.8"
global_mcast_group_L2:
description:
- Global multicast ip prefix for L2 VNIs or the keyword 'default'
This is available on NX-OS 9K series running 9.2.x or higher.
version_added: "2.8"
global_suppress_arp:
description:
- Enables ARP suppression for all VNIs
This is available on NX-OS 9K series running 9.2.x or higher.
type: bool
version_added: "2.8"
global_ingress_replication_bgp:
description:
- Configures ingress replication protocol as bgp for all VNIs
This is available on NX-OS 9K series running 9.2.x or higher.
type: bool
version_added: "2.8"
state:
description:
- Determines whether the config should be present or not
on the device.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- nxos_vxlan_vtep:
interface: nve1
description: default
host_reachability: default
source_interface: Loopback0
source_interface_hold_down_time: 30
shutdown: default
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["interface nve1", "source-interface loopback0",
"source-interface hold-down-time 30", "description simple description",
"shutdown", "host-reachability protocol bgp"]
'''
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import CustomNetworkConfig
# Module arguments that map to flag-style (presence/absence) CLI commands.
BOOL_PARAMS = [
    'shutdown',
    'host_reachability',
    'global_ingress_replication_bgp',
    'global_suppress_arp',
]
# Module argument name -> NX-OS CLI command fragment under "interface nveX".
PARAM_TO_COMMAND_KEYMAP = {
    'description': 'description',
    'global_suppress_arp': 'global suppress-arp',
    'global_ingress_replication_bgp': 'global ingress-replication protocol bgp',
    'global_mcast_group_L3': 'global mcast-group L3',
    'global_mcast_group_L2': 'global mcast-group L2',
    'host_reachability': 'host-reachability protocol bgp',
    'interface': 'interface',
    'shutdown': 'shutdown',
    'source_interface': 'source-interface',
    'source_interface_hold_down_time': 'source-interface hold-down-time'
}
# Device-side defaults substituted when the user passes the keyword 'default'.
PARAM_TO_DEFAULT_KEYMAP = {
    'description': False,
    'shutdown': True,
    'source_interface_hold_down_time': '180',
}
def get_value(arg, config, module):
    """Extract the current value of module argument *arg* from *config*,
    the text of the device's "interface nveX" configuration section.

    Boolean arguments are detected by the presence/absence of their CLI
    line; all other arguments are parsed out of the matching command line
    as a string.  Returns False (booleans) or '' (strings) when absent.
    """
    if arg in BOOL_PARAMS:
        REGEX = re.compile(r'\s+{0}\s*$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
        NO_SHUT_REGEX = re.compile(r'\s+no shutdown\s*$', re.M)
        value = False
        if arg == 'shutdown':
            # An explicit 'no shutdown' line wins over a bare 'shutdown' match.
            try:
                if NO_SHUT_REGEX.search(config):
                    value = False
                elif REGEX.search(config):
                    value = True
            except TypeError:
                # config may be None when the section is missing.
                value = False
        else:
            try:
                if REGEX.search(config):
                    value = True
            except TypeError:
                value = False
    else:
        REGEX = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
        NO_DESC_REGEX = re.compile(r'\s+{0}\s*$'.format('no description'), re.M)
        SOURCE_INTF_REGEX = re.compile(r'(?:{0}\s)(?P<value>\S+)$'.format(PARAM_TO_COMMAND_KEYMAP[arg]), re.M)
        value = ''
        if arg == 'description':
            if NO_DESC_REGEX.search(config):
                # An explicit 'no description' is reported as False.
                value = False
            elif PARAM_TO_COMMAND_KEYMAP[arg] in config:
                value = REGEX.search(config).group('value').strip()
        elif arg == 'source_interface':
            for line in config.splitlines():
                try:
                    if PARAM_TO_COMMAND_KEYMAP[arg] in config:
                        value = SOURCE_INTF_REGEX.search(config).group('value').strip()
                        break
                except AttributeError:
                    value = ''
        elif arg == 'global_mcast_group_L2':
            # Expected line shape: 'global mcast-group <prefix> L2';
            # token [2] is the multicast prefix.
            for line in config.splitlines():
                try:
                    if 'global mcast-group' in line and 'L2' in line:
                        value = line.split()[2].strip()
                        break
                except AttributeError:
                    value = ''
        elif arg == 'global_mcast_group_L3':
            for line in config.splitlines():
                try:
                    if 'global mcast-group' in line and 'L3' in line:
                        value = line.split()[2].strip()
                        break
                except AttributeError:
                    value = ''
        else:
            if PARAM_TO_COMMAND_KEYMAP[arg] in config:
                value = REGEX.search(config).group('value').strip()
    return value
def get_existing(module, args):
    """Read the NVE interface's current configuration from the device.

    Returns a dict keyed by module argument name with the currently
    configured values; an empty dict when the interface does not exist.
    """
    existing = {}
    netcfg = CustomNetworkConfig(indent=2, contents=get_config(module, flags=['all']))
    interface_string = 'interface {0}'.format(module.params['interface'].lower())
    parents = [interface_string]
    config = netcfg.get_section(parents)
    if config:
        for arg in args:
            existing[arg] = get_value(arg, config, module)
        existing['interface'] = module.params['interface'].lower()
    else:
        # The interface line exists but its section body is empty:
        # report it as present with blank values for every argument.
        if interface_string in str(netcfg):
            existing['interface'] = module.params['interface'].lower()
            for arg in args:
                existing[arg] = ''
    return existing
def apply_key_map(key_map, table):
    """Translate the keys of ``table`` through ``key_map``.

    Entries whose key has no (truthy) mapping in ``key_map`` are dropped;
    values are carried over unchanged, including falsy ones.

    The previous implementation contained an ``if value: ... else: ...``
    whose two branches were byte-identical, so the value was always kept
    regardless of truthiness; the dead branch is removed here.

    :param key_map: mapping of source key -> destination key
    :param table: mapping whose keys should be translated
    :return: a new dict keyed by the mapped names
    """
    return {key_map[key]: value
            for key, value in table.items()
            if key_map.get(key)}
def fix_commands(commands, module):
    """Reorder NVE configuration commands so device constraints hold.

    'host-reachability' and 'source-interface' commands must be issued
    first (source-interface ends up at the very front); their 'no ...'
    counterparts must be issued last.  Hold-down-time commands are left
    exactly where they are.  'terminal dont-ask' is prepended so the
    device does not prompt for confirmation.  The list is modified in
    place and also returned.
    """
    src_cmd = ''
    no_src_cmd = ''
    hr_cmd = ''
    no_hr_cmd = ''
    for cmd in commands:
        if 'source-interface hold-down-time' in cmd:
            # Covers both the plain and the 'no ' form; keep in place.
            continue
        if 'no source-interface' in cmd:
            no_src_cmd = cmd
        elif 'source-interface' in cmd:
            src_cmd = cmd
        elif 'no host-reachability' in cmd:
            no_hr_cmd = cmd
        elif 'host-reachability' in cmd:
            hr_cmd = cmd
    # Move positive commands to the front (source-interface first overall).
    for cmd in (hr_cmd, src_cmd):
        if cmd:
            commands.remove(cmd)
            commands.insert(0, cmd)
    # Move negations to the back (no source-interface last overall).
    for cmd in (no_hr_cmd, no_src_cmd):
        if cmd:
            commands.remove(cmd)
            commands.append(cmd)
    commands.insert(0, 'terminal dont-ask')
    return commands
def state_present(module, existing, proposed, candidate):
    """Build the command set that brings the NVE interface to the
    proposed configuration and add it to *candidate*.

    True/False values become flag commands ('cmd' / 'no cmd'); the
    keyword 'default' removes whatever is currently configured.
    """
    commands = list()
    proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
    existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
    for key, value in proposed_commands.items():
        if value is True:
            commands.append(key)
        elif value is False:
            commands.append('no {0}'.format(key))
        elif value == 'default':
            if existing_commands.get(key):
                existing_value = existing_commands.get(key)
                if 'global mcast-group' in key:
                    # mcast-group is negated without repeating the value.
                    commands.append('no {0}'.format(key))
                else:
                    commands.append('no {0} {1}'.format(key, existing_value))
            else:
                if key.replace(' ', '_').replace('-', '_') in BOOL_PARAMS:
                    commands.append('no {0}'.format(key.lower()))
                    # NOTE(review): exits early mid-loop for defaulted bool
                    # params with no existing value -- upstream behavior,
                    # preserved as-is.
                    module.exit_json(commands=commands)
        else:
            if 'L2' in key:
                commands.append('global mcast-group ' + value + ' L2')
            elif 'L3' in key:
                commands.append('global mcast-group ' + value + ' L3')
            else:
                command = '{0} {1}'.format(key, value.lower())
                commands.append(command)
    if commands:
        commands = fix_commands(commands, module)
        parents = ['interface {0}'.format(module.params['interface'].lower())]
        candidate.add(commands, parents=parents)
    else:
        # Nothing to change: still create the interface if it is missing.
        if not existing and module.params['interface']:
            commands = ['interface {0}'.format(module.params['interface'].lower())]
            candidate.add(commands, parents=[])
def state_absent(module, existing, proposed, candidate):
    """Queue removal of the whole NVE interface as a top-level command."""
    interface = module.params['interface'].lower()
    candidate.add(['no interface {0}'.format(interface)], parents=[])
def main():
    """Module entry point: diff proposed vs. existing NVE configuration
    and push the resulting commands to the device."""
    argument_spec = dict(
        interface=dict(required=True, type='str'),
        description=dict(required=False, type='str'),
        host_reachability=dict(required=False, type='bool'),
        global_ingress_replication_bgp=dict(required=False, type='bool'),
        global_suppress_arp=dict(required=False, type='bool'),
        global_mcast_group_L2=dict(required=False, type='str'),
        global_mcast_group_L3=dict(required=False, type='str'),
        shutdown=dict(required=False, type='bool'),
        source_interface=dict(required=False, type='str'),
        source_interface_hold_down_time=dict(required=False, type='str'),
        state=dict(choices=['present', 'absent'], default='present', required=False),
    )
    argument_spec.update(nxos_argument_spec)
    # mcast-group L2 and ingress-replication bgp are mutually exclusive on-box.
    mutually_exclusive = [('global_ingress_replication_bgp', 'global_mcast_group_L2')]
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=mutually_exclusive,
        supports_check_mode=True,
    )
    warnings = list()
    result = {'changed': False, 'commands': [], 'warnings': warnings}
    check_args(module, warnings)
    state = module.params['state']
    args = PARAM_TO_COMMAND_KEYMAP.keys()
    existing = get_existing(module, args)
    proposed_args = dict((k, v) for k, v in module.params.items()
                         if v is not None and k in args)
    # Normalize the 'default' keyword and drop values already configured.
    proposed = {}
    for key, value in proposed_args.items():
        if key != 'interface':
            if str(value).lower() == 'default':
                value = PARAM_TO_DEFAULT_KEYMAP.get(key)
                if value is None:
                    if key in BOOL_PARAMS:
                        value = False
                    else:
                        value = 'default'
            if str(existing.get(key)).lower() != str(value).lower():
                proposed[key] = value
    candidate = CustomNetworkConfig(indent=3)
    if state == 'present':
        if not existing:
            warnings.append("The proposed NVE interface did not exist. "
                            "It's recommended to use nxos_interface to create "
                            "all logical interfaces.")
        state_present(module, existing, proposed, candidate)
    elif state == 'absent' and existing:
        state_absent(module, existing, proposed, candidate)
    if candidate:
        candidate = candidate.items_text()
        result['commands'] = candidate
        result['changed'] = True
        load_config(module, candidate)
    module.exit_json(**result)


if __name__ == '__main__':
    main()
| gpl-3.0 |
erikriver/eduIntelligent-cynin | src/ubify.coretypes/ubify/coretypes/content/image.py | 5 | 2974 | ###############################################################################
#cyn.in is an open source Collaborative Knowledge Management Appliance that
#enables teams to seamlessly work together on files, documents and content in
#a secure central environment.
#
#cyn.in v2 an open source appliance is distributed under the GPL v3 license
#along with commercial support options.
#
#cyn.in is a Cynapse Invention.
#
#Copyright (C) 2008 Cynapse India Pvt. Ltd.
#
#This program is free software: you can redistribute it and/or modify it under
#the terms of the GNU General Public License as published by the Free Software
#Foundation, either version 3 of the License, or any later version and observe
#the Additional Terms applicable to this program and must display appropriate
#legal notices. In accordance with Section 7(b) of the GNU General Public
#License version 3, these Appropriate Legal Notices must retain the display of
#the "Powered by cyn.in" AND "A Cynapse Invention" logos. You should have
#received a copy of the detailed Additional Terms License with this program.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
#Public License for more details.
#
#You should have received a copy of the GNU General Public License along with
#this program. If not, see <http://www.gnu.org/licenses/>.
#
#You can contact Cynapse at support@cynapse.com with any problems with cyn.in.
#For any queries regarding the licensing, please send your mails to
# legal@cynapse.com
#
#You can also contact Cynapse at:
#802, Building No. 1,
#Dheeraj Sagar, Malad(W)
#Mumbai-400064, India
###############################################################################
from Products.Archetypes.atapi import *
from Products.ATContentTypes.content.image \
import ATImage as BaseClass
from Products.ATContentTypes.content.image \
import ATImageSchema as DefaultSchema
from Products.ATContentTypes.content.base import registerATCT
from ubify.coretypes.config import PROJECTNAME
from zope.event import notify
from Products.Archetypes.event import ObjectInitializedEvent, ObjectEditedEvent
schema = DefaultSchema.copy()
class ATImage(BaseClass):
    # Customizable subclass of ATContentTypes' ATImage: identical content
    # type, but with a copied schema and WebDAV-aware event notification.
    __doc__ = BaseClass.__doc__ + "(customizable version)"

    portal_type = BaseClass.portal_type
    archetype_name = BaseClass.archetype_name
    schema = schema

    def manage_afterPUT(self, data, marshall_data, file, context, mimetype,filename, REQUEST, RESPONSE):
        """Hook called after a WebDAV/FTP PUT; fires the proper
        Archetypes lifecycle event depending on whether the object is
        newly created (no title yet) or being re-uploaded."""
        is_new = False
        # An empty title means the object was just created by this PUT.
        title = self.Title()
        if not title:
            is_new = True
        BaseClass.manage_afterPUT(self, data, marshall_data, file, context, mimetype, filename, REQUEST, RESPONSE)
        if is_new:
            notify(ObjectInitializedEvent(self))
        else:
            notify(ObjectEditedEvent(self))

registerATCT(ATImage, PROJECTNAME)
| gpl-3.0 |
maskinskrift/djangocms-classbased-column | djangocms_classbasedcolumn/cms_plugins.py | 1 | 1866 | from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from .forms import MultiColumnForm
from .models import MultiColumns, Column
from django.utils.translation import ugettext_lazy as _
from cms.models import CMSPlugin
class MultiColumnPlugin(CMSPluginBase):
    """Container plugin holding a configurable number of Column children."""
    model = MultiColumns
    module = _("Multi Columns Classbased")
    name = _("Multi Columns Classbased")
    render_template = "cms/plugins/multi_column.html"
    allow_children = True
    child_classes = ["ColumnPlugin"]
    form = MultiColumnForm

    def render(self, context, instance, placeholder):
        """Expose the plugin instance and placeholder to the template."""
        context.update({
            'instance': instance,
            'placeholder': placeholder,
        })
        return context

    def save_model(self, request, obj, form, change):
        """Save the container, then create the number of Column child
        plugins requested through the form's 'create' field."""
        response = super(MultiColumnPlugin,
                         self).save_model(request, obj, form, change)
        for x in range(int(form.cleaned_data['create'])):
            # Each new column is appended at the end of the container.
            col = Column(parent=obj, placeholder=obj.placeholder,
                         language=obj.language,
                         class_name=form.cleaned_data['create_class_name'],
                         position=CMSPlugin.objects.filter(parent=obj).count(),
                         plugin_type=ColumnPlugin.__name__)
            col.save()
        return response
class ColumnPlugin(CMSPluginBase):
    """Single column; only valid as a child of MultiColumnPlugin."""
    model = Column
    module = _("Multi Columns Classbased")
    name = _("Column Classbased")
    render_template = "cms/plugins/column.html"
    parent_classes = ["MultiColumnPlugin"]
    allow_children = True

    def render(self, context, instance, placeholder):
        """Expose the plugin instance and placeholder to the template."""
        context.update({
            'instance': instance,
            'placeholder': placeholder,
        })
        return context

plugin_pool.register_plugin(MultiColumnPlugin)
plugin_pool.register_plugin(ColumnPlugin)
| bsd-3-clause |
ltilve/chromium | tools/usb_gadget/usb_descriptors_test.py | 95 | 6817 | #!/usr/bin/python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
import hid_constants
import usb_descriptors
# Minimal Descriptor subclasses, one per field-declaration helper under test.

class DescriptorWithField(usb_descriptors.Descriptor):
  pass

DescriptorWithField.AddField('bField', 'B')


# Field with a default value.
class DescriptorWithDefault(usb_descriptors.Descriptor):
  pass

DescriptorWithDefault.AddField('bDefault', 'B', default=42)


# Field whose value is fixed and may not be overridden.
class DescriptorWithFixed(usb_descriptors.Descriptor):
  pass

DescriptorWithFixed.AddFixedField('bFixed', 'B', 42)


# Field whose value is computed from a property on the instance.
class DescriptorWithComputed(usb_descriptors.Descriptor):

  @property
  def foo(self):
    return 42

DescriptorWithComputed.AddComputedField('bComputed', 'B', 'foo')


# Container that can hold child descriptors after its own fields.
class DescriptorWithDescriptors(usb_descriptors.DescriptorContainer):
  pass

DescriptorWithDescriptors.AddField('bType', 'B')
class DescriptorTest(unittest.TestCase):
  """Field-declaration behaviors of usb_descriptors.Descriptor."""

  def test_default(self):
    # A field with a default takes that value when not supplied.
    obj = DescriptorWithDefault()
    self.assertEquals(obj.bDefault, 42)

  def test_change_default(self):
    obj = DescriptorWithDefault()
    obj.bDefault = 1
    self.assertEquals(obj.bDefault, 1)

  def test_override_default(self):
    obj = DescriptorWithDefault(bDefault=56)
    self.assertEquals(obj.bDefault, 56)

  def test_fixed(self):
    obj = DescriptorWithFixed()
    self.assertEquals(obj.bFixed, 42)

  def test_set_fixed(self):
    # Fixed fields reject a constructor override.
    with self.assertRaises(RuntimeError):
      DescriptorWithFixed(bFixed=1)

  def test_modify_fixed(self):
    # Fixed fields reject assignment after construction.
    obj = DescriptorWithFixed()
    with self.assertRaises(RuntimeError):
      obj.bFixed = 1

  def test_computed(self):
    obj = DescriptorWithComputed()
    self.assertEquals(obj.bComputed, 42)

  def test_set_computed(self):
    with self.assertRaises(RuntimeError):
      DescriptorWithComputed(bComputed=1)

  def test_modify_computed(self):
    obj = DescriptorWithComputed()
    with self.assertRaises(RuntimeError):
      obj.bComputed = 1

  def test_unexpected(self):
    # Unknown constructor keywords are rejected.
    with self.assertRaisesRegexp(TypeError, 'Unexpected'):
      DescriptorWithField(bUnexpected=1)

  def test_missing(self):
    # A field with no default must be supplied.
    with self.assertRaisesRegexp(TypeError, 'Missing'):
      DescriptorWithField()

  def test_size(self):
    # A single 'B' (byte) field occupies one byte.
    obj = DescriptorWithField(bField=42)
    self.assertEquals(obj.struct_size, 1)
    self.assertEquals(obj.total_size, 1)

  def test_encode(self):
    obj = DescriptorWithField(bField=0xff)
    self.assertEquals(obj.Encode(), '\xff')

  def test_string(self):
    # str() output mentions both the field name and its value.
    obj = DescriptorWithField(bField=42)
    string = str(obj)
    self.assertIn('bField', string)
    self.assertIn('42', string)

  def test_container(self):
    # Containers encode their own fields followed by each child in order.
    parent = DescriptorWithDescriptors(bType=0)
    child1 = DescriptorWithField(bField=1)
    parent.Add(child1)
    child2 = DescriptorWithField(bField=2)
    parent.Add(child2)
    self.assertEquals(parent.total_size, 3)
    self.assertEquals(parent.Encode(), '\x00\x01\x02')
    string = str(parent)
    self.assertIn('bType', string)
    self.assertIn('bField', string)
class TestUsbDescriptors(unittest.TestCase):
  """Encoding and uniqueness rules of the concrete USB descriptor types."""

  def test_device_descriptor(self):
    device_desc = usb_descriptors.DeviceDescriptor(
        idVendor=0xDEAD,
        idProduct=0xBEEF,
        bcdDevice=0x0100,
        bNumConfigurations=1)
    # 18-byte standard device descriptor; multi-byte fields little-endian.
    self.assertEquals(
        device_desc.Encode(),
        '\x12\x01\x00\x02\x00\x00\x00\x40\xAD\xDE\xEF\xBE\x00\x01\x00\x00\x00'
        '\x01')

  def test_unique_interfaces(self):
    # The same (number, alternate) pair may not be added twice.
    interface_desc1 = usb_descriptors.InterfaceDescriptor(bInterfaceNumber=1)
    interface_desc2 = usb_descriptors.InterfaceDescriptor(bInterfaceNumber=1,
                                                          bAlternateSetting=1)
    interface_desc3 = usb_descriptors.InterfaceDescriptor(bInterfaceNumber=1)
    configuration_desc = usb_descriptors.ConfigurationDescriptor(
        bmAttributes=0xC0,
        MaxPower=100)
    configuration_desc.AddInterface(interface_desc1)
    configuration_desc.AddInterface(interface_desc2)
    with self.assertRaisesRegexp(RuntimeError, r'Interface 1 \(alternate 0\)'):
      configuration_desc.AddInterface(interface_desc3)

  def test_unique_endpoints(self):
    # The same endpoint address may not be added twice to one interface.
    endpoint_desc1 = usb_descriptors.EndpointDescriptor(
        bEndpointAddress=0x01,
        bmAttributes=0x02,
        wMaxPacketSize=64,
        bInterval=1)
    endpoint_desc2 = usb_descriptors.EndpointDescriptor(
        bEndpointAddress=0x81,
        bmAttributes=0x02,
        wMaxPacketSize=64,
        bInterval=1)
    endpoint_desc3 = usb_descriptors.EndpointDescriptor(
        bEndpointAddress=0x01,
        bmAttributes=0x01,
        wMaxPacketSize=32,
        bInterval=10)
    interface_desc = usb_descriptors.InterfaceDescriptor(bInterfaceNumber=1)
    interface_desc.AddEndpoint(endpoint_desc1)
    interface_desc.AddEndpoint(endpoint_desc2)
    with self.assertRaisesRegexp(RuntimeError, 'Endpoint 0x01 already defined'):
      interface_desc.AddEndpoint(endpoint_desc3)

  def test_configuration_descriptor(self):
    # A configuration's encoding nests interface and endpoint encodings.
    endpoint_desc = usb_descriptors.EndpointDescriptor(
        bEndpointAddress=0x01,
        bmAttributes=0x02,
        wMaxPacketSize=64,
        bInterval=1)
    encoded_endpoint = '\x07\x05\x01\x02\x40\x00\x01'
    self.assertEquals(endpoint_desc.Encode(), encoded_endpoint)
    interface_desc = usb_descriptors.InterfaceDescriptor(bInterfaceNumber=1)
    interface_desc.AddEndpoint(endpoint_desc)
    self.assertEquals([endpoint_desc], interface_desc.GetEndpoints())
    encoded_interface = ('\x09\x04\x01\x00\x01\xFF\xFF\xFF\x00' +
                         encoded_endpoint)
    self.assertEquals(interface_desc.Encode(), encoded_interface)
    configuration_desc = usb_descriptors.ConfigurationDescriptor(
        bmAttributes=0xC0,
        MaxPower=100)
    configuration_desc.AddInterface(interface_desc)
    self.assertEquals([interface_desc], configuration_desc.GetInterfaces())
    encoded_configuration = ('\x09\x02\x19\x00\x01\x01\x00\xC0\x64' +
                             encoded_interface)
    self.assertEquals(configuration_desc.Encode(), encoded_configuration)

  def test_encode_hid_descriptor(self):
    hid_desc = usb_descriptors.HidDescriptor()
    hid_desc.AddDescriptor(hid_constants.DescriptorType.REPORT, 0x80)
    hid_desc.AddDescriptor(hid_constants.DescriptorType.PHYSICAL, 0x60)
    encoded_desc = '\x0C\x21\x11\x01\x00\x02\x22\x80\x00\x23\x60\x00'
    self.assertEquals(hid_desc.Encode(), encoded_desc)

  def test_print_hid_descriptor(self):
    # str() output lists each sub-descriptor's type code.
    hid_desc = usb_descriptors.HidDescriptor()
    hid_desc.AddDescriptor(hid_constants.DescriptorType.REPORT, 0x80)
    hid_desc.AddDescriptor(hid_constants.DescriptorType.PHYSICAL, 0x60)
    string = str(hid_desc)
    self.assertIn('0x22', string)
    self.assertIn('0x23', string)


if __name__ == '__main__':
  unittest.main()
| bsd-3-clause |
Neoklosch/Motey | motey/models/service.py | 1 | 2107 | import uuid
from motey.models.image import Image
from motey.models.service_state import ServiceState
class Service(object):
    """
    Model object. Represent a service.

    A service can have multiple states, action types and service types.
    """

    def __init__(self, service_name, images, id=None, state=ServiceState.INITIAL, state_message=''):
        """
        Constructor of the service model.

        :param service_name: the name of the service
        :type service_name: str
        :param images: list of images which are associated with the service
        :type images: list
        :param id: id of the service; when omitted, a fresh uuid4 hex is
                   generated per instance.  (The previous default of
                   ``uuid.uuid4().hex`` was evaluated once at class
                   definition time, so every Service constructed without
                   an explicit id silently shared the same identifier.)
        :type id: str
        :param state: current state of the service. Default `INITIAL`.
        :type state: motey.models.service_state.ServiceState
        :param state_message: message for the current service state
        :type state_message: str
        """
        # None is the sentinel: generate a unique id per instance instead
        # of reusing a default computed at import time.
        self.id = id if id is not None else uuid.uuid4().hex
        self.service_name = service_name
        self.images = images
        self.state = state
        self.state_message = state_message

    def __iter__(self):
        # Supports dict(service); images are serialized the same way.
        yield 'id', self.id
        yield 'service_name', self.service_name
        yield 'images', [dict(image) for image in self.images]
        yield 'state', self.state
        yield 'state_message', self.state_message

    @staticmethod
    def transform(data):
        """
        Static method to translate the service dict data into a service model.

        :param data: service dict to be transformed
        :type data: dict
        :return: the translated service model, None if something went wrong
        """
        # service_name and images are mandatory; everything else defaults.
        if 'service_name' not in data or 'images' not in data:
            return None
        return Service(
            id=data['id'] if 'id' in data else uuid.uuid4().hex,
            service_name=data['service_name'],
            images=[Image.transform(image) for image in data['images']],
            state=data['state'] if 'state' in data else ServiceState.INITIAL,
            state_message=data['state_message'] if 'state_message' in data else ''
        )
| apache-2.0 |
nitinitprof/odoo | addons/mail/tests/common.py | 142 | 5740 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2012-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tests import common
class TestMail(common.SavepointCase):
    """Base test case for the mail addon: patches ir.mail_server so no
    real e-mail is built or sent, and creates users, partners and
    mail.group records shared by the concrete test classes."""

    @classmethod
    def _init_mock_build_email(cls):
        # Class-level storage recording every mocked build_email() call.
        cls._build_email_args_list = []
        cls._build_email_kwargs_list = []

    def setUp(self):
        super(TestMail, self).setUp()
        # Clear the recorded calls in place (keep the same list objects,
        # since the mock closures captured them at class setup).
        self._build_email_args_list[:] = []
        self._build_email_kwargs_list[:] = []

    @classmethod
    def setUpClass(cls):
        super(TestMail, cls).setUpClass()
        cr, uid = cls.cr, cls.uid

        def build_email(self, *args, **kwargs):
            # Record the call, then delegate to the original method.
            cls._build_email_args_list.append(args)
            cls._build_email_kwargs_list.append(kwargs)
            return build_email.origin(self, *args, **kwargs)

        def send_email(self, cr, uid, message, *args, **kwargs):
            # Pretend delivery succeeded by returning the Message-Id.
            return message['Message-Id']
        cls._init_mock_build_email()
        cls.registry('ir.mail_server')._patch_method('build_email', build_email)
        cls.registry('ir.mail_server')._patch_method('send_email', send_email)

        # Useful models
        cls.ir_model = cls.registry('ir.model')
        cls.ir_model_data = cls.registry('ir.model.data')
        cls.ir_attachment = cls.registry('ir.attachment')
        cls.mail_alias = cls.registry('mail.alias')
        cls.mail_thread = cls.registry('mail.thread')
        cls.mail_group = cls.registry('mail.group')
        cls.mail_mail = cls.registry('mail.mail')
        cls.mail_message = cls.registry('mail.message')
        cls.mail_notification = cls.registry('mail.notification')
        cls.mail_followers = cls.registry('mail.followers')
        cls.mail_message_subtype = cls.registry('mail.message.subtype')
        cls.res_users = cls.registry('res.users')
        cls.res_partner = cls.registry('res.partner')

        # Find Employee group
        cls.group_employee_id = cls.env.ref('base.group_user').id or False

        # Partner Data
        # User Data: employee, noone
        cls.user_employee_id = cls.res_users.create(cr, uid, {
            'name': 'Ernest Employee',
            'login': 'ernest',
            'alias_name': 'ernest',
            'email': 'e.e@example.com',
            'signature': '--\nErnest',
            'notify_email': 'always',
            'groups_id': [(6, 0, [cls.group_employee_id])]
        }, {'no_reset_password': True})
        cls.user_noone_id = cls.res_users.create(cr, uid, {
            'name': 'Noemie NoOne',
            'login': 'noemie',
            'alias_name': 'noemie',
            'email': 'n.n@example.com',
            'signature': '--\nNoemie',
            'notify_email': 'always',
            'groups_id': [(6, 0, [])]
        }, {'no_reset_password': True})

        # Test users to use through the various tests
        cls.res_users.write(cr, uid, uid, {'name': 'Administrator'})
        cls.user_raoul_id = cls.res_users.create(cr, uid, {
            'name': 'Raoul Grosbedon',
            'signature': 'SignRaoul',
            'email': 'raoul@raoul.fr',
            'login': 'raoul',
            'alias_name': 'raoul',
            'groups_id': [(6, 0, [cls.group_employee_id])]
        }, {'no_reset_password': True})
        cls.user_bert_id = cls.res_users.create(cr, uid, {
            'name': 'Bert Tartignole',
            'signature': 'SignBert',
            'email': 'bert@bert.fr',
            'login': 'bert',
            'alias_name': 'bert',
            'groups_id': [(6, 0, [])]
        }, {'no_reset_password': True})
        cls.user_raoul = cls.res_users.browse(cr, uid, cls.user_raoul_id)
        cls.user_bert = cls.res_users.browse(cr, uid, cls.user_bert_id)
        cls.user_admin = cls.res_users.browse(cr, uid, uid)
        cls.partner_admin_id = cls.user_admin.partner_id.id
        cls.partner_raoul_id = cls.user_raoul.partner_id.id
        cls.partner_bert_id = cls.user_bert.partner_id.id

        # Test 'pigs' group to use through the various tests
        cls.group_pigs_id = cls.mail_group.create(
            cr, uid,
            {'name': 'Pigs', 'description': 'Fans of Pigs, unite !', 'alias_name': 'group+pigs'},
            {'mail_create_nolog': True}
        )
        cls.group_pigs = cls.mail_group.browse(cr, uid, cls.group_pigs_id)
        # Test mail.group: public to provide access to everyone
        cls.group_jobs_id = cls.mail_group.create(cr, uid, {'name': 'Jobs', 'public': 'public'})
        # Test mail.group: private to restrict access
        cls.group_priv_id = cls.mail_group.create(cr, uid, {'name': 'Private', 'public': 'private'})

    @classmethod
    def tearDownClass(cls):
        # Remove mocks
        cls.registry('ir.mail_server')._revert_method('build_email')
        cls.registry('ir.mail_server')._revert_method('send_email')
        super(TestMail, cls).tearDownClass()
| agpl-3.0 |
jralls/gramps | gramps/plugins/export/exportcsv.py | 5 | 23778 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007-2008 Douglas S. Blank
# Copyright (C) 2004-2007 Donald N. Allingham
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2013 Vassilii Khachaturov
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"Export to CSV Spreadsheet."
#-------------------------------------------------------------------------
#
# Standard Python Modules
#
#-------------------------------------------------------------------------
import os
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
import csv
from io import StringIO
import codecs
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
import logging
import collections
LOG = logging.getLogger(".ExportCSV")
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.lib import EventType, Person
from gramps.gen.lib.eventroletype import EventRoleType
from gramps.gui.plug.export import WriterOptionBox
from gramps.gen.utils.string import gender as gender_map
from gramps.gen.datehandler import get_date
from gramps.gen.display.place import displayer as _pd
from gramps.gui.glade import Glade
from gramps.gen.constfunc import win
#-------------------------------------------------------------------------
#
# The function that does the exporting
#
#-------------------------------------------------------------------------
def exportData(database, filename, user, option_box=None):
    """Plugin entry point: write *database* to *filename* as CSV.

    Returns the result of CSVWriter.export_data() (success indicator).
    """
    return CSVWriter(database, filename, user, option_box).export_data()
#-------------------------------------------------------------------------
#
# Support Functions
#
#-------------------------------------------------------------------------
def sortable_string_representation(text):
    """Return a form of *text* whose lexicographic order matches a
    natural ordering: every non-digit character first (original order
    preserved), followed by all digits concatenated and zero-padded to
    ten places.
    """
    digits = ''.join(ch for ch in text if ch.isdigit())
    letters = ''.join(ch for ch in text if not ch.isdigit())
    # zfill pads up to ten characters; the slice keeps only the last ten
    # when the numeric part is longer, matching the historical behavior.
    return letters + digits.zfill(10)[-10:]
def get_primary_event_ref_from_type(db, person, event_name):
    """Return the first PRIMARY-role event reference of *person* whose
    event type matches *event_name*; None when no such event exists.

    Example (requires an open database, so not a runnable doctest):
        get_primary_event_ref_from_type(db, person, "Baptism")
    """
    for ref in person.event_ref_list:
        if ref.get_role() == EventRoleType.PRIMARY:
            event = db.get_event_from_handle(ref.ref)
            if event and event.type.is_type(event_name):
                return ref
    return None
def get_primary_source_title(db, obj):
    """Return the title of the first source cited by *obj*.

    Walks the object's citation list in order; the first citation whose
    referenced source resolves wins.  Returns the empty string when no
    citation yields a source.
    """
    citations = (db.get_citation_from_handle(handle)
                 for handle in obj.get_citation_list())
    for citation in citations:
        source = db.get_source_from_handle(citation.get_reference_handle())
        if source:
            return source.get_title()
    return ""
#-------------------------------------------------------------------------
#
# CSVWriter Options
#
#-------------------------------------------------------------------------
class CSVWriterOptionBox(WriterOptionBox):
    """
    Create a VBox with the option widgets and define methods to retrieve
    the options.
    """
    def __init__(self, person, dbstate, uistate, track=[], window=None):
        WriterOptionBox.__init__(self, person, dbstate, uistate, track=track,
                                 window=window)
        ## TODO: add place filter selection
        # All options default to enabled (1); the check widgets are created
        # lazily in get_option_box() because the GUI may never be shown.
        self.include_individuals = 1
        self.include_marriages = 1
        self.include_children = 1
        self.include_places = 1
        self.translate_headers = 1
        self.include_individuals_check = None
        self.include_marriages_check = None
        self.include_children_check = None
        self.include_places_check = None
        self.translate_headers_check = None

    def get_option_box(self):
        """Build and return the Gtk option widget tree."""
        from gi.repository import Gtk
        option_box = WriterOptionBox.get_option_box(self)
        self.include_individuals_check = Gtk.CheckButton(label=_("Include people"))
        self.include_marriages_check = Gtk.CheckButton(label=_("Include marriages"))
        self.include_children_check = Gtk.CheckButton(label=_("Include children"))
        self.include_places_check = Gtk.CheckButton(label=_("Include places"))
        self.translate_headers_check = Gtk.CheckButton(label=_("Translate headers"))
        self.include_individuals_check.set_active(1)
        self.include_marriages_check.set_active(1)
        self.include_children_check.set_active(1)
        self.include_places_check.set_active(1)
        self.translate_headers_check.set_active(1)
        option_box.pack_start(self.include_individuals_check, False, True, 0)
        option_box.pack_start(self.include_marriages_check, False, True, 0)
        option_box.pack_start(self.include_children_check, False, True, 0)
        option_box.pack_start(self.include_places_check, False, True, 0)
        option_box.pack_start(self.translate_headers_check, False, True, 0)
        return option_box

    def parse_options(self):
        """Copy the widget states back into the plain option attributes."""
        WriterOptionBox.parse_options(self)
        # The checks exist only if get_option_box() was actually called.
        if self.include_individuals_check:
            self.include_individuals = self.include_individuals_check.get_active()
            self.include_marriages = self.include_marriages_check.get_active()
            self.include_children = self.include_children_check.get_active()
            self.include_places = self.include_places_check.get_active()
            self.translate_headers = self.translate_headers_check.get_active()
#-------------------------------------------------------------------------
#
# CSVWriter class
#
#-------------------------------------------------------------------------
class CSVWriter:
def __init__(self, database, filename, user, option_box=None):
self.db = database
self.option_box = option_box
self.filename = filename
self.user = user
if isinstance(self.user.callback, collections.Callable): # callback is really callable
self.update = self.update_real
else:
self.update = self.update_empty
self.plist = {}
self.flist = {}
self.place_list = {}
self.persons_details_done = []
self.persons_notes_done = []
self.person_ids = {}
if not option_box:
self.include_individuals = 1
self.include_marriages = 1
self.include_children = 1
self.include_places = 1
self.translate_headers = 1
else:
self.option_box.parse_options()
self.db = option_box.get_filtered_database(self.db)
self.include_individuals = self.option_box.include_individuals
self.include_marriages = self.option_box.include_marriages
self.include_children = self.option_box.include_children
self.include_places = self.option_box.include_places
self.translate_headers = self.option_box.translate_headers
self.plist = [x for x in self.db.iter_person_handles()]
# make place list so that dependencies are first:
self.place_list = []
place_list = sorted([x for x in self.db.iter_place_handles()])
while place_list:
handle = place_list[0]
place = self.db.get_place_from_handle(handle)
if place:
if all([(x.ref in self.place_list) for x in place.placeref_list]):
self.place_list.append(place_list.pop(0))
else: # put at the back of the line:
place_list.append(place_list.pop(0))
else:
place_list.pop(0)
# get the families for which these people are spouses:
self.flist = {}
for key in self.plist:
p = self.db.get_person_from_handle(key)
if p:
for family_handle in p.get_family_handle_list():
self.flist[family_handle] = 1
# now add the families for which these people are a child:
for family_handle in self.db.iter_family_handles():
family = self.db.get_family_from_handle(family_handle)
if family:
for child_ref in family.get_child_ref_list():
if child_ref:
child_handle = child_ref.ref
if child_handle in self.plist:
self.flist[family_handle] = 1
def update_empty(self):
pass
def update_real(self):
self.count += 1
newval = int(100*self.count/self.total)
if newval != self.oldval:
self.user.callback(newval)
self.oldval = newval
def writeln(self):
self.g.writerow([])
def write_csv(self, *items):
self.g.writerow(items)
def export_data(self):
self.dirname = os.path.dirname (self.filename)
try:
self.fp = open(self.filename, "w",
encoding='utf_8_sig' if win() else 'utf_8',
newline='')
self.g = csv.writer(self.fp)
except IOError as msg:
msg2 = _("Could not create %s") % self.filename
self.user.notify_error(msg2,str(msg))
return False
except:
self.user.notify_error(_("Could not create %s") % self.filename)
return False
######################### initialize progress bar
self.count = 0
self.total = 0
self.oldval = 0
if self.include_individuals:
self.total += len(self.plist)
if self.include_marriages:
self.total += len(self.flist)
if self.include_children:
self.total += len(self.flist)
if self.include_places:
self.total += len(self.place_list)
########################
LOG.debug("Possible people to export: %s", len(self.plist))
LOG.debug("Possible families to export: %s", len(self.flist))
LOG.debug("Possible places to export: %s", len(self.place_list))
###########################
if self.include_places:
if self.translate_headers:
self.write_csv(_("Place"), _("Title"), _("Name"),
_("Type"), _("Latitude"), _("Longitude"),
_("Code"), _("Enclosed_by"), _("Date"))
else:
self.write_csv("Place", "Title", "Name",
"Type", "Latitude", "Longitude",
"Code", "Enclosed_by", "Date")
for key in self.place_list:
place = self.db.get_place_from_handle(key)
if place:
place_id = place.gramps_id
place_title = place.title
place_name = place.name.value
place_type = str(place.place_type)
place_latitude = place.lat
place_longitude = place.long
place_code = place.code
if place.placeref_list:
for placeref in place.placeref_list:
placeref_obj = self.db.get_place_from_handle(placeref.ref)
placeref_date = ""
if not placeref.date.is_empty():
placeref_date = placeref.date
placeref_id = ""
if placeref_obj:
placeref_id = "[%s]" % placeref_obj.gramps_id
self.write_csv("[%s]" % place_id, place_title, place_name, place_type,
place_latitude, place_longitude, place_code, placeref_id,
placeref_date)
else:
self.write_csv("[%s]" % place_id, place_title, place_name, place_type,
place_latitude, place_longitude, place_code, "",
"")
self.update()
self.writeln()
########################### sort:
sortorder = []
dropped_surnames = set()
for key in self.plist:
person = self.db.get_person_from_handle(key)
if person:
primary_name = person.get_primary_name()
first_name = primary_name.get_first_name()
surname_obj = primary_name.get_primary_surname()
surname = surname_obj.get_surname()
# See bug #6955
nonprimary_surnames = set(primary_name.get_surname_list())
nonprimary_surnames.remove(surname_obj)
dropped_surnames.update(nonprimary_surnames)
sortorder.append( (surname, first_name, key) )
if dropped_surnames:
LOG.warning(
_("CSV export doesn't support non-primary surnames, "
"{count} dropped").format(
count=len(dropped_surnames)) )
LOG.debug(
"Dropped surnames: " +
', '.join([("%s %s %s" % (surname.get_prefix(),
surname.get_surname(), surname.get_connector())).strip()
for surname in dropped_surnames]))
sortorder.sort() # will sort on tuples
plist = [data[2] for data in sortorder]
###########################
if self.include_individuals:
if self.translate_headers:
self.write_csv(
_("Person"), _("Surname"), _("Given"),
_("Call"), _("Suffix"), _("Prefix"),
_("Person|Title"), _("Gender"),
_("Birth date"), _("Birth place"), _("Birth source"),
_("Baptism date"), _("Baptism place"), _("Baptism source"),
_("Death date"), _("Death place"), _("Death source"),
_("Burial date"), _("Burial place"), _("Burial source"),
_("Note"))
else:
self.write_csv(
"Person", "Surname", "Given",
"Call", "Suffix", "Prefix",
"Title", "Gender",
"Birth date", "Birth place", "Birth source",
"Baptism date", "Baptism place", "Baptism source",
"Death date", "Death place", "Death source",
"Burial date", "Burial place", "Burial source",
"Note")
for key in plist:
person = self.db.get_person_from_handle(key)
if person:
primary_name = person.get_primary_name()
first_name = primary_name.get_first_name()
surname_obj = primary_name.get_primary_surname()
surname = surname_obj.get_surname()
prefix = surname_obj.get_prefix()
suffix = primary_name.get_suffix()
title = primary_name.get_title()
grampsid = person.get_gramps_id()
grampsid_ref = ""
if grampsid != "":
grampsid_ref = "[" + grampsid + "]"
note = '' # don't export notes
callname = primary_name.get_call_name()
gender = person.get_gender()
if gender == Person.MALE:
gender = gender_map[Person.MALE]
elif gender == Person.FEMALE:
gender = gender_map[Person.FEMALE]
else:
gender = gender_map[Person.UNKNOWN]
# Birth:
birthdate = ""
birthplace = ""
birthsource = ""
birth_ref = person.get_birth_ref()
if birth_ref:
birth = self.db.get_event_from_handle(birth_ref.ref)
if birth:
birthdate = self.format_date( birth)
birthplace = self.format_place(birth)
birthsource = get_primary_source_title(self.db, birth)
# Baptism:
baptismdate = ""
baptismplace = ""
baptismsource = ""
baptism_ref = get_primary_event_ref_from_type(
self.db, person, "Baptism")
if baptism_ref:
baptism = self.db.get_event_from_handle(baptism_ref.ref)
if baptism:
baptismdate = self.format_date( baptism)
baptismplace = self.format_place(baptism)
baptismsource = get_primary_source_title(self.db, baptism)
# Death:
deathdate = ""
deathplace = ""
deathsource = ""
death_ref = person.get_death_ref()
if death_ref:
death = self.db.get_event_from_handle(death_ref.ref)
if death:
deathdate = self.format_date( death)
deathplace = self.format_place(death)
deathsource = get_primary_source_title(self.db, death)
# Burial:
burialdate = ""
burialplace = ""
burialsource = ""
burial_ref = get_primary_event_ref_from_type(
self.db, person, "Burial")
if burial_ref:
burial = self.db.get_event_from_handle(burial_ref.ref)
if burial:
burialdate = self.format_date( burial)
burialplace = self.format_place(burial)
burialsource = get_primary_source_title(self.db, burial)
# Write it out:
self.write_csv(grampsid_ref, surname, first_name, callname,
suffix, prefix, title, gender,
birthdate, birthplace, birthsource,
baptismdate, baptismplace, baptismsource,
deathdate, deathplace, deathsource,
burialdate, burialplace, burialsource,
note)
self.update()
self.writeln()
########################### sort:
sortorder = []
for key in self.flist:
family = self.db.get_family_from_handle(key)
if family:
marriage_id = family.get_gramps_id()
sortorder.append(
(sortable_string_representation(marriage_id), key)
)
sortorder.sort() # will sort on tuples
flist = [data[1] for data in sortorder]
###########################
if self.include_marriages:
if self.translate_headers:
self.write_csv(_("Marriage"), _("Husband"), _("Wife"),
_("Date"), _("Place"), _("Source"), _("Note"))
else:
self.write_csv("Marriage", "Husband", "Wife",
"Date", "Place", "Source", "Note")
for key in flist:
family = self.db.get_family_from_handle(key)
if family:
marriage_id = family.get_gramps_id()
if marriage_id != "":
marriage_id = "[" + marriage_id + "]"
mother_id = ''
father_id = ''
father_handle = family.get_father_handle()
if father_handle:
father = self.db.get_person_from_handle(father_handle)
father_id = father.get_gramps_id()
if father_id != "":
father_id = "[" + father_id + "]"
mother_handle = family.get_mother_handle()
if mother_handle:
mother = self.db.get_person_from_handle(mother_handle)
mother_id = mother.get_gramps_id()
if mother_id != "":
mother_id = "[" + mother_id + "]"
# get mdate, mplace
mdate, mplace, source = '', '', ''
event_ref_list = family.get_event_ref_list()
for event_ref in event_ref_list:
event = self.db.get_event_from_handle(event_ref.ref)
if event.get_type() == EventType.MARRIAGE:
mdate = self.format_date( event)
mplace = self.format_place(event)
source = get_primary_source_title(self.db, event)
note = ''
self.write_csv(marriage_id, father_id, mother_id, mdate,
mplace, source, note)
self.update()
self.writeln()
if self.include_children:
if self.translate_headers:
self.write_csv(_("Family"), _("Child"))
else:
self.write_csv("Family", "Child")
for key in flist:
family = self.db.get_family_from_handle(key)
if family:
family_id = family.get_gramps_id()
if family_id != "":
family_id = "[" + family_id + "]"
for child_ref in family.get_child_ref_list():
child_handle = child_ref.ref
child = self.db.get_person_from_handle(child_handle)
grampsid = child.get_gramps_id()
grampsid_ref = ""
if grampsid != "":
grampsid_ref = "[" + grampsid + "]"
self.write_csv(family_id, grampsid_ref)
self.update()
self.writeln()
self.fp.close()
return True
def format_date(self, date):
return get_date(date)
def format_place(self, event):
"""
If places are included in the export return a link, else return a
formatted place for the given event.
"""
if self.include_places:
place_handle = event.get_place_handle()
if place_handle:
place = self.db.get_place_from_handle(place_handle)
if place:
return "[%s]" % place.get_gramps_id()
return ""
else:
return _pd.display_event(self.db, event)
| gpl-2.0 |
sanzcarlos/CiscoCollab | axl_zeep.py | 1 | 10430 | #! /usr/bin/python3
# -*- coding: iso-8859-15 -*-
# *------------------------------------------------------------------
# * axl_zeep.py
# *
# * Cisco AXL Python
# *
# * Copyright (C) 2021 Carlos Sanz <carlos.sanzpenas@gmail.com>
# *
# * This program is free software; you can redistribute it and/or
# * modify it under the terms of the GNU General Public License
# * as published by the Free Software Foundation; either version 2
# * of the License, or (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# *------------------------------------------------------------------
# *
# Import Modules
from lxml import etree
from requests import Session
from requests.auth import HTTPBasicAuth
from zeep import Client, Settings, Plugin, xsd
from zeep.transports import Transport
from zeep.cache import SqliteCache
from zeep.plugins import HistoryPlugin
from zeep.exceptions import Fault
from prettytable import PrettyTable
from configobj import ConfigObj
import getopt
import logging
import sys
import platform
import time
import uuid
import os
import csv
import urllib3
import json
import pprint
class PrettyLog():
def __init__(self, obj):
self.obj = obj
def __repr__(self):
return pprint.pformat(self.obj)
# Argumentos pasados por linea de comandos
def parse_command_line(args):
logger.debug('Ha entrado en la funcion parse_command_line()')
global element_config_file
global cspconfigfile
try:
# Aceptamos
opts, args = getopt.getopt(args[1:],"hc:f:",["help", "config-file=", "csv-file="])
except getopt.GetoptError as err:
print (str(err))
logger.info(get_usage())
sys.exit(2)
"""
* options:
* -c, --config-file <Config file>
"""
for option, args in opts:
if option in ("-h", "--help"):
logger.debug('Mostrando la Ayuda')
logger.info(get_usage())
sys.exit()
elif option in ("-c", "--config-file"):
logger.debug('Se ha pasado un fichero de configuracion')
element_config_file = 'conf/' + args
logger.info('Ha seleccionado el fichero de configuracion: %s' % (element_config_file))
cspconfigfile = ConfigObj(element_config_file)
# No se ha pasado un fichero de configuracion como argumento del script
if(element_config_file==None):
logger.info(get_usage())
csp_table_file=PrettyTable(['id', 'Filename'])
csp_table_id=0
csp_dir = 'conf/'
csp_file = []
logger.debug('Buscamos todos los archivos *.cfg del directorio conf/')
for file in os.listdir(csp_dir):
if file.endswith(".cfg"):
csp_file.append(file)
csp_table_file.add_row([csp_table_id,file])
csp_table_id += 1
logger.debug('El numero de ficheros de configuracion es: %d',csp_table_id)
# Si solo tenemos un fichero de configuracion, vamos a utilizar ese fichero, en caso contrario se pedira que nos digan que fichero de configuracion tenemos que utilizar.
if csp_table_id == 1:
element_config_file = csp_dir + csp_file[0]
logger.info('Ha seleccionado el fichero de configuracion: %s' % (element_config_file))
cspconfigfile = ConfigObj(element_config_file)
return {'Status':True,'Detail': element_config_file}
else:
print (csp_table_file)
csp_file_config = input('Seleccione el archivo de configuracion: ')
if int(csp_file_config) > csp_table_id - 1:
logger.error('Ha seleccionado un fichero erroneo')
return False
else:
element_config_file = csp_dir + csp_file[int(csp_file_config)]
logger.info('Ha seleccionado el fichero de configuracion: %s' % (element_config_file))
cspconfigfile = ConfigObj(element_config_file)
return {'Status':True,'Detail': element_config_file}
return True
# Help function
def get_usage():
logger.debug('Ha entrado en la funcion get_usage()')
return "Uso: -c <Config file>"
# This class lets you view the incoming and outgoing http headers and/or XML
class MyLoggingPlugin(Plugin):
def ingress(self, envelope, http_headers, operation):
print(etree.tostring(envelope, pretty_print=True))
return envelope, http_headers
def egress(self, envelope, http_headers, operation, binding_options):
print(etree.tostring(envelope, pretty_print=True))
return envelope, http_headers
# Funcion para crear el cliente SOAP que atacara a Cisco Unified Communications Manager
def client_soap(config_file):
logger.debug('Ha entrado en la funcion client_soap()')
csp_cmserver = cspconfigfile['CUCM']['server']
csp_username = cspconfigfile['CUCM']['user']
csp_password = cspconfigfile['CUCM']['pass']
csp_version = cspconfigfile['CUCM']['version']
if platform.system() == 'Windows':
logger.debug('El sistema operativo es: %s' % (platform.system()))
wsdl = 'file://' + os.getcwd().replace ("\\","//") + '//Schema//CUCM//' + csp_version + '//AXLAPI.wsdl'
else:
logger.debug('El sistema operativo es: %s' % (platform.system()))
wsdl = 'file://' + os.getcwd() + '/Schema/CUCM/' + csp_version + '/AXLAPI.wsdl'
csp_location = 'https://' + csp_cmserver + '/axl/'
logger.debug('El valor de csp_cmserver es: %s' % (csp_cmserver))
logger.debug('El valor de csp_username es: %s' % (csp_username))
logger.debug('El valor de csp_version es: %s' % (csp_version))
logger.debug('El valor de csp_location es: %s' % (csp_location))
logger.debug('El valor de wsdl es: %s' % (wsdl))
# history shows http_headers
global history
history = HistoryPlugin()
# The first step is to create a SOAP client session
session = Session()
# We avoid certificate verification by default, but you can uncomment and set
# your certificate here, and comment out the False setting
#session.verify = CERT
session.verify = False
session.auth = HTTPBasicAuth(csp_username, csp_password)
transport = Transport(session=session, timeout=10, cache=SqliteCache())
# strict=False is not always necessary, but it allows zeep to parse imperfect XML
settings = Settings(strict=False, xml_huge_tree=True)
try:
csp_soap_client = Client(wsdl,
settings=settings,
transport=transport,
plugins=[MyLoggingPlugin(),history],
)
service = csp_soap_client.create_service("{http://www.cisco.com/AXLAPIService/}AXLAPIBinding", csp_location)
except:
logger.error('Se ha producido un error al crear el cliente soap')
logger.debug(sys.exc_info())
logger.error(sys.exc_info()[1])
sys.exit()
else:
logger.info('Se ha creado el cliente SOAP.')
return service
# Funcion para dar de alta una sede
def AltaSede(logger,csp_soap_client, cspconfigfile):
'''
# *------------------------------------------------------------------
# * function AltaSede(logger,csp_soap_client, cspconfigfile):
# *
# * Copyright (C) 2021 Carlos Sanz <carlos.sanzpenas@gmail.com>
# *
# *------------------------------------------------------------------
'''
# Main Function
if __name__=='__main__':
logging.basicConfig(level=logging.INFO,
format='%(asctime)-25s %(name)s [%(process)d]: %(levelname)-8s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename='Log/' + time.strftime("%Y%m%d-%H%M%S-") + str(uuid.uuid4()) + '.log',
filemode='w',
)
urllib3.disable_warnings()
element_config_file = None
history = None
logger = logging.getLogger('cisco.cucm.axl.zeep')
logger.setLevel(logging.DEBUG)
console = logging.StreamHandler()
#formatter = logging.Formatter('%(asctime)-25s %(name)s [%(process)d]: %(levelname)-8s %(message)s')
formatter = logging.Formatter('%(asctime)-22s | %(filename)s:%(lineno)-4s | %(levelname)-9s | %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
console.setFormatter(formatter)
console.setLevel=logger.setLevel
logging.getLogger('').addHandler(console)
logger.info('Estamos usando Python v%s' % (platform.python_version()))
'''
logger.debug('This is a debug message %s' % (variable))
logger.info('This is an info message')
logger.warning('This is a warning message')
logger.error('This is an error message')
logger.critical('This is a critical error message')
'''
# Llamamos a la funcion parse_command_line
if not parse_command_line(sys.argv):
logger.error("Error in parsing arguments")
sys.exit(1)
logger.info('Se ha seleccionado el cliente: %s' % (cspconfigfile['INFO']['customer'].upper()))
# Creamos nuestro cliente SOAP con los parametros del fichero de configuracion
service = client_soap(element_config_file)
'''
Codigo para verificar que esta funcionando la conexion SOAP con el CUCM
soap_data = {
'userid': 'enrique.sacido'
}
try:
user_resp = service.getUser(**soap_data)
except Fault as err:
logger.error('Se ha producido un error en la consulta SOAP: %s' % format(err))
logger.debug(sys.exc_info())
logger.error(sys.exc_info()[1])
sys.exit()
else:
logger.info('getUser Response:\n %s' % user_resp)
logger.debug('HTTP Last Send:\n %s' % PrettyLog(history.last_sent))
logger.debug('HTTP Last Received:\n %s' % PrettyLog(history.last_received))
'''
#CiscoCustomer.Customer(logger, csp_soap_client,cspconfigfile)
#Customer(logger, csp_soap_client,cspconfigfile)
logger.info('Se cerrara el programa')
sys.exit() | mit |
voxmedia/thumbor | tests/test_app.py | 2 | 1712 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from unittest import TestCase
import mock
from preggy import expect
from thumbor.app import (
ThumborServiceApp
)
from thumbor.url import Url
class AppTestCase(TestCase):
def test_can_create_app(self):
ctx = mock.Mock()
app = ThumborServiceApp(ctx)
expect(app).not_to_be_null()
expect(app.context).to_equal(ctx)
def test_can_get_handlers(self):
ctx = mock.Mock(
config=mock.Mock(
UPLOAD_ENABLED=False,
USE_BLACKLIST=False,
HEALTHCHECK_ROUTE=r'/healthcheck',
)
)
app = ThumborServiceApp(ctx)
handlers = app.get_handlers()
expect(handlers).to_length(2)
expect(handlers[0][0]).to_equal(r'/healthcheck')
expect(handlers[1][0]).to_equal(Url.regex())
def test_can_get_handlers_with_upload(self):
ctx = mock.Mock(
config=mock.Mock(
UPLOAD_ENABLED=True,
USE_BLACKLIST=False,
)
)
app = ThumborServiceApp(ctx)
handlers = app.get_handlers()
expect(handlers).to_length(4)
def test_can_get_handlers_with_blacklist(self):
ctx = mock.Mock(
config=mock.Mock(
UPLOAD_ENABLED=False,
USE_BLACKLIST=True,
)
)
app = ThumborServiceApp(ctx)
handlers = app.get_handlers()
expect(handlers).to_length(3)
| mit |
ressu/SickGear | lib/dateutil/relativedelta.py | 216 | 17224 | """
Copyright (c) 2003-2010 Gustavo Niemeyer <gustavo@niemeyer.net>
This module offers extensions to the standard Python
datetime module.
"""
__license__ = "Simplified BSD"
import datetime
import calendar
from six import integer_types
__all__ = ["relativedelta", "MO", "TU", "WE", "TH", "FR", "SA", "SU"]
class weekday(object):
__slots__ = ["weekday", "n"]
def __init__(self, weekday, n=None):
self.weekday = weekday
self.n = n
def __call__(self, n):
if n == self.n:
return self
else:
return self.__class__(self.weekday, n)
def __eq__(self, other):
try:
if self.weekday != other.weekday or self.n != other.n:
return False
except AttributeError:
return False
return True
def __repr__(self):
s = ("MO", "TU", "WE", "TH", "FR", "SA", "SU")[self.weekday]
if not self.n:
return s
else:
return "%s(%+d)" % (s, self.n)
MO, TU, WE, TH, FR, SA, SU = weekdays = tuple([weekday(x) for x in range(7)])
class relativedelta(object):
"""
The relativedelta type is based on the specification of the excelent
work done by M.-A. Lemburg in his mx.DateTime extension. However,
notice that this type does *NOT* implement the same algorithm as
his work. Do *NOT* expect it to behave like mx.DateTime's counterpart.
There's two different ways to build a relativedelta instance. The
first one is passing it two date/datetime classes:
relativedelta(datetime1, datetime2)
And the other way is to use the following keyword arguments:
year, month, day, hour, minute, second, microsecond:
Absolute information.
years, months, weeks, days, hours, minutes, seconds, microseconds:
Relative information, may be negative.
weekday:
One of the weekday instances (MO, TU, etc). These instances may
receive a parameter N, specifying the Nth weekday, which could
be positive or negative (like MO(+1) or MO(-2). Not specifying
it is the same as specifying +1. You can also use an integer,
where 0=MO.
leapdays:
Will add given days to the date found, if year is a leap
year, and the date found is post 28 of february.
yearday, nlyearday:
Set the yearday or the non-leap year day (jump leap days).
These are converted to day/month/leapdays information.
Here is the behavior of operations with relativedelta:
1) Calculate the absolute year, using the 'year' argument, or the
original datetime year, if the argument is not present.
2) Add the relative 'years' argument to the absolute year.
3) Do steps 1 and 2 for month/months.
4) Calculate the absolute day, using the 'day' argument, or the
original datetime day, if the argument is not present. Then,
subtract from the day until it fits in the year and month
found after their operations.
5) Add the relative 'days' argument to the absolute day. Notice
that the 'weeks' argument is multiplied by 7 and added to
'days'.
6) Do steps 1 and 2 for hour/hours, minute/minutes, second/seconds,
microsecond/microseconds.
7) If the 'weekday' argument is present, calculate the weekday,
with the given (wday, nth) tuple. wday is the index of the
weekday (0-6, 0=Mon), and nth is the number of weeks to add
forward or backward, depending on its signal. Notice that if
the calculated date is already Monday, for example, using
(0, 1) or (0, -1) won't change the day.
"""
def __init__(self, dt1=None, dt2=None,
years=0, months=0, days=0, leapdays=0, weeks=0,
hours=0, minutes=0, seconds=0, microseconds=0,
year=None, month=None, day=None, weekday=None,
yearday=None, nlyearday=None,
hour=None, minute=None, second=None, microsecond=None):
if dt1 and dt2:
if (not isinstance(dt1, datetime.date)) or (not isinstance(dt2, datetime.date)):
raise TypeError("relativedelta only diffs datetime/date")
if not type(dt1) == type(dt2): #isinstance(dt1, type(dt2)):
if not isinstance(dt1, datetime.datetime):
dt1 = datetime.datetime.fromordinal(dt1.toordinal())
elif not isinstance(dt2, datetime.datetime):
dt2 = datetime.datetime.fromordinal(dt2.toordinal())
self.years = 0
self.months = 0
self.days = 0
self.leapdays = 0
self.hours = 0
self.minutes = 0
self.seconds = 0
self.microseconds = 0
self.year = None
self.month = None
self.day = None
self.weekday = None
self.hour = None
self.minute = None
self.second = None
self.microsecond = None
self._has_time = 0
months = (dt1.year*12+dt1.month)-(dt2.year*12+dt2.month)
self._set_months(months)
dtm = self.__radd__(dt2)
if dt1 < dt2:
while dt1 > dtm:
months += 1
self._set_months(months)
dtm = self.__radd__(dt2)
else:
while dt1 < dtm:
months -= 1
self._set_months(months)
dtm = self.__radd__(dt2)
delta = dt1 - dtm
self.seconds = delta.seconds+delta.days*86400
self.microseconds = delta.microseconds
else:
self.years = years
self.months = months
self.days = days+weeks*7
self.leapdays = leapdays
self.hours = hours
self.minutes = minutes
self.seconds = seconds
self.microseconds = microseconds
self.year = year
self.month = month
self.day = day
self.hour = hour
self.minute = minute
self.second = second
self.microsecond = microsecond
if isinstance(weekday, integer_types):
self.weekday = weekdays[weekday]
else:
self.weekday = weekday
yday = 0
if nlyearday:
yday = nlyearday
elif yearday:
yday = yearday
if yearday > 59:
self.leapdays = -1
if yday:
ydayidx = [31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 366]
for idx, ydays in enumerate(ydayidx):
if yday <= ydays:
self.month = idx+1
if idx == 0:
self.day = yday
else:
self.day = yday-ydayidx[idx-1]
break
else:
raise ValueError("invalid year day (%d)" % yday)
self._fix()
def _fix(self):
if abs(self.microseconds) > 999999:
s = self.microseconds//abs(self.microseconds)
div, mod = divmod(self.microseconds*s, 1000000)
self.microseconds = mod*s
self.seconds += div*s
if abs(self.seconds) > 59:
s = self.seconds//abs(self.seconds)
div, mod = divmod(self.seconds*s, 60)
self.seconds = mod*s
self.minutes += div*s
if abs(self.minutes) > 59:
s = self.minutes//abs(self.minutes)
div, mod = divmod(self.minutes*s, 60)
self.minutes = mod*s
self.hours += div*s
if abs(self.hours) > 23:
s = self.hours//abs(self.hours)
div, mod = divmod(self.hours*s, 24)
self.hours = mod*s
self.days += div*s
if abs(self.months) > 11:
s = self.months//abs(self.months)
div, mod = divmod(self.months*s, 12)
self.months = mod*s
self.years += div*s
if (self.hours or self.minutes or self.seconds or self.microseconds or
self.hour is not None or self.minute is not None or
self.second is not None or self.microsecond is not None):
self._has_time = 1
else:
self._has_time = 0
def _set_months(self, months):
self.months = months
if abs(self.months) > 11:
s = self.months//abs(self.months)
div, mod = divmod(self.months*s, 12)
self.months = mod*s
self.years = div*s
else:
self.years = 0
def __add__(self, other):
if isinstance(other, relativedelta):
return relativedelta(years=other.years+self.years,
months=other.months+self.months,
days=other.days+self.days,
hours=other.hours+self.hours,
minutes=other.minutes+self.minutes,
seconds=other.seconds+self.seconds,
microseconds=other.microseconds+self.microseconds,
leapdays=other.leapdays or self.leapdays,
year=other.year or self.year,
month=other.month or self.month,
day=other.day or self.day,
weekday=other.weekday or self.weekday,
hour=other.hour or self.hour,
minute=other.minute or self.minute,
second=other.second or self.second,
microsecond=other.microsecond or self.microsecond)
if not isinstance(other, datetime.date):
raise TypeError("unsupported type for add operation")
elif self._has_time and not isinstance(other, datetime.datetime):
other = datetime.datetime.fromordinal(other.toordinal())
year = (self.year or other.year)+self.years
month = self.month or other.month
if self.months:
assert 1 <= abs(self.months) <= 12
month += self.months
if month > 12:
year += 1
month -= 12
elif month < 1:
year -= 1
month += 12
day = min(calendar.monthrange(year, month)[1],
self.day or other.day)
repl = {"year": year, "month": month, "day": day}
for attr in ["hour", "minute", "second", "microsecond"]:
value = getattr(self, attr)
if value is not None:
repl[attr] = value
days = self.days
if self.leapdays and month > 2 and calendar.isleap(year):
days += self.leapdays
ret = (other.replace(**repl)
+ datetime.timedelta(days=days,
hours=self.hours,
minutes=self.minutes,
seconds=self.seconds,
microseconds=self.microseconds))
if self.weekday:
weekday, nth = self.weekday.weekday, self.weekday.n or 1
jumpdays = (abs(nth)-1)*7
if nth > 0:
jumpdays += (7-ret.weekday()+weekday)%7
else:
jumpdays += (ret.weekday()-weekday)%7
jumpdays *= -1
ret += datetime.timedelta(days=jumpdays)
return ret
def __radd__(self, other):
return self.__add__(other)
def __rsub__(self, other):
return self.__neg__().__radd__(other)
def __sub__(self, other):
if not isinstance(other, relativedelta):
raise TypeError("unsupported type for sub operation")
return relativedelta(years=self.years-other.years,
months=self.months-other.months,
days=self.days-other.days,
hours=self.hours-other.hours,
minutes=self.minutes-other.minutes,
seconds=self.seconds-other.seconds,
microseconds=self.microseconds-other.microseconds,
leapdays=self.leapdays or other.leapdays,
year=self.year or other.year,
month=self.month or other.month,
day=self.day or other.day,
weekday=self.weekday or other.weekday,
hour=self.hour or other.hour,
minute=self.minute or other.minute,
second=self.second or other.second,
microsecond=self.microsecond or other.microsecond)
def __neg__(self):
return relativedelta(years=-self.years,
months=-self.months,
days=-self.days,
hours=-self.hours,
minutes=-self.minutes,
seconds=-self.seconds,
microseconds=-self.microseconds,
leapdays=self.leapdays,
year=self.year,
month=self.month,
day=self.day,
weekday=self.weekday,
hour=self.hour,
minute=self.minute,
second=self.second,
microsecond=self.microsecond)
def __bool__(self):
return not (not self.years and
not self.months and
not self.days and
not self.hours and
not self.minutes and
not self.seconds and
not self.microseconds and
not self.leapdays and
self.year is None and
self.month is None and
self.day is None and
self.weekday is None and
self.hour is None and
self.minute is None and
self.second is None and
self.microsecond is None)
def __mul__(self, other):
f = float(other)
return relativedelta(years=int(self.years*f),
months=int(self.months*f),
days=int(self.days*f),
hours=int(self.hours*f),
minutes=int(self.minutes*f),
seconds=int(self.seconds*f),
microseconds=int(self.microseconds*f),
leapdays=self.leapdays,
year=self.year,
month=self.month,
day=self.day,
weekday=self.weekday,
hour=self.hour,
minute=self.minute,
second=self.second,
microsecond=self.microsecond)
__rmul__ = __mul__
def __eq__(self, other):
if not isinstance(other, relativedelta):
return False
if self.weekday or other.weekday:
if not self.weekday or not other.weekday:
return False
if self.weekday.weekday != other.weekday.weekday:
return False
n1, n2 = self.weekday.n, other.weekday.n
if n1 != n2 and not ((not n1 or n1 == 1) and (not n2 or n2 == 1)):
return False
return (self.years == other.years and
self.months == other.months and
self.days == other.days and
self.hours == other.hours and
self.minutes == other.minutes and
self.seconds == other.seconds and
self.leapdays == other.leapdays and
self.year == other.year and
self.month == other.month and
self.day == other.day and
self.hour == other.hour and
self.minute == other.minute and
self.second == other.second and
self.microsecond == other.microsecond)
def __ne__(self, other):
return not self.__eq__(other)
def __div__(self, other):
return self.__mul__(1/float(other))
__truediv__ = __div__
def __repr__(self):
l = []
for attr in ["years", "months", "days", "leapdays",
"hours", "minutes", "seconds", "microseconds"]:
value = getattr(self, attr)
if value:
l.append("%s=%+d" % (attr, value))
for attr in ["year", "month", "day", "weekday",
"hour", "minute", "second", "microsecond"]:
value = getattr(self, attr)
if value is not None:
l.append("%s=%s" % (attr, repr(value)))
return "%s(%s)" % (self.__class__.__name__, ", ".join(l))
# vim:ts=4:sw=4:et
| gpl-3.0 |
GoogleCloudPlatform/professional-services | examples/bq_benchmarks/generic_benchmark_tools/avro_util.py | 2 | 2111 | # Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import avro.schema
class AvroUtil(object):
    """Helps with handling avro files.

    Contains functionality that allows extraction of BigQuery tables to GCS in
    avro format.

    Attributes:
        bq_schema(List[google.cloud.bigquery.schema.SchemaField]):
            schema of BigQuery table that will be extracted to an avro
            file.
        schema_name(str): name that will be given to the avro schema.
    """

    def __init__(self, bq_schema, schema_name):
        self.bq_schema = bq_schema
        self.schema_name = schema_name

    def get_avro_translated_schema(self):
        """Translates a BigQuery schema to an avro schema.

        Returns: Translated Avro schema.

        Raises:
            ValueError: If the BigQuery schema contains a field type with
                no known avro translation.
        """
        # Only STRING and NUMERIC are currently translatable; NUMERIC maps
        # to avro's decimal logical type with BigQuery's fixed (38, 9)
        # precision/scale.
        type_conversions = {
            'STRING': 'string',
            'NUMERIC': {
                'type': 'bytes',
                'logicalType': 'decimal',
                'precision': 38,
                'scale': 9,
            }
        }
        fields = []
        # TODO(annarudy@google.com): add support for nested fields
        for bq_field in self.bq_schema:
            try:
                field_type = type_conversions[bq_field.field_type]
            except KeyError:
                # Fail with an actionable message instead of a bare
                # KeyError on the type name.
                raise ValueError(
                    'BigQuery field type {0} is not supported for avro '
                    'translation.'.format(bq_field.field_type))
            field = {
                'name': bq_field.name,
                'type': field_type,
            }
            fields.append(field)
        schema_dict = {
            'type': 'record',
            'name': self.schema_name,
            'fields': fields,
        }
        avro_schema = avro.schema.Parse(json.dumps(schema_dict))
        return avro_schema
| apache-2.0 |
napkindrawing/ansible | lib/ansible/modules/network/cloudengine/ce_netstream_aging.py | 45 | 18169 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'}
DOCUMENTATION = """
---
module: ce_netstream_aging
version_added: "2.4"
short_description: Manages timeout mode of NetStream on HUAWEI CloudEngine switches.
description:
- Manages timeout mode of NetStream on HUAWEI CloudEngine switches.
author: YangYang (@CloudEngine-Ansible)
options:
timeout_interval:
description:
- Netstream timeout interval.
              If the type is active, the interval is 1-60.
              If the type is inactive, the interval is 5-600.
required: false
default: 30
type:
description:
- Specifies the packet type of netstream timeout active interval.
required: false
choices: ['ip', 'vxlan']
default: null
state:
description:
- Specify desired state of the resource.
required: false
choices: ['present', 'absent']
default: present
timeout_type:
description:
- Netstream timeout type.
required: false
choices: ['active', 'inactive', 'tcp-session', 'manual']
default: null
manual_slot:
description:
- Specifies the slot number of netstream manual timeout.
required: false
default: null
"""
EXAMPLES = '''
- name: netstream aging module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Configure netstream ip timeout active interval , the interval is 40 minutes.
ce_netstream_aging:
timeout_interval: 40
type: ip
timeout_type: active
state: present
provider: "{{ cli }}"
- name: Configure netstream vxlan timeout active interval , the interval is 40 minutes.
ce_netstream_aging:
timeout_interval: 40
type: vxlan
timeout_type: active
      state: present
provider: "{{ cli }}"
- name: Delete netstream ip timeout active interval , set the ip timeout interval to 30 minutes.
ce_netstream_aging:
type: ip
timeout_type: active
state: absent
provider: "{{ cli }}"
- name: Delete netstream vxlan timeout active interval , set the vxlan timeout interval to 30 minutes.
ce_netstream_aging:
type: vxlan
timeout_type: active
state: absent
provider: "{{ cli }}"
- name: Enable netstream ip tcp session timeout.
ce_netstream_aging:
type: ip
timeout_type: tcp-session
state: present
provider: "{{ cli }}"
- name: Enable netstream vxlan tcp session timeout.
ce_netstream_aging:
type: vxlan
timeout_type: tcp-session
state: present
provider: "{{ cli }}"
- name: Disable netstream ip tcp session timeout.
ce_netstream_aging:
type: ip
timeout_type: tcp-session
state: absent
provider: "{{ cli }}"
- name: Disable netstream vxlan tcp session timeout.
ce_netstream_aging:
type: vxlan
timeout_type: tcp-session
state: absent
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: verbose mode
type: dict
sample: {"timeout_interval": "40",
"type": "ip",
"state": "absent",
"timeout_type": active}
existing:
description: k/v pairs of existing configuration
returned: verbose mode
type: dict
sample: {"active_timeout": [
{
"ip": "40",
"vxlan": 30
}
],
"inactive_timeout": [
{
"ip": 30,
"vxlan": 30
}
],
"tcp_timeout": [
{
"ip": "disable",
"vxlan": "disable"
}
]}
end_state:
description: k/v pairs of configuration after module execution
returned: verbose mode
type: dict
sample: {"active_timeout": [
{
"ip": 30,
"vxlan": 30
}
],
"inactive_timeout": [
{
"ip": 30,
"vxlan": 30
}
],
"tcp_timeout": [
{
"ip": "disable",
"vxlan": "disable"
}
]}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["undo netstream timeout ip active 40"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import get_config, load_config
from ansible.module_utils.ce import ce_argument_spec
class NetStreamAging(object):
    """
    Manages netstream aging.
    """

    def __init__(self, argument_spec):
        self.spec = argument_spec
        self.module = None
        self.init_module()

        # module input info
        self.timeout_interval = self.module.params['timeout_interval']
        self.type = self.module.params['type']
        self.state = self.module.params['state']
        self.timeout_type = self.module.params['timeout_type']
        self.manual_slot = self.module.params['manual_slot']

        # host info
        self.host = self.module.params['host']
        self.username = self.module.params['username']
        self.port = self.module.params['port']

        # state
        self.changed = False
        self.updates_cmd = list()
        self.commands = list()
        self.results = dict()
        self.proposed = dict()
        self.existing = dict()
        self.end_state = dict()

        # local parameters
        self.existing["active_timeout"] = list()
        self.existing["inactive_timeout"] = list()
        self.existing["tcp_timeout"] = list()
        self.end_state["active_timeout"] = list()
        self.end_state["inactive_timeout"] = list()
        self.end_state["tcp_timeout"] = list()
        self.active_changed = False
        self.inactive_changed = False
        self.tcp_changed = False

    def init_module(self):
        """init module"""
        self.module = AnsibleModule(argument_spec=self.spec, supports_check_mode=True)

    def cli_load_config(self, commands):
        """load config by cli"""
        if not self.module.check_mode:
            load_config(self.module, commands)

    def cli_add_command(self, command, undo=False):
        """add command to self.update_cmd and self.commands"""
        if undo and command.lower() not in ["quit", "return"]:
            cmd = "undo " + command
        else:
            cmd = command

        self.commands.append(cmd)
        if command.lower() not in ["quit", "return"]:
            self.updates_cmd.append(cmd)

    def _read_timer_out_para(self):
        """Parse the device's current netstream timeout configuration.

        Returns:
            A (active, inactive, tcp) tuple of dicts keyed by packet type
            ('ip'/'vxlan'), pre-filled with the device defaults ("30" for
            intervals, "absent" for tcp-session) so unconfigured values
            fall back to the defaults.
        """
        active_tmp = dict()
        inactive_tmp = dict()
        tcp_tmp = dict()
        active_tmp["ip"] = "30"
        active_tmp["vxlan"] = "30"
        inactive_tmp["ip"] = "30"
        inactive_tmp["vxlan"] = "30"
        tcp_tmp["ip"] = "absent"
        tcp_tmp["vxlan"] = "absent"
        flags = list()
        exp = " | ignore-case include netstream timeout"
        flags.append(exp)
        config = get_config(self.module, flags)
        if config:
            config = config.lstrip()
            config_list = config.split('\n')
            for config_mem in config_list:
                # Config lines look like:
                #   netstream timeout ip active <n>
                #   netstream timeout vxlan inner-ip active <n>
                config_mem = config_mem.lstrip()
                config_mem_list = config_mem.split(' ')
                if len(config_mem_list) < 3:
                    # skip blank/unparseable lines instead of crashing
                    continue
                if config_mem_list[2] == "ip":
                    if config_mem_list[3] == "active":
                        active_tmp["ip"] = config_mem_list[4]
                    if config_mem_list[3] == "inactive":
                        inactive_tmp["ip"] = config_mem_list[4]
                    if config_mem_list[3] == "tcp-session":
                        tcp_tmp["ip"] = "present"
                if config_mem_list[2] == "vxlan":
                    if config_mem_list[4] == "active":
                        active_tmp["vxlan"] = config_mem_list[5]
                    if config_mem_list[4] == "inactive":
                        inactive_tmp["vxlan"] = config_mem_list[5]
                    if config_mem_list[4] == "tcp-session":
                        tcp_tmp["vxlan"] = "present"
        return active_tmp, inactive_tmp, tcp_tmp

    def get_exist_timer_out_para(self):
        """Get exist netstream timeout parameters"""
        active_tmp, inactive_tmp, tcp_tmp = self._read_timer_out_para()
        self.existing["active_timeout"].append(active_tmp)
        self.existing["inactive_timeout"].append(inactive_tmp)
        self.existing["tcp_timeout"].append(tcp_tmp)

    def get_end_timer_out_para(self):
        """Get end netstream timeout parameters"""
        # Bug fix: this method used to overwrite the netstream timeout
        # filter with an unrelated "evpn-overlay enable" filter before
        # querying the device, so the reported end state was parsed from
        # the wrong configuration lines.
        active_tmp, inactive_tmp, tcp_tmp = self._read_timer_out_para()
        self.end_state["active_timeout"].append(active_tmp)
        self.end_state["inactive_timeout"].append(inactive_tmp)
        self.end_state["tcp_timeout"].append(tcp_tmp)

    def check_params(self):
        """Check all input params"""
        # interval check
        if not str(self.timeout_interval).isdigit():
            self.module.fail_json(
                msg='Error: Timeout interval should be numerical.')
        if self.timeout_type == "active":
            if int(self.timeout_interval) < 1 or int(self.timeout_interval) > 60:
                self.module.fail_json(
                    msg="Error: Active interval should between 1 - 60 minutes.")
        if self.timeout_type == "inactive":
            if int(self.timeout_interval) < 5 or int(self.timeout_interval) > 600:
                self.module.fail_json(
                    msg="Error: Inactive interval should between 5 - 600 seconds.")
        if self.timeout_type == "manual":
            if not self.manual_slot:
                self.module.fail_json(
                    msg="Error: If use manual timeout mode,slot number is needed.")
            if not str(self.manual_slot).isdigit():
                self.module.fail_json(
                    msg='Error: Slot number should be numerical.')

    def get_proposed(self):
        """get proposed info"""
        if self.timeout_interval:
            self.proposed["timeout_interval"] = self.timeout_interval
        if self.timeout_type:
            self.proposed["timeout_type"] = self.timeout_type
        if self.type:
            self.proposed["type"] = self.type
        if self.state:
            self.proposed["state"] = self.state
        if self.manual_slot:
            self.proposed["manual_slot"] = self.manual_slot

    def get_existing(self):
        """get existing info"""
        # NOTE(review): `type` is not validated as required here; a missing
        # `type` with a timeout_type set would KeyError below -- confirm
        # upstream argument handling before tightening.
        self.get_exist_timer_out_para()
        if self.timeout_type == "active":
            for active_tmp in self.existing["active_timeout"]:
                if self.state == "present":
                    if str(active_tmp[self.type]) != self.timeout_interval:
                        self.active_changed = True
                else:
                    if self.timeout_interval != "30":
                        if str(active_tmp[self.type]) != "30":
                            if str(active_tmp[self.type]) != self.timeout_interval:
                                self.module.fail_json(
                                    msg='Error: The specified active interval do not exist.')
                    if str(active_tmp[self.type]) != "30":
                        self.timeout_interval = active_tmp[self.type]
                        self.active_changed = True
        if self.timeout_type == "inactive":
            for inactive_tmp in self.existing["inactive_timeout"]:
                if self.state == "present":
                    if str(inactive_tmp[self.type]) != self.timeout_interval:
                        self.inactive_changed = True
                else:
                    if self.timeout_interval != "30":
                        if str(inactive_tmp[self.type]) != "30":
                            if str(inactive_tmp[self.type]) != self.timeout_interval:
                                self.module.fail_json(
                                    msg='Error: The specified inactive interval do not exist.')
                    if str(inactive_tmp[self.type]) != "30":
                        self.timeout_interval = inactive_tmp[self.type]
                        self.inactive_changed = True
        if self.timeout_type == "tcp-session":
            for tcp_tmp in self.existing["tcp_timeout"]:
                if str(tcp_tmp[self.type]) != self.state:
                    self.tcp_changed = True

    def operate_time_out(self):
        """configure timeout parameters"""
        cmd = ""
        if self.timeout_type == "manual":
            # manual mode issues an immediate cache reset for the slot
            if self.type == "ip":
                self.cli_add_command("quit")
                cmd = "reset netstream cache ip slot %s" % self.manual_slot
                self.cli_add_command(cmd)
            elif self.type == "vxlan":
                self.cli_add_command("quit")
                cmd = "reset netstream cache vxlan inner-ip slot %s" % self.manual_slot
                self.cli_add_command(cmd)

        if not self.active_changed and not self.inactive_changed and not self.tcp_changed:
            if self.commands:
                self.cli_load_config(self.commands)
                self.changed = True
            return

        if self.active_changed or self.inactive_changed:
            if self.type == "ip":
                cmd = "netstream timeout ip %s %s" % (self.timeout_type, self.timeout_interval)
            elif self.type == "vxlan":
                cmd = "netstream timeout vxlan inner-ip %s %s" % (self.timeout_type, self.timeout_interval)
            if self.state == "absent":
                self.cli_add_command(cmd, undo=True)
            else:
                self.cli_add_command(cmd)
        if self.timeout_type == "tcp-session" and self.tcp_changed:
            if self.type == "ip":
                if self.state == "present":
                    cmd = "netstream timeout ip tcp-session"
                else:
                    cmd = "undo netstream timeout ip tcp-session"
            elif self.type == "vxlan":
                if self.state == "present":
                    cmd = "netstream timeout vxlan inner-ip tcp-session"
                else:
                    cmd = "undo netstream timeout vxlan inner-ip tcp-session"
            self.cli_add_command(cmd)
        if self.commands:
            self.cli_load_config(self.commands)
            self.changed = True

    def get_end_state(self):
        """get end state info"""
        self.get_end_timer_out_para()

    def work(self):
        """worker"""
        self.check_params()
        self.get_existing()
        self.get_proposed()
        self.operate_time_out()
        self.get_end_state()
        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        if self.changed:
            self.results['updates'] = self.updates_cmd
        else:
            self.results['updates'] = list()
        self.module.exit_json(**self.results)
def main():
    """Module entry point: build the argument spec and run the worker."""
    spec = dict(
        timeout_interval=dict(required=False, type='str', default='30'),
        type=dict(required=False, choices=['ip', 'vxlan']),
        state=dict(required=False, choices=['present', 'absent'],
                   default='present'),
        timeout_type=dict(required=False,
                          choices=['active', 'inactive', 'tcp-session', 'manual']),
        manual_slot=dict(required=False, type='str'),
    )
    # merge in the common CloudEngine connection arguments
    spec.update(ce_argument_spec)
    NetStreamAging(spec).work()


if __name__ == '__main__':
    main()
| gpl-3.0 |
Praveen-1987/devstack-Quantumleap | files/pip-1.4.1/pip/commands/__init__.py | 476 | 2236 | """
Package containing all pip commands
"""
from pip.commands.bundle import BundleCommand
from pip.commands.completion import CompletionCommand
from pip.commands.freeze import FreezeCommand
from pip.commands.help import HelpCommand
from pip.commands.list import ListCommand
from pip.commands.search import SearchCommand
from pip.commands.show import ShowCommand
from pip.commands.install import InstallCommand
from pip.commands.uninstall import UninstallCommand
from pip.commands.unzip import UnzipCommand
from pip.commands.zip import ZipCommand
from pip.commands.wheel import WheelCommand
# Registry mapping command name -> command class; populated from the
# explicit imports above.
commands = {
    BundleCommand.name: BundleCommand,
    CompletionCommand.name: CompletionCommand,
    FreezeCommand.name: FreezeCommand,
    HelpCommand.name: HelpCommand,
    SearchCommand.name: SearchCommand,
    ShowCommand.name: ShowCommand,
    InstallCommand.name: InstallCommand,
    UninstallCommand.name: UninstallCommand,
    UnzipCommand.name: UnzipCommand,
    ZipCommand.name: ZipCommand,
    ListCommand.name: ListCommand,
    WheelCommand.name: WheelCommand,
}

# Display order used by get_summaries(ordered=True); commands missing from
# this list sort last (see _sort_commands).
commands_order = [
    InstallCommand,
    UninstallCommand,
    FreezeCommand,
    ListCommand,
    ShowCommand,
    SearchCommand,
    WheelCommand,
    ZipCommand,
    UnzipCommand,
    BundleCommand,
    HelpCommand,
]
def get_summaries(ignore_hidden=True, ordered=True):
    """Yields sorted (command name, command summary) tuples."""
    items = (_sort_commands(commands, commands_order) if ordered
             else commands.items())
    for name, command_class in items:
        if not (ignore_hidden and command_class.hidden):
            yield (name, command_class.summary)
def get_similar_commands(name):
    """Command name auto-correct: return the closest known command name,
    or False when nothing is similar enough."""
    from difflib import get_close_matches

    matches = get_close_matches(name, commands.keys())
    return matches[0] if matches else False
def _sort_commands(cmddict, order):
    """Return cmddict.items() sorted by each command class's position in
    *order*; classes not listed in *order* sort after all listed ones."""
    def keyfn(item):
        _, command_class = item
        if command_class in order:
            return order.index(command_class)
        # unordered items should come last
        return 0xff

    return sorted(cmddict.items(), key=keyfn)
| apache-2.0 |
jokajak/itweb | data/env/lib/python2.6/site-packages/nose-0.11.4-py2.6.egg/nose/plugins/testid.py | 3 | 9606 | """
This plugin adds a test id (like #1) to each test name output. After
you've run once to generate test ids, you can re-run individual
tests by activating the plugin and passing the ids (with or
without the # prefix) instead of test names.
For example, if your normal test run looks like::
% nosetests -v
tests.test_a ... ok
tests.test_b ... ok
tests.test_c ... ok
When adding ``--with-id`` you'll see::
% nosetests -v --with-id
#1 tests.test_a ... ok
#2 tests.test_b ... ok
    #3 tests.test_c ... ok
Then you can re-run individual tests by supplying just an id number::
% nosetests -v --with-id 2
#2 tests.test_b ... ok
You can also pass multiple id numbers::
% nosetests -v --with-id 2 3
#2 tests.test_b ... ok
#3 tests.test_c ... ok
Since most shells consider '#' a special character, you can leave it out when
specifying a test id.
Note that when run without the -v switch, no special output is displayed, but
the ids file is still written.
Looping over failed tests
-------------------------
This plugin also adds a mode that will direct the test runner to record
failed tests. Subsequent test runs will then run only the tests that failed
last time. Activate this mode with the ``--failed`` switch::
% nosetests -v --failed
#1 test.test_a ... ok
#2 test.test_b ... ERROR
#3 test.test_c ... FAILED
#4 test.test_d ... ok
On the second run, only tests #2 and #3 will run::
% nosetests -v --failed
#2 test.test_b ... ERROR
#3 test.test_c ... FAILED
As you correct errors and tests pass, they'll drop out of subsequent runs.
First::
% nosetests -v --failed
#2 test.test_b ... ok
#3 test.test_c ... FAILED
Second::
% nosetests -v --failed
#3 test.test_c ... FAILED
When all tests pass, the full set will run on the next invocation.
First::
% nosetests -v --failed
#3 test.test_c ... ok
Second::
% nosetests -v --failed
#1 test.test_a ... ok
#2 test.test_b ... ok
#3 test.test_c ... ok
#4 test.test_d ... ok
.. note ::
    If you expect to use ``--failed`` regularly, it's a good idea to always
    run using the ``--with-id`` option. This will ensure that an id file is
always created, allowing you to add ``--failed`` to the command line as soon
as you have failing tests. Otherwise, your first run using ``--failed`` will
(perhaps surprisingly) run *all* tests, because there won't be an id file
containing the record of failed tests from your previous run.
"""
__test__ = False
import logging
import os
from nose.plugins import Plugin
from nose.util import src, set
try:
from cPickle import dump, load
except ImportError:
from pickle import dump, load
log = logging.getLogger(__name__)
class TestId(Plugin):
    """
    Activate to add a test id (like #1) to each test name output. Activate
    with --failed to rerun failing tests only.
    """
    name = 'id'
    idfile = None
    # True while new ids are being assigned this run; set to False once we
    # are only translating/filtering ids loaded from a previous run.
    collecting = True
    loopOnFailed = False

    def options(self, parser, env):
        """Register commandline options.
        """
        Plugin.options(self, parser, env)
        parser.add_option('--id-file', action='store', dest='testIdFile',
                          default='.noseids', metavar="FILE",
                          help="Store test ids found in test runs in this "
                          "file. Default is the file .noseids in the "
                          "working directory.")
        parser.add_option('--failed', action='store_true',
                          dest='failed', default=False,
                          help="Run the tests that failed in the last "
                          "test run.")

    def configure(self, options, conf):
        """Configure plugin.
        """
        Plugin.configure(self, options, conf)
        if options.failed:
            self.enabled = True
            self.loopOnFailed = True
            log.debug("Looping on failed tests")
        self.idfile = os.path.expanduser(options.testIdFile)
        if not os.path.isabs(self.idfile):
            self.idfile = os.path.join(conf.workingDir, self.idfile)
        self.id = 1
        # Ids and tests are mirror images: ids are {id: test address} and
        # tests are {test address: id}
        self.ids = {}
        self.tests = {}
        self.failed = []
        self.source_names = []
        # used to track ids seen when tests is filled from
        # loaded ids file
        self._seen = {}
        # only print "#N" prefixes when running verbosely
        self._write_hashes = options.verbosity >= 2

    def finalize(self, result):
        """Save new ids file, if needed.
        """
        if result.wasSuccessful():
            self.failed = []
        if self.collecting:
            # invert {address: id} back into {id: address} for storage
            ids = dict(zip(self.tests.values(), self.tests.keys()))
        else:
            ids = self.ids
        fh = open(self.idfile, 'w')
        dump({'ids': ids,
              'failed': self.failed,
              'source_names': self.source_names}, fh)
        fh.close()
        log.debug('Saved test ids: %s, failed %s to %s',
                  ids, self.failed, self.idfile)

    def loadTestsFromNames(self, names, module=None):
        """Translate ids in the list of requested names into their
        test addresses, if they are found in my dict of tests.
        """
        log.debug('ltfn %s %s', names, module)
        try:
            fh = open(self.idfile, 'r')
            data = load(fh)
            if 'ids' in data:
                self.ids = data['ids']
                self.failed = data['failed']
                self.source_names = data['source_names']
            else:
                # old ids field
                self.ids = data
                self.failed = []
                self.source_names = names
            if self.ids:
                self.id = max(self.ids) + 1
                self.tests = dict(zip(self.ids.values(), self.ids.keys()))
            else:
                self.id = 1
            log.debug(
                'Loaded test ids %s tests %s failed %s sources %s from %s',
                self.ids, self.tests, self.failed, self.source_names,
                self.idfile)
            fh.close()
        except IOError:
            # no ids file yet -- first run, nothing to translate
            log.debug('IO error reading %s', self.idfile)

        if self.loopOnFailed and self.failed:
            self.collecting = False
            names = self.failed
            self.failed = []
        # I don't load any tests myself, only translate names like '#2'
        # into the associated test addresses
        translated = []
        new_source = []
        really_new = []
        for name in names:
            trans = self.tr(name)
            if trans != name:
                translated.append(trans)
            else:
                new_source.append(name)
        # names that are not ids and that are not in the current
        # list of source names go into the list for next time
        if new_source:
            new_set = set(new_source)
            old_set = set(self.source_names)
            log.debug("old: %s new: %s", old_set, new_set)
            really_new = [s for s in new_source
                          if not s in old_set]
            if really_new:
                # remember new sources
                self.source_names.extend(really_new)
            if not translated:
                # new set of source names, no translations
                # means "run the requested tests"
                names = new_source
        else:
            # no new names to translate and add to id set
            self.collecting = False
        log.debug("translated: %s new sources %s names %s",
                  translated, really_new, names)
        return (None, translated + really_new or names)

    def makeName(self, addr):
        # Convert a (filename, module, call) test address back into a
        # loadable "path:callable" (or module/path-only) name.
        log.debug("Make name %s", addr)
        filename, module, call = addr
        if filename is not None:
            head = src(filename)
        else:
            head = module
        if call is not None:
            return "%s:%s" % (head, call)
        return head

    def setOutputStream(self, stream):
        """Get handle on output stream so the plugin can print id #s
        """
        self.stream = stream

    def startTest(self, test):
        """Maybe output an id # before the test name.

        Example output::

          #1 test.test ... ok
          #2 test.test_two ... ok
        """
        adr = test.address()
        log.debug('start test %s (%s)', adr, adr in self.tests)
        if adr in self.tests:
            # already known: print its id only the first time it is seen
            if adr in self._seen:
                self.write('   ')
            else:
                self.write('#%s ' % self.tests[adr])
                self._seen[adr] = 1
            return
        self.tests[adr] = self.id
        self.write('#%s ' % self.id)
        self.id += 1

    def afterTest(self, test):
        # None means test never ran, False means failed/err
        if test.passed is False:
            try:
                key = str(self.tests[test.address()])
            except KeyError:
                # never saw this test -- startTest didn't run
                pass
            else:
                if key not in self.failed:
                    self.failed.append(key)

    def tr(self, name):
        # Translate "#N" (or plain "N") into the stored test name; return
        # the input unchanged when it is not a known id.
        log.debug("tr '%s'", name)
        try:
            key = int(name.replace('#', ''))
        except ValueError:
            return name
        log.debug("Got key %s", key)
        # I'm running tests mapped from the ids file,
        # not collecting new ones
        if key in self.ids:
            return self.makeName(self.ids[key])
        return name

    def write(self, output):
        # suppressed unless verbosity demanded the "#N" prefixes
        if self._write_hashes:
            self.stream.write(output)
| gpl-3.0 |
sonnyhu/numpy | numpy/distutils/fcompiler/g95.py | 229 | 1379 | # http://g95.sourceforge.net/
from __future__ import division, absolute_import, print_function
from numpy.distutils.fcompiler import FCompiler
# Compiler classes exported by this module; numpy.distutils discovers
# available Fortran compilers through this list.
compilers = ['G95FCompiler']


class G95FCompiler(FCompiler):
    # numpy.distutils wrapper for the G95 Fortran compiler.
    compiler_type = 'g95'
    description = 'G95 Fortran Compiler'

    # version_pattern = r'G95 \((GCC (?P<gccversion>[\d.]+)|.*?) \(g95!\) (?P<version>.*)\).*'
    # $ g95 --version
    # G95 (GCC 4.0.3 (g95!) May 22 2006)

    version_pattern = r'G95 \((GCC (?P<gccversion>[\d.]+)|.*?) \(g95 (?P<version>.*)!\) (?P<date>.*)\).*'
    # $ g95 --version
    # G95 (GCC 4.0.3 (g95 0.90!) Aug 22 2006)

    # Command templates; "<F90>" is substituted with the f90 compiler path.
    executables = {
        'version_cmd'  : ["<F90>", "--version"],
        'compiler_f77' : ["g95", "-ffixed-form"],
        'compiler_fix' : ["g95", "-ffixed-form"],
        'compiler_f90' : ["g95"],
        'linker_so'    : ["<F90>", "-shared"],
        'archiver'     : ["ar", "-cr"],
        'ranlib'       : ["ranlib"]
        }
    pic_flags = ['-fpic']
    module_dir_switch = '-fmod='
    module_include_switch = '-I'

    def get_flags(self):
        # Disable g95's extra trailing underscore in symbol names.
        return ['-fno-second-underscore']

    def get_flags_opt(self):
        return ['-O']

    def get_flags_debug(self):
        return ['-g']


if __name__ == '__main__':
    # Manual smoke test: detect and print the installed g95 version.
    from distutils import log
    log.set_verbosity(2)
    compiler = G95FCompiler()
    compiler.customize()
    print(compiler.get_version())
| bsd-3-clause |
CasparLi/calibre | src/calibre/ebooks/conversion/plugins/oeb_output.py | 19 | 4883 | from __future__ import with_statement
__license__ = 'GPL 3'
__copyright__ = '2009, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os, re
from calibre.customize.conversion import (OutputFormatPlugin,
OptionRecommendation)
from calibre import CurrentDir
class OEBOutput(OutputFormatPlugin):

    name = 'OEB Output'
    author = 'Kovid Goyal'
    file_type = 'oeb'

    recommendations = set([('pretty_print', True, OptionRecommendation.HIGH)])

    def convert(self, oeb_book, output_path, input_plugin, opts, log):
        """Serialize oeb_book as a directory of OEB files at output_path.

        Writes the OPF/NCX/page-map documents first (applying cover
        workarounds and language-code migration to the OPF), then every
        manifest item.
        """
        from urllib import unquote
        from lxml import etree
        self.log, self.opts = log, opts
        if not os.path.exists(output_path):
            os.makedirs(output_path)
        from calibre.ebooks.oeb.base import OPF_MIME, NCX_MIME, PAGE_MAP_MIME, OEB_STYLES
        from calibre.ebooks.oeb.normalize_css import condense_sheet
        with CurrentDir(output_path):
            results = oeb_book.to_opf2(page_map=True)
            for key in (OPF_MIME, NCX_MIME, PAGE_MAP_MIME):
                href, root = results.pop(key, [None, None])
                if root is not None:
                    if key == OPF_MIME:
                        # Workarounds are best-effort: log and continue on
                        # failure. Bug fix: these were bare `except:`
                        # clauses, which also swallowed SystemExit and
                        # KeyboardInterrupt.
                        try:
                            self.workaround_nook_cover_bug(root)
                        except Exception:
                            self.log.exception('Something went wrong while trying to'
                                    ' workaround Nook cover bug, ignoring')
                        try:
                            self.workaround_pocketbook_cover_bug(root)
                        except Exception:
                            self.log.exception('Something went wrong while trying to'
                                    ' workaround Pocketbook cover bug, ignoring')
                        self.migrate_lang_code(root)
                    raw = etree.tostring(root, pretty_print=True,
                            encoding='utf-8', xml_declaration=True)
                    if key == OPF_MIME:
                        # Needed as I can't get lxml to output opf:role and
                        # not output <opf:metadata> as well
                        raw = re.sub(r'(<[/]{0,1})opf:', r'\1', raw)
                    with open(href, 'wb') as f:
                        f.write(raw)

            for item in oeb_book.manifest:
                if (
                        not self.opts.expand_css and
                        item.media_type in OEB_STYLES and hasattr(item.data, 'cssText') and
                        'nook' not in self.opts.output_profile.short_name):
                    condense_sheet(item.data)
                path = os.path.abspath(unquote(item.href))
                # renamed from `dir`, which shadowed the builtin
                parent_dir = os.path.dirname(path)
                if not os.path.exists(parent_dir):
                    os.makedirs(parent_dir)
                with open(path, 'wb') as f:
                    f.write(str(item))
                item.unload_data_from_memory(memory=path)

    def workaround_nook_cover_bug(self, root):  # {{{
        """Ensure the cover image manifest item has id="cover".

        If the <meta name="cover"> element points at an image item with a
        different id, swap ids around so the cover item is id="cover"
        (works around a Nook Color bug; see the warning below).
        """
        cov = root.xpath('//*[local-name() = "meta" and @name="cover" and'
                ' @content != "cover"]')

        def manifest_items_with_id(id_):
            return root.xpath('//*[local-name() = "manifest"]/*[local-name() = "item" '
                ' and @id="%s"]'%id_)

        if len(cov) == 1:
            cov = cov[0]
            covid = cov.get('content', '')
            if covid:
                manifest_item = manifest_items_with_id(covid)
                if len(manifest_item) == 1 and \
                        manifest_item[0].get('media-type',
                                '').startswith('image/'):
                    self.log.warn('The cover image has an id != "cover". Renaming'
                            ' to work around bug in Nook Color')
                    from calibre.ebooks.oeb.base import uuid_id
                    newid = uuid_id()
                    # move any existing id="cover" (and references) out of
                    # the way before claiming the id for the real cover
                    for item in manifest_items_with_id('cover'):
                        item.set('id', newid)
                    for x in root.xpath('//*[@idref="cover"]'):
                        x.set('idref', newid)
                    manifest_item = manifest_item[0]
                    manifest_item.set('id', 'cover')
                    cov.set('content', 'cover')
    # }}}

    def workaround_pocketbook_cover_bug(self, root):  # {{{
        """Move the manifest item with id="cover" to the front of the
        manifest (works around a Pocketbook cover bug)."""
        m = root.xpath('//*[local-name() = "manifest"]/*[local-name() = "item" '
                ' and @id="cover"]')
        if len(m) == 1:
            m = m[0]
            p = m.getparent()
            p.remove(m)
            p.insert(0, m)
    # }}}

    def migrate_lang_code(self, root):  # {{{
        """Normalize <dc:language> values to ISO 639-1 codes where possible."""
        from calibre.utils.localization import lang_as_iso639_1
        for lang in root.xpath('//*[local-name() = "language"]'):
            clc = lang_as_iso639_1(lang.text)
            if clc:
                lang.text = clc
    # }}}
| gpl-3.0 |
tsdmgz/ansible | lib/ansible/modules/packaging/os/package.py | 48 | 1928 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Ansible Project
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: package
version_added: 2.0
author:
- Ansible Inc
short_description: Generic OS package manager
description:
- Installs, upgrade and removes packages using the underlying OS package manager.
- For Windows targets, use the M(win_package) module instead.
options:
name:
description:
- "Package name, or package specifier with version, like C(name-1.0)."
- "Be aware that packages are not always named the same and this module will not 'translate' them per distro."
required: true
state:
description:
- Whether to install (C(present), or remove (C(absent)) a package. Other states depend on the underlying package module, i.e C(latest).
required: true
use:
description:
- The required package manager module to use (yum, apt, etc). The default 'auto' will use existing facts or try to autodetect it.
- You should only use this field if the automatic selection is not working for some reason.
required: false
default: auto
requirements:
- Whatever is required for the package plugins specific for each system.
notes:
- This module actually calls the pertinent package modules for each system (apt, yum, etc).
- For Windows targets, use the M(win_package) module instead.
'''
EXAMPLES = '''
- name: install ntpdate
package:
name: ntpdate
state: present
# This uses a variable as this changes per distribution.
- name: remove the apache package
package:
name: "{{ apache }}"
state: absent
'''
| gpl-3.0 |
Kongsea/tensorflow | tensorflow/contrib/gan/python/estimator/python/head_test.py | 27 | 3196 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TFGAN's head.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python import namedtuples as tfgan_tuples
from tensorflow.contrib.gan.python.estimator.python import head
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
from tensorflow.python.training import training
def dummy_loss(gan_model, add_summaries=True):  # pylint:disable=unused-argument
  """Trivial GAN loss: sum of (real - generated) discriminator outputs."""
  output_diff = (gan_model.discriminator_real_outputs -
                 gan_model.discriminator_gen_outputs)
  return math_ops.reduce_sum(output_diff)
def get_gan_model():
  """Returns a minimal GANModel tuple with one dummy variable per network."""
  # TODO(joelshor): Find a better way of creating a variable scope.
  with variable_scope.variable_scope('generator') as g_scope:
    g_var = variable_scope.get_variable('dummy_var', initializer=0.0)
  with variable_scope.variable_scope('discriminator') as d_scope:
    d_var = variable_scope.get_variable('dummy_var', initializer=0.0)
  return tfgan_tuples.GANModel(
      generator_inputs=None,
      generated_data=array_ops.ones([3, 4]),
      generator_variables=[g_var],
      generator_scope=g_scope,
      generator_fn=None,
      real_data=None,
      discriminator_real_outputs=array_ops.ones([1, 2, 3]) * d_var,
      discriminator_gen_outputs=array_ops.ones([1, 2, 3]) * g_var * d_var,
      discriminator_variables=[d_var],
      discriminator_scope=d_scope,
      discriminator_fn=None)
class GANHeadTest(test.TestCase):
  """Tests that GANHead can create an estimator spec in every mode."""

  def setUp(self):
    super(GANHeadTest, self).setUp()
    self.gan_head = head.gan_head(
        generator_loss_fn=dummy_loss,
        discriminator_loss_fn=dummy_loss,
        generator_optimizer=training.GradientDescentOptimizer(1.0),
        discriminator_optimizer=training.GradientDescentOptimizer(1.0))
    # assertIsInstance is the idiomatic unittest form and produces a clearer
    # failure message than assertTrue(isinstance(...)).
    self.assertIsInstance(self.gan_head, head.GANHead)

  def _test_modes_helper(self, mode):
    """Asserts that create_estimator_spec succeeds for `mode`."""
    self.gan_head.create_estimator_spec(
        features=None,
        mode=mode,
        logits=get_gan_model())

  def test_modes_predict(self):
    self._test_modes_helper(model_fn_lib.ModeKeys.PREDICT)

  def test_modes_eval(self):
    self._test_modes_helper(model_fn_lib.ModeKeys.EVAL)

  def test_modes_train(self):
    self._test_modes_helper(model_fn_lib.ModeKeys.TRAIN)
if __name__ == '__main__':
test.main()
| apache-2.0 |
goliate/sarakha63-persomov | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/tvigle.py | 22 | 2889 | # encoding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
float_or_none,
parse_age_limit,
)
class TvigleIE(InfoExtractor):
    """Extractor for videos hosted on tvigle.ru."""
    IE_NAME = 'tvigle'
    IE_DESC = 'Интернет-телевидение Tvigle.ru'
    _VALID_URL = r'http://(?:www\.)?tvigle\.ru/(?:[^/]+/)+(?P<id>[^/]+)/$'

    _TESTS = [
        {
            'url': 'http://www.tvigle.ru/video/sokrat/',
            'md5': '36514aed3657d4f70b4b2cef8eb520cd',
            'info_dict': {
                'id': '1848932',
                'display_id': 'sokrat',
                'ext': 'flv',
                'title': 'Сократ',
                'description': 'md5:a05bd01be310074d5833efc6743be95e',
                'duration': 6586,
                'age_limit': 0,
            },
        },
        {
            'url': 'http://www.tvigle.ru/video/vladimir-vysotskii/vedushchii-teleprogrammy-60-minut-ssha-o-vladimire-vysotskom/',
            'md5': 'd9012d7c7c598fe7a11d7fb46dc1f574',
            'info_dict': {
                'id': '5142516',
                'ext': 'mp4',
                'title': 'Ведущий телепрограммы «60 минут» (США) о Владимире Высоцком',
                'description': 'md5:027f7dc872948f14c96d19b4178428a4',
                'duration': 186.080,
                'age_limit': 0,
            },
        },
    ]

    def _real_extract(self, url):
        # URLs only carry a display slug; the numeric id must be scraped
        # from the page markup.
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        video_id = self._html_search_regex(
            r'<li class="video-preview current_playing" id="(\d+)">', webpage, 'video id')
        # The JSON API returns a playlist; metadata lives in its first item.
        video_data = self._download_json(
            'http://cloud.tvigle.ru/api/play/video/%s/' % video_id, display_id)
        item = video_data['playlist']['items'][0]
        title = item['title']
        description = item['description']
        thumbnail = item['thumbnail']
        # durationMilliseconds -> seconds
        duration = float_or_none(item.get('durationMilliseconds'), 1000)
        age_limit = parse_age_limit(item.get('ageRestrictions'))
        formats = []
        # 'videos' maps codec -> {quality label (e.g. '720p') -> url}
        for vcodec, fmts in item['videos'].items():
            for quality, video_url in fmts.items():
                formats.append({
                    'url': video_url,
                    'format_id': '%s-%s' % (vcodec, quality),
                    'vcodec': vcodec,
                    # quality labels end with 'p'; strip it for the height
                    'height': int(quality[:-1]),
                    'filesize': item['video_files_size'][vcodec][quality],
                })
        self._sort_formats(formats)
        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'age_limit': age_limit,
            'formats': formats,
        }
| gpl-3.0 |
le9i0nx/ansible | test/runner/test.py | 12 | 24409 | #!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
"""Test runner for all Ansible tests."""
from __future__ import absolute_import, print_function
import errno
import os
import sys
from lib.util import (
ApplicationError,
display,
raw_command,
find_pip,
get_docker_completion,
)
from lib.delegation import (
delegate,
)
from lib.executor import (
command_posix_integration,
command_network_integration,
command_windows_integration,
command_units,
command_compile,
command_shell,
SUPPORTED_PYTHON_VERSIONS,
COMPILE_PYTHON_VERSIONS,
ApplicationWarning,
Delegate,
generate_pip_install,
check_startup,
)
from lib.config import (
IntegrationConfig,
PosixIntegrationConfig,
WindowsIntegrationConfig,
NetworkIntegrationConfig,
SanityConfig,
UnitsConfig,
CompileConfig,
ShellConfig,
)
from lib.sanity import (
command_sanity,
sanity_init,
sanity_get_tests,
)
from lib.target import (
find_target_completion,
walk_posix_integration_targets,
walk_network_integration_targets,
walk_windows_integration_targets,
walk_units_targets,
walk_compile_targets,
walk_sanity_targets,
)
from lib.core_ci import (
AWS_ENDPOINTS,
)
from lib.cloud import (
initialize_cloud_plugins,
)
import lib.cover
def main():
    """Main program function.

    Exit codes: 0 on success or warning, 1 on application error,
    2 on keyboard interrupt, 3 on broken pipe (EPIPE).
    """
    try:
        # Always run from the repository root so relative paths resolve.
        git_root = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))
        os.chdir(git_root)
        initialize_cloud_plugins()
        sanity_init()
        args = parse_args()
        config = args.config(args)
        display.verbosity = config.verbosity
        display.color = config.color
        # Keep stdout machine-readable for lint output and target listings.
        display.info_stderr = (isinstance(config, SanityConfig) and config.lint) or (isinstance(config, IntegrationConfig) and config.list_targets)
        check_startup()

        try:
            args.func(config)
        except Delegate as ex:
            # The command asked to be re-executed in a different environment
            # (docker/remote/tox); hand off execution.
            delegate(config, ex.exclude, ex.require)

        display.review_warnings()
    except ApplicationWarning as ex:
        display.warning(str(ex))
        exit(0)
    except ApplicationError as ex:
        display.error(str(ex))
        exit(1)
    except KeyboardInterrupt:
        exit(2)
    except IOError as ex:
        if ex.errno == errno.EPIPE:
            # Output was piped to a consumer that closed early (e.g. head).
            exit(3)
        raise
def parse_args():
    """Parse command line arguments.

    :rtype: argparse.Namespace
    """
    # argparse may be missing on very old pythons; --requirements bootstraps it.
    try:
        import argparse
    except ImportError:
        if '--requirements' not in sys.argv:
            raise
        raw_command(generate_pip_install(find_pip(), 'ansible-test'))
        import argparse

    # argcomplete is optional; only enables shell tab completion.
    try:
        import argcomplete
    except ImportError:
        argcomplete = None

    if argcomplete:
        epilog = 'Tab completion available using the "argcomplete" python package.'
    else:
        epilog = 'Install the "argcomplete" python package to enable tab completion.'

    parser = argparse.ArgumentParser(epilog=epilog)

    # Options shared by every command.
    common = argparse.ArgumentParser(add_help=False)

    common.add_argument('-e', '--explain',
                        action='store_true',
                        help='explain commands that would be executed')

    common.add_argument('-v', '--verbose',
                        dest='verbosity',
                        action='count',
                        default=0,
                        help='display more output')

    common.add_argument('--color',
                        metavar='COLOR',
                        nargs='?',
                        help='generate color output: %(choices)s',
                        choices=('yes', 'no', 'auto'),
                        const='yes',
                        default='auto')

    common.add_argument('--debug',
                        action='store_true',
                        help='run ansible commands in debug mode')

    # Options shared by all test commands (targets, coverage, environments).
    test = argparse.ArgumentParser(add_help=False, parents=[common])

    test.add_argument('include',
                      metavar='TARGET',
                      nargs='*',
                      help='test the specified target').completer = complete_target

    test.add_argument('--exclude',
                      metavar='TARGET',
                      action='append',
                      help='exclude the specified target').completer = complete_target

    test.add_argument('--require',
                      metavar='TARGET',
                      action='append',
                      help='require the specified target').completer = complete_target

    test.add_argument('--coverage',
                      action='store_true',
                      help='analyze code coverage when running tests')

    test.add_argument('--coverage-label',
                      default='',
                      help='label to include in coverage output file names')

    test.add_argument('--metadata',
                      help=argparse.SUPPRESS)

    add_changes(test, argparse)
    add_environments(test)

    # Options shared by the three integration-test commands.
    integration = argparse.ArgumentParser(add_help=False, parents=[test])

    integration.add_argument('--python',
                             metavar='VERSION',
                             choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
                             help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))

    integration.add_argument('--start-at',
                             metavar='TARGET',
                             help='start at the specified target').completer = complete_target

    integration.add_argument('--start-at-task',
                             metavar='TASK',
                             help='start at the specified task')

    integration.add_argument('--tags',
                             metavar='TAGS',
                             help='only run plays and tasks tagged with these values')

    integration.add_argument('--skip-tags',
                             metavar='TAGS',
                             help='only run plays and tasks whose tags do not match these values')

    integration.add_argument('--diff',
                             action='store_true',
                             help='show diff output')

    integration.add_argument('--allow-destructive',
                             action='store_true',
                             help='allow destructive tests (--local and --tox only)')

    integration.add_argument('--retry-on-error',
                             action='store_true',
                             help='retry failed test with increased verbosity')

    integration.add_argument('--continue-on-error',
                             action='store_true',
                             help='continue after failed test')

    integration.add_argument('--debug-strategy',
                             action='store_true',
                             help='run test playbooks using the debug strategy')

    integration.add_argument('--changed-all-target',
                             metavar='TARGET',
                             default='all',
                             help='target to run when all tests are needed')

    integration.add_argument('--list-targets',
                             action='store_true',
                             help='list matching targets instead of running tests')

    subparsers = parser.add_subparsers(metavar='COMMAND')
    subparsers.required = True # work-around for python 3 bug which makes subparsers optional

    # `integration` subcommand (posix).
    posix_integration = subparsers.add_parser('integration',
                                              parents=[integration],
                                              help='posix integration tests')

    posix_integration.set_defaults(func=command_posix_integration,
                                   targets=walk_posix_integration_targets,
                                   config=PosixIntegrationConfig)

    add_extra_docker_options(posix_integration)

    # `network-integration` subcommand.
    network_integration = subparsers.add_parser('network-integration',
                                                parents=[integration],
                                                help='network integration tests')

    network_integration.set_defaults(func=command_network_integration,
                                     targets=walk_network_integration_targets,
                                     config=NetworkIntegrationConfig)

    add_extra_docker_options(network_integration, integration=False)

    network_integration.add_argument('--platform',
                                     metavar='PLATFORM',
                                     action='append',
                                     help='network platform/version').completer = complete_network_platform

    network_integration.add_argument('--inventory',
                                     metavar='PATH',
                                     help='path to inventory used for tests')

    # `windows-integration` subcommand.
    windows_integration = subparsers.add_parser('windows-integration',
                                                parents=[integration],
                                                help='windows integration tests')

    windows_integration.set_defaults(func=command_windows_integration,
                                     targets=walk_windows_integration_targets,
                                     config=WindowsIntegrationConfig)

    add_extra_docker_options(windows_integration, integration=False)

    windows_integration.add_argument('--windows',
                                     metavar='VERSION',
                                     action='append',
                                     help='windows version').completer = complete_windows

    # `units` subcommand.
    units = subparsers.add_parser('units',
                                  parents=[test],
                                  help='unit tests')

    units.set_defaults(func=command_units,
                       targets=walk_units_targets,
                       config=UnitsConfig)

    units.add_argument('--python',
                       metavar='VERSION',
                       choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
                       help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))

    units.add_argument('--collect-only',
                       action='store_true',
                       help='collect tests but do not execute them')

    add_extra_docker_options(units, integration=False)

    # `compile` subcommand.
    compiler = subparsers.add_parser('compile',
                                     parents=[test],
                                     help='compile tests')

    compiler.set_defaults(func=command_compile,
                          targets=walk_compile_targets,
                          config=CompileConfig)

    compiler.add_argument('--python',
                          metavar='VERSION',
                          choices=COMPILE_PYTHON_VERSIONS + ('default',),
                          help='python version: %s' % ', '.join(COMPILE_PYTHON_VERSIONS))

    add_lint(compiler)
    add_extra_docker_options(compiler, integration=False)

    # `sanity` subcommand.
    sanity = subparsers.add_parser('sanity',
                                   parents=[test],
                                   help='sanity tests')

    sanity.set_defaults(func=command_sanity,
                        targets=walk_sanity_targets,
                        config=SanityConfig)

    sanity.add_argument('--test',
                        metavar='TEST',
                        action='append',
                        choices=[test.name for test in sanity_get_tests()],
                        help='tests to run').completer = complete_sanity_test

    sanity.add_argument('--skip-test',
                        metavar='TEST',
                        action='append',
                        choices=[test.name for test in sanity_get_tests()],
                        help='tests to skip').completer = complete_sanity_test

    sanity.add_argument('--list-tests',
                        action='store_true',
                        help='list available tests')

    sanity.add_argument('--python',
                        metavar='VERSION',
                        choices=SUPPORTED_PYTHON_VERSIONS + ('default',),
                        help='python version: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))

    sanity.add_argument('--base-branch',
                        help=argparse.SUPPRESS)

    add_lint(sanity)
    add_extra_docker_options(sanity, integration=False)

    # `shell` subcommand.
    shell = subparsers.add_parser('shell',
                                  parents=[common],
                                  help='open an interactive shell')

    shell.set_defaults(func=command_shell,
                       config=ShellConfig)

    add_environments(shell, tox_version=True)
    add_extra_docker_options(shell)

    # `coverage` subcommand family.
    coverage_common = argparse.ArgumentParser(add_help=False, parents=[common])

    add_environments(coverage_common, tox_version=True, tox_only=True)

    coverage = subparsers.add_parser('coverage',
                                     help='code coverage management and reporting')

    coverage_subparsers = coverage.add_subparsers(metavar='COMMAND')
    coverage_subparsers.required = True # work-around for python 3 bug which makes subparsers optional

    coverage_combine = coverage_subparsers.add_parser('combine',
                                                      parents=[coverage_common],
                                                      help='combine coverage data and rewrite remote paths')

    coverage_combine.set_defaults(func=lib.cover.command_coverage_combine,
                                  config=lib.cover.CoverageConfig)

    add_extra_coverage_options(coverage_combine)

    coverage_erase = coverage_subparsers.add_parser('erase',
                                                    parents=[coverage_common],
                                                    help='erase coverage data files')

    coverage_erase.set_defaults(func=lib.cover.command_coverage_erase,
                                config=lib.cover.CoverageConfig)

    coverage_report = coverage_subparsers.add_parser('report',
                                                     parents=[coverage_common],
                                                     help='generate console coverage report')

    coverage_report.set_defaults(func=lib.cover.command_coverage_report,
                                 config=lib.cover.CoverageReportConfig)

    coverage_report.add_argument('--show-missing',
                                 action='store_true',
                                 help='show line numbers of statements not executed')

    add_extra_coverage_options(coverage_report)

    coverage_html = coverage_subparsers.add_parser('html',
                                                   parents=[coverage_common],
                                                   help='generate html coverage report')

    coverage_html.set_defaults(func=lib.cover.command_coverage_html,
                               config=lib.cover.CoverageConfig)

    add_extra_coverage_options(coverage_html)

    coverage_xml = coverage_subparsers.add_parser('xml',
                                                  parents=[coverage_common],
                                                  help='generate xml coverage report')

    coverage_xml.set_defaults(func=lib.cover.command_coverage_xml,
                              config=lib.cover.CoverageConfig)

    add_extra_coverage_options(coverage_xml)

    if argcomplete:
        argcomplete.autocomplete(parser, always_complete_options=False, validator=lambda i, k: True)

    args = parser.parse_args()

    # Post-processing: --explain implies at least -v; normalize --color
    # to a boolean, auto-detecting from the tty when requested.
    if args.explain and not args.verbosity:
        args.verbosity = 1

    if args.color == 'yes':
        args.color = True
    elif args.color == 'no':
        args.color = False
    else:
        args.color = sys.stdout.isatty()

    return args
def add_lint(parser):
    """Add the lint-related boolean options shared by compile and sanity.

    :type parser: argparse.ArgumentParser
    """
    lint_flags = (
        ('--lint', 'write lint output to stdout, everything else stderr'),
        ('--junit', 'write test failures to junit xml files'),
        ('--failure-ok', 'exit successfully on failed tests after saving results'),
    )

    for option, description in lint_flags:
        parser.add_argument(option, action='store_true', help=description)
def add_changes(parser, argparse):
    """Add the change-detection options used to limit test targets.

    :type parser: argparse.ArgumentParser
    :type argparse: argparse
    """
    parser.add_argument('--changed', action='store_true', help='limit targets based on changes')

    changes = parser.add_argument_group(title='change detection arguments')

    changes.add_argument('--tracked', action='store_true', help=argparse.SUPPRESS)
    changes.add_argument('--untracked', action='store_true', help='include untracked files')

    # The --ignore-* flags store False into the positively named attribute.
    for option, attribute in (('--ignore-committed', 'committed'),
                              ('--ignore-staged', 'staged'),
                              ('--ignore-unstaged', 'unstaged')):
        changes.add_argument(option, dest=attribute, action='store_false',
                             help='exclude %s files' % attribute)

    changes.add_argument('--changed-from', metavar='PATH', help=argparse.SUPPRESS)
    changes.add_argument('--changed-path', metavar='PATH', action='append', help=argparse.SUPPRESS)
def add_environments(parser, tox_version=False, tox_only=False):
    """Add the mutually-exclusive execution environment options.

    :type parser: argparse.ArgumentParser
    :type tox_version: bool  # when True, --tox accepts an optional python version
    :type tox_only: bool  # when True, omit the docker/remote options
    """
    parser.add_argument('--requirements',
                        action='store_true',
                        help='install command requirements')

    # Only one environment may be selected per invocation.
    environments = parser.add_mutually_exclusive_group()

    environments.add_argument('--local',
                              action='store_true',
                              help='run from the local environment')

    if tox_version:
        # Bare --tox defaults to the python version currently running.
        environments.add_argument('--tox',
                                  metavar='VERSION',
                                  nargs='?',
                                  default=None,
                                  const='.'.join(str(i) for i in sys.version_info[:2]),
                                  choices=SUPPORTED_PYTHON_VERSIONS,
                                  help='run from a tox virtualenv: %s' % ', '.join(SUPPORTED_PYTHON_VERSIONS))
    else:
        environments.add_argument('--tox',
                                  action='store_true',
                                  help='run from a tox virtualenv')

    tox = parser.add_argument_group(title='tox arguments')

    tox.add_argument('--tox-sitepackages',
                     action='store_true',
                     help='allow access to globally installed packages')

    if tox_only:
        # Commands which only support tox still need the docker/remote
        # attributes to exist on the parsed namespace, so default them here.
        environments.set_defaults(
            docker=None,
            remote=None,
            remote_stage=None,
            remote_provider=None,
            remote_aws_region=None,
            remote_terminate=None,
        )

        return

    environments.add_argument('--docker',
                              metavar='IMAGE',
                              nargs='?',
                              default=None,
                              const='default',
                              help='run from a docker container').completer = complete_docker

    environments.add_argument('--remote',
                              metavar='PLATFORM',
                              default=None,
                              help='run from a remote instance').completer = complete_remote

    remote = parser.add_argument_group(title='remote arguments')

    remote.add_argument('--remote-stage',
                        metavar='STAGE',
                        help='remote stage to use: %(choices)s',
                        choices=['prod', 'dev'],
                        default='prod')

    remote.add_argument('--remote-provider',
                        metavar='PROVIDER',
                        help='remote provider to use: %(choices)s',
                        choices=['default', 'aws', 'azure', 'parallels'],
                        default='default')

    remote.add_argument('--remote-aws-region',
                        metavar='REGION',
                        help='remote aws region to use: %(choices)s (default: auto)',
                        choices=sorted(AWS_ENDPOINTS),
                        default=None)

    remote.add_argument('--remote-terminate',
                        metavar='WHEN',
                        help='terminate remote instance: %(choices)s (default: %(default)s)',
                        choices=['never', 'always', 'success'],
                        default='never')
def add_extra_coverage_options(parser):
    """Add reporting options shared by the coverage subcommands.

    :type parser: argparse.ArgumentParser
    """
    groups = lib.cover.COVERAGE_GROUPS

    parser.add_argument('--group-by',
                        metavar='GROUP',
                        action='append',
                        choices=groups,
                        help='group output by: %s' % ', '.join(groups))

    for option, description in (
            ('--all', 'include all python source files'),
            ('--stub', 'generate empty report of all python source files')):
        parser.add_argument(option, action='store_true', help=description)
def add_extra_docker_options(parser, integration=True):
    """Add docker-specific options; integration commands get extra ones.

    :type parser: argparse.ArgumentParser
    :type integration: bool
    """
    group = parser.add_argument_group(title='docker arguments')

    group.add_argument('--docker-no-pull', dest='docker_pull', action='store_false',
                       help='do not explicitly pull the latest docker images')

    group.add_argument('--docker-keep-git', action='store_true',
                       help='transfer git related files into the docker container')

    if integration:
        group.add_argument('--docker-util', metavar='IMAGE', default='httptester',
                           help='docker utility image to provide test services')

        group.add_argument('--docker-privileged', action='store_true',
                           help='run docker container in privileged mode')
def complete_target(prefix, parsed_args, **_):
    """Tab-completion callback for test target names.

    :type prefix: unicode
    :type parsed_args: any
    :rtype: list[str]
    """
    targets = parsed_args.targets
    return find_target_completion(targets, prefix)
def complete_remote(prefix, parsed_args, **_):
    """Tab-completion callback for --remote platforms.

    :type prefix: unicode
    :type parsed_args: any
    :rtype: list[str]
    """
    del parsed_args

    with open('test/runner/completion/remote.txt', 'r') as completion_fd:
        platforms = completion_fd.read().splitlines()

    return [name for name in platforms if name.startswith(prefix)]
def complete_docker(prefix, parsed_args, **_):
    """Tab-completion callback for --docker images.

    :type prefix: unicode
    :type parsed_args: any
    :rtype: list[str]
    """
    del parsed_args

    # Iterating the completion dict yields its keys.
    names = sorted(get_docker_completion())

    return [name for name in names if name.startswith(prefix)]
def complete_windows(prefix, parsed_args, **_):
    """Tab-completion callback for --windows versions (skips chosen ones).

    :type prefix: unicode
    :type parsed_args: any
    :rtype: list[str]
    """
    with open('test/runner/completion/windows.txt', 'r') as completion_fd:
        versions = completion_fd.read().splitlines()

    chosen = parsed_args.windows
    return [v for v in versions if v.startswith(prefix) and (not chosen or v not in chosen)]
def complete_network_platform(prefix, parsed_args, **_):
    """Tab-completion callback for --platform values (skips chosen ones).

    :type prefix: unicode
    :type parsed_args: any
    :rtype: list[str]
    """
    with open('test/runner/completion/network.txt', 'r') as completion_fd:
        platforms = completion_fd.read().splitlines()

    chosen = parsed_args.platform
    return [p for p in platforms if p.startswith(prefix) and (not chosen or p not in chosen)]
def complete_sanity_test(prefix, parsed_args, **_):
    """Tab-completion callback for sanity test names.

    :type prefix: unicode
    :type parsed_args: any
    :rtype: list[str]
    """
    del parsed_args

    names = sorted(test.name for test in sanity_get_tests())

    return [name for name in names if name.startswith(prefix)]
if __name__ == '__main__':
main()
| gpl-3.0 |
adieu/django-nonrel | tests/regressiontests/utils/http.py | 48 | 1042 | from django.utils import http
from django.utils import unittest
class TestUtilsHttp(unittest.TestCase):

    def test_same_origin_true(self):
        """URL pairs that must be treated as the same origin."""
        same_origin_pairs = [
            # Identical
            ('http://foo.com/', 'http://foo.com/'),
            # One with trailing slash - see #15617
            ('http://foo.com', 'http://foo.com/'),
            ('http://foo.com/', 'http://foo.com'),
            # With port
            ('https://foo.com:8000', 'https://foo.com:8000/'),
        ]
        for url1, url2 in same_origin_pairs:
            self.assertTrue(http.same_origin(url1, url2))

    def test_same_origin_false(self):
        """URL pairs that must NOT be treated as the same origin."""
        cross_origin_pairs = [
            # Different scheme
            ('http://foo.com', 'https://foo.com'),
            # Different host
            ('http://foo.com', 'http://goo.com'),
            # Different host again
            ('http://foo.com', 'http://foo.com.evil.com'),
            # Different port
            ('http://foo.com:8000', 'http://foo.com:8001'),
        ]
        for url1, url2 in cross_origin_pairs:
            self.assertFalse(http.same_origin(url1, url2))
| bsd-3-clause |
unseenlaser/linuxcnc | configs/sim/remap/iocontrol-removed/python/embedding.py | 28 | 1709 | # a tour of accessing interpreter internals
def call_stack(self,*args):
    # Dump the interpreter's O-word call stack: one line per active context
    # level with its file position and any named (local) parameters.
    # NOTE: Python 2 code (print statements); `self` is the interpreter.
    print "------- interpreter call stack: "
    for i in range(self.call_level):
        s = self.sub_context[i]
        print "%d: position=%d sequence_number=%d filename=%s subname=%s context_status=%x" % (i, s.position, s.sequence_number,s.filename,s.subname,s.context_status),
        print "named_params=",s.named_params
def remap_stack(self, *args):
print "------- interpreter remap stack: "
for i in range(self.remap_level):
r = self.blocks[i].executing_remap
print "%d: name=%s argspec=%s prolog_func=%s ngc=%s py=%s epilog=%s modal_group=%d" % (r.name,r.argspec,r.prolog_func,r.ngc,r.epilog_func,r.modal_group)
def tooltable(self, *args):
    # Dump the occupied tool-table entries (toolno == -1 marks an empty
    # slot) followed by the current spindle/prepped-pocket state.
    print "------- tool table:"
    for i in range(len(self.tool_table)):
        t = self.tool_table[i]
        if t.toolno != -1: print str(t)
    print "tool in spindle=%d pocketPrepped=%d" % (self.current_tool,self.selected_pocket)
def show_block(self,*args):
if len(args) > 0:
n = int(args[0])
else:
n = 0
b = self.blocks[n]
print "-- blocks[%d]" % (n)
print "line_number=%d o_name=%s p_flag=%d p_number%g q_flag=%d q_number=%g comment=%s" % (b.line_number,b.o_name,b.p_flag,b.p_number,b.q_flag,b.q_number,b.comment)
def show(self,*args):
    # Aggregate dump: calls each helper above, then prints the
    # interpreter's modal state and parameter list.
    print "dir(interpreter)=",dir(self)
    tooltable(self)
    show_block(self,0)
    # When a remap is in progress, also show the top remapped block.
    if self.remap_level: show_block(self,self.remap_level)
    call_stack(self)
    remap_stack(self)
    print "active G codes:",self.active_g_codes
    print "active M codes:",self.active_m_codes
    print "active settings:",self.active_settings
    print "parameters:",self.parameters
| lgpl-2.1 |
Erotemic/plottool | plottool_ibeis/interact_annotations.py | 1 | 52275 | """
Interactive tool to draw mask on an image or image-like array.
TODO:
* need concept of subannotation
* need to take options on a right click of an annotation
* add support for arbitrary polygons back in .
* rename species_list to label_list or category_list
* Just use metadata instead of species / category / label
# Need to incorporate parts into metadata
Notes:
3. Change bounding box and update continuously to the original image the
new ANNOTATIONs
2. Make new window and frames inside, double click to pull up normal window
with editing start with just taking in 6 images and ANNOTATIONs
1. ANNOTATION ID number, then list of 4 tuples
python -m utool.util_inspect check_module_usage --pat="interact_annotations.py"
References:
Adapted from matplotlib/examples/event_handling/poly_editor.py
Jan 9 2014: taken from: https://gist.github.com/tonysyu/3090704
CommandLine:
python -m plottool_ibeis.interact_annotations --test-test_interact_annots --show
"""
from __future__ import absolute_import, division, print_function
import six
import re
import numpy as np
try:
import vtool_ibeis as vt
except ImportError:
pass
import utool as ut
import itertools as it
import matplotlib as mpl
from six.moves import zip, range
from plottool_ibeis import draw_func2 as df2
from plottool_ibeis import abstract_interaction
print, rrr, profile = ut.inject2(__name__)
# Placeholder species label used when an annotation has no species assigned.
DEFAULT_SPECIES_TAG = '____'
# FIXME: these hotkeys should be generic instead of hard-coded per action.
ACCEPT_SAVE_HOTKEY = None  # 'ctrl+a'
ADD_RECTANGLE_HOTKEY = 'ctrl+a'  # 'ctrl+d'
ADD_RECTANGLE_FULL_HOTKEY = 'ctrl+f'
DEL_RECTANGLE_HOTKEY = 'ctrl+d'  # 'ctrl+r'
TOGGLE_LABEL_HOTKEY = 'ctrl+t'

# When species typing is disabled, single-key hotkeys are safe to use
# (no risk of colliding with typed species text).
HACK_OFF_SPECIES_TYPING = True
if HACK_OFF_SPECIES_TYPING:
    ADD_RECTANGLE_HOTKEY = 'a'  # 'ctrl+d'
    ADD_RECTANGLE_FULL_HOTKEY = 'f'
    DEL_RECTANGLE_HOTKEY = 'd'  # 'ctrl+r'
    TOGGLE_LABEL_HOTKEY = 't'

NEXT_IMAGE_HOTKEYS = ['right', 'pagedown']
PREV_IMAGE_HOTKEYS = ['left', 'pageup']

# Full turn in radians (tau = 2 * pi).
TAU = np.pi * 2
class AnnotPoly(mpl.patches.Polygon, ut.NiceRepr):
"""
Helper to represent an annotation polygon
ibeis --aidcmd='Interact image' --aid=1
Example:
>>> # DISABLE_DOCTEST
>>> from plottool_ibeis.interact_annotations import * # NOQA
>>> verts = vt.verts_from_bbox([0, 0, 10, 10])
>>> poly = AnnotPoly(None, 0, verts, 0, '____')
"""
    def __init__(poly, ax, num, verts, theta, species, fc=(0, 0, 0),
                 line_color=(1, 1, 1), line_width=4, is_orig=False,
                 metadata=None, valid_species=None, manager=None):
        """
        Args:
            ax: matplotlib axes to attach text artists to (None defers
                axes_init to the caller)
            num: annotation index number
            verts: list of (x, y) polygon vertices
            theta: rotation angle applied around the polygon center
            species: species label string
            fc: polygon face color
            line_color: outline color
            line_width: outline width
            is_orig (bool): True if this poly is an original annotation
                (affects selection colors)
            metadata: annotation metadata (dict or ut.LazyDict)
            valid_species: list of species used for tab completion
            manager: owning interaction object; assumed to expose ``img``
                (used in calc_handle_display_coords) -- TODO confirm contract
        """
        super(AnnotPoly, poly).__init__(verts, animated=True, fc=fc, ec='none',
                                        alpha=0)
        poly.manager = manager
        # Ensure basecoords consistency
        poly.basecoords = vt.verts_from_bbox(vt.bbox_from_verts(poly.xy))
        #poly.basecoords = poly.xy
        poly.num = num
        poly.is_orig = is_orig
        poly.theta = theta
        poly.metadata = metadata
        poly.valid_species = valid_species
        poly.tab_list = valid_species
        # put in previous text and tabcomplete list for autocompletion
        poly.tctext = ''
        poly.tcindex = 0
        # index of the vertex used as the resize anchor
        poly.anchor_idx = 2
        poly.child_polys = {}
        # Display stuff that should be removed from constructor
        poly.xy = calc_display_coords(poly.basecoords, poly.theta)
        poly.lines = poly._make_lines(line_color, line_width)
        poly.handle = poly._make_handle_line()
        poly.species = species
        if ax is not None:
            poly.axes_init(ax)
    def axes_init(poly, ax):
        """Create the species and metadata text artists on ``ax``.

        The artists are created then immediately removed from the axes;
        they are redrawn manually in draw_self when blitting.
        """
        species = poly.species
        metadata = poly.metadata
        if isinstance(metadata, ut.LazyDict):
            # Only render values that were already computed; avoid
            # triggering lazy evaluation just to build a label.
            metadata_ = ut.dict_subset(metadata, metadata.cached_keys())
        else:
            metadata_ = metadata
        poly.species_tag = ax.text(
            #tagpos[0], tagpos[1],
            0, 0,
            species,
            bbox={'facecolor': 'white', 'alpha': .8},
            verticalalignment='top',
        )
        poly.metadata_tag = ax.text(
            0, 0,
            #tagpos[0] + 5, tagpos[1] + 80,
            ut.repr3(metadata_, nobr=True),
            bbox={'facecolor': 'white', 'alpha': .7},
            verticalalignment='top',
        )
        # ???
        poly.species_tag.remove() # eliminate "leftover" copies
        poly.metadata_tag.remove()
        #
        poly.update_display_coords()
def move_to_back(poly):
# FIXME: doesnt work exactly
# Probalby need to do in the context of other polys
zorder = 0
poly.set_zorder(zorder)
poly.lines.set_zorder(zorder)
poly.handle.set_zorder(zorder)
def __nice__(poly):
return '(num=%r)' % (poly.num)
def add_to_axis(poly, ax):
ax.add_patch(poly)
ax.add_line(poly.lines)
ax.add_line(poly.handle)
def remove_from_axis(poly, ax):
poly.remove()
poly.lines.remove()
poly.handle.remove()
    def draw_self(poly, ax, show_species_tags=False, editable=True):
        """Manually draw this polygon's artists onto ``ax`` (blitting)."""
        ax.draw_artist(poly)
        # Toggle vertex markers so read-only polygons show no grab handles.
        if not editable and poly.lines.get_marker():
            poly.lines.set_marker('')
        elif editable and not poly.lines.get_marker():
            poly.lines.set_marker('o')
        ax.draw_artist(poly.lines)
        if editable:
            ax.draw_artist(poly.handle)
        if editable and show_species_tags:
            # Hack to fix matplotlib 1.5 bug
            # (text artists need a figure reference to be drawable after
            # having been removed from the axes)
            poly.species_tag.figure = ax.figure
            poly.metadata_tag.figure = ax.figure
            ax.draw_artist(poly.species_tag)
            ax.draw_artist(poly.metadata_tag)
def _make_lines(poly, line_color, line_width):
""" verts - list of (x, y) tuples """
_xs, _ys = list(zip(*poly.xy))
color = np.array(line_color)
marker_face_color = line_color
line_kwargs = {'lw': line_width, 'color': color,
'mfc': marker_face_color}
lines = mpl.lines.Line2D(_xs, _ys, marker='o', alpha=1, animated=True,
**line_kwargs)
return lines
def _make_handle_line(poly):
_xs, _ys = list(zip(*poly.calc_handle_display_coords()))
line_width = 4
line_color = (0, 1, 0)
color = np.array(line_color)
marker_face_color = line_color
line_kwargs = {'lw': line_width, 'color': color, 'mfc': marker_face_color}
lines = mpl.lines.Line2D(_xs, _ys, marker='o', alpha=1, animated=True,
**line_kwargs)
return lines
    def calc_tag_position(poly):
        r"""
        Return display coordinates of the (rotated) top-right corner, where
        the species/metadata tags are anchored.

        CommandLine:
            python -m plottool_ibeis.interact_annotations --test-calc_tag_position --show

        Example:
            >>> # DISABLE_DOCTEST
            >>> from plottool_ibeis.interact_annotations import *  # NOQA
            >>> poly = ut.DynStruct()
            >>> poly.basecoords = vt.verts_from_bbox([0, 0, 400, 400], True)
            >>> poly.theta = 0
            >>> poly.xy = vt.verts_from_bbox([0, 0, 400, 400], True)
            >>> tagpos = poly.calc_tag_position()
            >>> print('tagpos = %r' % (tagpos,))
        """
        # Unrotated top-right corner (max x, min y in image coordinates)
        points = [[
            max(list(zip(*poly.basecoords))[0]),
            min(list(zip(*poly.basecoords))[1])
        ]]
        # Rotate it into display space around the polygon's center
        tagpos = rotate_points_around(points, poly.theta, *points_center(poly.xy))[0]
        return tagpos
    def calc_handle_display_coords(poly):
        """
        Return the two endpoints of the rotation handle in display space.

        The handle extends upward from the midpoint of the (rotated) top
        edge; its length scales with the image height.
        """
        img_h = poly.manager.img.shape[0]
        handle_length = img_h // 32
        #MIN_HANDLE_LENGTH = 25
        #handle_length = MIN_HANDLE_LENGTH
        #handle_length = max(MIN_HANDLE_LENGTH, (h / 4))
        cx, cy = points_center(poly.xy)
        w, h = vt.get_pointset_extent_wh(np.array(poly.basecoords))
        x0, y0 = cx, (cy - (h / 2))  # start at top edge
        x1, y1 = (x0, y0 - handle_length)
        pts = [(x0, y0), (x1, y1)]
        # Rotate with the polygon so the handle tracks its orientation
        pts = rotate_points_around(pts, poly.theta, cx, cy)
        return pts
    def update_color(poly, selected=False, editing_parts=False):
        """
        Recolor the outline to reflect interaction state.

        Priority: editing-parts (pink) > selected (orange / light blue) >
        deselected (white / light gray). ``poly.is_orig`` distinguishes
        pre-existing annotations from newly added ones.
        """
        if editing_parts:
            poly.lines.set_color(df2.PINK)
        elif selected:
            # Add selected color
            sel_color = df2.ORANGE if poly.is_orig else df2.LIGHT_BLUE
            poly.lines.set_color(sel_color)
        else:
            line = poly.lines
            line_color = line.get_color()
            desel_color = df2.WHITE if poly.is_orig else df2.LIGHTGRAY
            # Only touch the artist when the color actually differs
            if np.any(line_color != np.array(desel_color)):
                line.set_color(np.array(desel_color))
def update_lines(poly):
poly.lines.set_data(list(zip(*poly.xy)))
poly.handle.set_data(list(zip(*poly.calc_handle_display_coords())))
def set_species(poly, text):
poly.tctext = text
poly.species_tag.set_text(text)
def increment_species(poly, amount=1):
if len(poly.tab_list) > 0:
tci = (poly.tcindex + amount) % len(poly.tab_list)
poly.tcindex = tci
# All tab is going to do is go through the possibilities
poly.species_tag.set_text(poly.tab_list[poly.tcindex])
    def resize_poly(poly, x, y, idx, ax):
        """
        Resize a rectangle using idx as the given anchor point. Respects
        current rotation.

        Args:
            x, y: data coordinates the dragged vertex should move to
            idx: index of the dragged vertex in poly.xy
            ax: axes used for bounds checking

        CommandLine:
            python -m plottool_ibeis.interact_annotations --exec-resize_poly --show

        Example:
            >>> # DISABLE_DOCTEST
            >>> from plottool_ibeis.interact_annotations import *  # NOQA
            >>> (h, w) = img.shape[0:2]
            >>> x1, y1 = 10, 10
            >>> x2, y2 = w - 10, h - 10
            >>> coords = ((x1, y1), (x1, y2), (x2, y2), (x2, y1))
            >>> x = 3 * w / 4
            >>> y = 3 * h / 4
            >>> idx = 3
            >>> resize_poly(poly, x, y, idx)
            >>> update_UI()
            >>> import plottool_ibeis as pt
            >>> pt.show_if_requested()
        """
        # TODO: allow resize by middle click to scale from the center
        # the minus one is because the last coordinate is duplicated (by
        # matplotlib) to get a closed polygon
        tmpcoords = poly.xy[:-1]
        idx = idx % len(tmpcoords)
        previdx = (idx - 1) % len(tmpcoords)
        nextidx = (idx + 1) % len(tmpcoords)
        # Drag vector of the grabbed vertex
        (dx, dy) = (x - poly.xy[idx][0], y - poly.xy[idx][1])
        # Fudge factor is due to gravity vectors constants
        fudge_factor = (idx) * TAU / 4
        poly_theta = poly.theta + fudge_factor
        # Polar vectors from the grabbed vertex to its two neighbors; the
        # neighbors slide along their edges to keep the shape a rectangle.
        polar_idx2prev = polarDelta(tmpcoords[idx], tmpcoords[previdx])
        polar_idx2next = polarDelta(tmpcoords[idx], tmpcoords[nextidx])
        tmpcoords[idx] = (tmpcoords[idx][0] + dx, tmpcoords[idx][1] + dy)
        # Express the drag in the polygon's rotated frame
        mag_delta = np.linalg.norm((dx, dy))
        theta_delta = np.arctan2(dy, dx)
        theta_rot = theta_delta - (poly_theta + TAU / 4)
        rotx = mag_delta * np.cos(theta_rot)
        roty = mag_delta * np.sin(theta_rot)
        # NOTE(review): asymmetric signs (-= rotx vs += roty) appear tied to
        # the fudge_factor convention above — confirm before touching.
        polar_idx2prev[0] -= rotx
        polar_idx2next[0] += roty
        tmpcoords[previdx] = apply_polarDelta(polar_idx2prev, tmpcoords[idx])
        tmpcoords[nextidx] = apply_polarDelta(polar_idx2next, tmpcoords[idx])
        # rotate the points by -theta to get the "unrotated" points for use as
        # basecoords
        tmpcoords = rotate_points_around(tmpcoords, -poly.theta,
                                         *points_center(poly.xy))
        # ensure the poly is closed, matplotlib might do this, but I'm not sure
        # if it preserves the ordering we depend on, even if it does add the
        # point
        tmpcoords = tmpcoords[:] + [tmpcoords[0]]
        dispcoords = calc_display_coords(tmpcoords, poly.theta)
        # Only commit if still on screen and not degenerately small
        if (check_valid_coords(ax, dispcoords) and check_min_wh(tmpcoords)):
            poly.basecoords = tmpcoords
        poly.update_display_coords()
def rotate_poly(poly, dtheta, ax):
coords_lis = calc_display_coords(poly.basecoords, poly.theta + dtheta)
if check_valid_coords(ax, coords_lis):
poly.theta += dtheta
poly.update_display_coords()
def move_poly(poly, dx, dy, ax):
new_coords = [(x + dx, y + dy) for (x, y) in poly.basecoords]
coords_list = calc_display_coords(new_coords, poly.theta)
if check_valid_coords(ax, coords_list):
poly.basecoords = new_coords
poly.update_display_coords()
def update_display_coords(poly):
poly.xy = calc_display_coords(poly.basecoords, poly.theta)
tag_pos = poly.calc_tag_position()
poly.species_tag.set_position((tag_pos[0] + 5, tag_pos[1]))
poly.metadata_tag.set_position((tag_pos[0] + 5, tag_pos[1] + 50))
def print_info(poly):
print('poly = %r' % (poly,))
print('poly.tag_text = %r' % (poly.species_tag.get_text(),))
print('poly.metadata = %r' % (poly.metadata,))
def get_poly_mask(poly, shape):
h, w = shape[0:2]
y, x = np.mgrid[:h, :w]
points = np.transpose((x.ravel(), y.ravel()))
verts = poly.xy
path = mpl.path.Path(verts)
mask = path.contains_points(points)
#mask = nxutils.points_inside_poly(points, verts)
return mask.reshape(h, w)
def is_near_handle(poly, xy_pt, max_dist):
line = poly.calc_handle_display_coords()
return is_within_distance_from_line(xy_pt, line, max_dist)
    @property
    def size(poly):
        # (w, h) of the polygon's axis-aligned bounding box in display space
        return vt.bbox_from_verts(poly.xy)[2:4]
@six.add_metaclass(ut.ReloadingMetaclass)
class AnnotationInteraction(abstract_interaction.AbstractInteraction):
    """
    An interactive polygon editor.

    SeeAlso:
        ibeis.viz.interact.interact_annotations2
        (ensure that any updates here are propogated there)

    Args:
        verts_list (list) : list of lists of (float, float)
            List of (x, y) coordinates used as vertices of the polygon.
    """
    # --- Initialization and Figure Widgets
    def __init__(self, img, img_ind=None, commit_callback=None,
                 verts_list=None,
                 bbox_list=None,
                 theta_list=None,
                 species_list=None,
                 metadata_list=None,
                 line_width=4, line_color=(1, 1, 1), face_color=(0, 0, 0),
                 fnum=None, default_species=DEFAULT_SPECIES_TAG,
                 next_callback=None, prev_callback=None, do_mask=False,
                 valid_species=None,
                 **kwargs):
        super(AnnotationInteraction, self).__init__(fnum=fnum, **kwargs)
        # BUGFIX: default was a shared mutable list ([]); None sentinel is
        # backward compatible and avoids cross-instance aliasing.
        self.valid_species = [] if valid_species is None else valid_species
        self.commit_callback = commit_callback  # commit_callback
        self.but_width = .14
        #self.but_height = .08
        self.next_prev_but_height = .08
        self.but_height = self.next_prev_but_height - .01
        self.callback_funcs = dict([
            ('close_event', self.on_close),
            ('draw_event', self.draw_callback),
            ('button_press_event', self.on_click),
            ('button_release_event', self.on_click_release),
            ('figure_leave_event', self.on_figure_leave),
            ('key_press_event', self.on_key_press),
            ('motion_notify_event', self.on_motion),
            ('pick_event', self.on_pick),
            #('resize_event', self.on_resize),
        ])
        self.mpl_callback_ids = {}
        self.img = img
        self.show_species_tags = True
        self.max_dist = 10
        def _reinitialize_variables():
            # Resets per-image interaction state back to the __init__ defaults
            self.do_mask = do_mask
            self.img_ind = img_ind
            self.species_tag = default_species
            self.showverts = True
            self.fc_default = face_color
            self.mouseX = None  # mouse X coordinate
            self.mouseY = None  # mouse Y coordinate
            self.ind_xy = None
            self._autoinc_polynum = it.count(0)  # num polys in image
            self._poly_held = False  # if any poly is active
            self._selected_poly = None  # active polygon
            self.parent_poly = None  # level of parts heirarchy
            self.background = None
            # Ensure nothing is down
            self.reset_mouse_state()
        _reinitialize_variables()
        # hack involving exploting lexical scoping to save defaults for a
        # restore operation
        self.reinitialize_variables = _reinitialize_variables
        try:
            self.fig = df2.figure(fnum=self.fnum, doclf=True, docla=True)
            df2.close_figure(self.fig)
        except AttributeError:
            pass
        self.fig = df2.figure(fnum=self.fnum, doclf=True, docla=True)
        self.reinitialize_figure(fnum=self.fnum)
        assert verts_list is None or bbox_list is None, 'only one can be specified'
        # bbox_list will get converted to verts_list
        if verts_list is not None:
            bbox_list = vt.bboxes_from_vert_list(verts_list)
        if bbox_list is not None:
            verts_list = [vt.verts_from_bbox(bbox) for bbox in bbox_list]
        if theta_list is None:
            theta_list = [0 for _ in verts_list]
        if species_list is None:
            species_list = [self.species_tag for _ in verts_list]
        if metadata_list is None:
            metadata_list = [None for _ in verts_list]
        # Create the list of polygons
        self.handle_polygon_creation(bbox_list, theta_list, species_list, metadata_list)
        self._ind = None  # the active vert
        self._current_rotate_poly = None
        self.mpl_callback_ids = {}
        self.connect_mpl_callbacks(self.fig.canvas)
        self.add_action_buttons()
        self.update_callbacks(next_callback, prev_callback)

    def reinitialize_figure(self, fnum=None):
        """Clear the figure and redraw self.img with usage instructions."""
        self.fig.clear()
        self.fig.clf()
        #self.fig.cla()
        #ut.qflag()
        self.fnum = fnum
        #print(self.fnum)
        ax = df2.gca()
        #self.fig.ax = ax
        self.ax = ax
        df2.remove_patches(self.ax)
        df2.imshow(self.img, fnum=fnum)
        ax.set_clip_on(False)
        ax.set_title(('\n'.join([
            'Click and drag to select/move/resize/orient an ANNOTATION',
            #'Press enter to clear the species tag of the selected ANNOTATION',
            'Press tab to cycle through annotation species',
            #'Type to edit the ANNOTATION species (press tab to autocomplete)'
        ])))

    def add_action_buttons(self):
        """Create the add/delete/accept buttons along the bottom edge."""
        self.append_button(
            'Add Annotation\n' + pretty_hotkey_map(ADD_RECTANGLE_HOTKEY),
            rect=[0.18, 0.015, self.but_width, self.but_height],
            callback=self.add_new_poly
        )
        # self.append_button(
        #     'Add Full Annotation\n' + pretty_hotkey_map(ADD_RECTANGLE_FULL_HOTKEY),
        #     rect=[0.34, 0.015, self.but_width, self.but_height],
        #     callback=ut.partial(self.add_new_poly, full=True)
        # )
        self.append_button(
            'Delete Annotation\n' + pretty_hotkey_map(DEL_RECTANGLE_HOTKEY),
            rect=[0.50, 0.015, self.but_width, self.but_height],
            callback=self.delete_current_poly
        )
        self.append_button(
            'Save and Exit\n' + pretty_hotkey_map(ACCEPT_SAVE_HOTKEY),
            rect=[0.66, 0.015, self.but_width, self.but_height],
            callback=self.save_and_exit
        )

    def disconnect_mpl_callbacks(self, canvas):
        """ disconnects all connected matplotlib callbacks """
        for name, callbackid in six.iteritems(self.mpl_callback_ids):
            canvas.mpl_disconnect(callbackid)
        self.mpl_callback_ids = {}

    def connect_mpl_callbacks(self, canvas):
        """ disconnects matplotlib callbacks specified in the
        self.mpl_callback_ids dict """
        #http://matplotlib.org/1.3.1/api/backend_bases_api.html
        # Create callback ids
        self.disconnect_mpl_callbacks(canvas)
        self.mpl_callback_ids = {
            name: canvas.mpl_connect(name, func)
            for name, func in six.iteritems(self.callback_funcs)
        }
        self.fig.canvas = canvas

    # --- Updates
    def update_callbacks(self, next_callback, prev_callback):
        """Install prev/next image buttons wired to the given callbacks."""
        self.prev_callback = prev_callback
        self.next_callback = next_callback
        # Hack because the callbacks actually need to be wrapped
        _next_callback = None if self.next_callback is None else self.next_image
        _prev_callback = None if self.prev_callback is None else self.prev_image
        self.append_button(
            'Previous Image\n' + pretty_hotkey_map(PREV_IMAGE_HOTKEYS),
            rect=[0.02, 0.01, self.but_width, self.next_prev_but_height],
            callback=_prev_callback,
        )
        self.append_button(
            'Next Image\n' + pretty_hotkey_map(NEXT_IMAGE_HOTKEYS),
            rect=[0.82, 0.01, self.but_width, self.next_prev_but_height],
            callback=_next_callback,
        )

    def update_image_and_callbacks(self, img, bbox_list, theta_list,
                                   species_list, metadata_list, next_callback,
                                   prev_callback):
        """Swap in a new image and its annotations, rebuilding all state."""
        self.disconnect_mpl_callbacks(self.fig.canvas)
        for poly in six.itervalues(self.polys):
            poly.remove()
        self.polys = {}
        self.reinitialize_variables()
        self.img = img
        self.reinitialize_figure(fnum=self.fnum)
        self.handle_polygon_creation(bbox_list, theta_list, species_list,
                                     metadata_list)
        self.add_action_buttons()
        self.draw()
        self.connect_mpl_callbacks(self.fig.canvas)
        self.update_callbacks(next_callback, prev_callback)
        print('[interact_annot] drawing')
        self.draw()
        self.update_UI()

    def _update_poly_colors(self):
        """Recolor every polygon to match the current selection state."""
        for poly in six.itervalues(self.uneditable_polys):
            poly.update_color()
        for ind, poly in six.iteritems(self.editable_polys):
            assert poly.num == ind
            selected = poly is self._selected_poly
            editing_parts = poly is self.parent_poly
            poly.update_color(selected, editing_parts)
        self.draw()

    def _update_poly_lines(self):
        """Resync every polygon's line artists with its coordinates."""
        for poly in six.itervalues(self.uneditable_polys):
            #self.last_vert_ind = len(poly.xy) - 1
            poly.update_lines()
        for poly in six.itervalues(self.editable_polys):
            self.last_vert_ind = len(poly.xy) - 1
            poly.update_lines()

    def update_UI(self):
        """Redraw all polygon artists via blitting."""
        self._update_poly_lines()
        self._update_poly_colors()
        self.fig.canvas.restore_region(self.background)
        self.draw_artists()
        self.fig.canvas.blit(self.ax.bbox)

    def draw_artists(self):
        for poly in six.itervalues(self.uneditable_polys):
            poly.draw_self(self.ax, editable=False)
        for poly in six.itervalues(self.editable_polys):
            poly.draw_self(self.ax, self.show_species_tags)

    # --- Data Matainence / Other
    @property
    def uneditable_polys(self):
        # While editing parts, the parent annotation is shown but frozen
        if self.in_edit_parts_mode:
            return {self.parent_poly.num: self.parent_poly}
            #return self.polys
        else:
            return {}

    @property
    def editable_polys(self):
        #return self.polys
        if self.in_edit_parts_mode:
            return self.parent_poly.child_polys
        else:
            if self.polys is None:
                self.polys = {}
            return self.polys

    def get_poly_under_cursor(self, x, y):
        """
        get the index of the vertex under cursor if within max_dist tolerance
        """
        # Remove any deleted polygons
        poly_dict = {k: v for k, v in self.editable_polys.items() if v is not None}
        if len(poly_dict) > 0:
            poly_inds = list(poly_dict.keys())
            poly_list = ut.take(poly_dict, poly_inds)
            # Put polygon coords into figure space
            poly_pts = [poly.get_transform().transform(np.asarray(poly.xy))
                        for poly in poly_list]
            # Find the nearest vertex from the annotations
            ind_dist_list = [vt.nearest_point(x, y, polypts)
                             for polypts in poly_pts]
            dist_lists = ut.take_column(ind_dist_list, 1)
            min_idx = np.argmin(dist_lists)
            sel_polyind = poly_inds[min_idx]
            sel_vertx, sel_dist = ind_dist_list[min_idx]
            # Ensure nearest distance is within threshold
            # (nearest_point returns a squared distance, hence max_dist ** 2)
            if sel_dist >= self.max_dist ** 2:
                sel_polyind, sel_vertx = (None, None)
        else:
            sel_polyind, sel_vertx = (None, None)
        return sel_polyind, sel_vertx

    def get_most_recently_added_poly(self):
        if len(self.editable_polys) == 0:
            return None
        else:
            # most recently added polygon has the highest index
            poly_ind = max(list(self.editable_polys.keys()))
            return self.editable_polys[poly_ind]

    def new_polygon(self, verts, theta, species, fc=(0, 0, 0),
                    line_color=(1, 1, 1), line_width=4, is_orig=False,
                    metadata=None):
        """ verts - list of (x, y) tuples """
        # create new polygon from verts
        num = six.next(self._autoinc_polynum)
        poly = AnnotPoly(ax=self.ax, num=num, verts=verts, theta=theta,
                         species=species, fc=fc, line_color=line_color,
                         line_width=line_width, is_orig=is_orig,
                         metadata=metadata, valid_species=self.valid_species,
                         manager=self)
        poly.set_picker(self.is_poly_pickable)
        return poly

    def handle_polygon_creation(self, bbox_list, theta_list, species_list,
                                metadata_list):
        """ Maintain original input """
        assert bbox_list is not None
        if theta_list is None:
            theta_list = [0.0 for _ in range(len(bbox_list))]
        if species_list is None:
            species_list = ['' for _ in range(len(bbox_list))]
        # BUGFIX: messages were typo'd and the metadata one duplicated 'data2'
        assert len(bbox_list) == len(theta_list), 'inconsistent data1'
        assert len(bbox_list) == len(species_list), 'inconsistent data2'
        assert len(bbox_list) == len(metadata_list), 'inconsistent data3'
        self.original_indices = list(range(len(bbox_list)))
        self.original_bbox_list = bbox_list
        self.original_theta_list = theta_list
        self.original_species_list = species_list
        self.original_metadata_list = metadata_list
        # Convert bbox to verticies
        verts_list = [vt.verts_from_bbox(bbox) for bbox in bbox_list]
        for verts in verts_list:
            verts = np.array(verts)
            for vert in verts:
                enforce_dims(self.ax, vert)
        # Create polygons
        poly_list = [self.new_polygon(verts_, theta, species, is_orig=True,
                                      metadata=metadata)
                     for (verts_, theta, species, metadata) in
                     zip(verts_list, theta_list, species_list, metadata_list)]
        self.polys = {poly.num: poly for poly in poly_list}
        if len(self.polys) != 0:
            # Select poly with largest area
            wh_list = np.array([poly.size for poly in six.itervalues(self.polys)])
            poly_index = list(self.polys.keys())[wh_list.prod(axis=1).argmax()]
            self._selected_poly = self.polys[poly_index]
            self._update_poly_colors()
            self._update_poly_lines()
        else:
            self._selected_poly = None
        # Add polygons to the axis
        for poly in six.itervalues(self.polys):
            poly.add_to_axis(self.ax)
        # Give polygons mpl change callbacks
        #for poly in six.itervalues(self.polys):
        #    poly.add_callback(self.poly_changed)

    # --- Actions
    def add_new_poly(self, event=None, full=False):
        """ Adds a new annotation to the image """
        if full:
            (h, w) = self.img.shape[0:2]
            x1, y1 = 1, 1
            x2, y2 = w - 1, h - 1
            coords = ((x1, y1), (x1, y2), (x2, y2), (x2, y1))
        else:
            if self._selected_poly is not None:
                defaultshape_polys = {
                    self._selected_poly.num:
                        self._selected_poly
                }
            else:
                defaultshape_polys = self.editable_polys
            coords = default_vertices(self.img, defaultshape_polys,
                                      self.mouseX, self.mouseY)
        poly = self.new_polygon(verts=coords, theta=0,
                                species=self.species_tag)
        poly.parent = self.parent_poly
        # Add to the correct place in current heirarchy
        self.editable_polys[poly.num] = poly
        poly.add_to_axis(self.ax)
        #self.polys[poly.num] = poly
        #poly.add_callback(self.poly_changed)
        self._ind = None  # the active vert
        self._selected_poly = self.get_most_recently_added_poly()
        self._update_poly_lines()
        self._update_poly_colors()
        self.draw()

    def delete_current_poly(self, event=None):
        """
        Removes an annotation
        """
        if self._selected_poly is None:
            print('[interact_annot] No polygon selected to delete')
        else:
            print('[interact_annot] delete annot')
            poly = self._selected_poly
            #self.polys.pop(poly.num)
            del self.editable_polys[poly.num]
            # remove the poly from the figure itself
            poly.remove_from_axis(self.ax)
            #reset anything that has to do with current poly
            self._selected_poly = self.get_most_recently_added_poly()
            self._poly_held = False
            if self._selected_poly is not None:
                self._update_poly_colors()
            self.draw()

    def edit_poly_parts(self, poly):
        """Enter (poly is not None) or leave (poly is None) part-edit mode."""
        if poly is None and self.parent_poly is not None:
            self._selected_poly = self.parent_poly
        print('self.parent_poly = %r' % (self.parent_poly,))
        self.parent_poly = poly
        if poly is not None:
            self._selected_poly = self.get_most_recently_added_poly()
        print('self._selected_poly = %r' % (self._selected_poly,))
        if poly is None:
            self.ax.imshow(vt.convert_colorspace(self.img, 'RGB'))
        else:
            # Mask the part of the image not belonging to the annotation
            mask = poly.get_poly_mask(self.img.shape)
            masked_img = apply_mask(self.img, mask)
            self.ax.imshow(vt.convert_colorspace(masked_img, 'RGB'))
        self._update_poly_colors()

    @property
    def in_edit_parts_mode(self):
        return self.parent_poly is not None

    def toggle_species_label(self):
        print('[interact_annot] toggle_species_label()')
        self.show_species_tags = not self.show_species_tags
        self.update_UI()

    def save_and_exit(self, event, do_close=True):
        """
        The Save and Exit Button

        write a callback to redraw viz for bbox_list
        """
        print('[interact_annot] Pressed Accept Button')

        def _get_annottup_list():
            # Collect (bbox, theta, species) for every surviving polygon
            annottup_list = []
            indices_list = []
            #theta_list = []
            for poly in six.itervalues(self.polys):
                assert poly is not None
                index = poly.num
                bbox = tuple(map(int, vt.bbox_from_verts(poly.basecoords)))
                theta = poly.theta
                species = poly.species_tag.get_text()
                annottup = (bbox, theta, species)
                indices_list.append(index)
                annottup_list.append(annottup)
            return indices_list, annottup_list

        def _send_back_annotations():
            # Partition results into deleted / changed / unchanged / new
            print('[interact_annot] _send_back_annotations')
            indices_list, annottup_list = _get_annottup_list()
            # Delete if index is in original_indices but no in indices_list
            deleted_indices = list(set(self.original_indices) -
                                   set(indices_list))
            changed_indices = []
            unchanged_indices = []  # sanity check
            changed_annottups = []
            new_annottups = []
            original_annottup_list = list(zip(self.original_bbox_list,
                                              self.original_theta_list,
                                              self.original_species_list))
            for index, annottup in zip(indices_list, annottup_list):
                # If the index is not in the originals then it is new
                if index not in self.original_indices:
                    new_annottups.append(annottup)
                else:
                    if annottup not in original_annottup_list:
                        changed_annottups.append(annottup)
                        changed_indices.append(index)
                    else:
                        unchanged_indices.append(index)
            self.commit_callback(unchanged_indices, deleted_indices,
                                 changed_indices, changed_annottups,
                                 new_annottups)

        if self.commit_callback is not None:
            _send_back_annotations()
        # Make mask from selection
        if self.do_mask is True:
            self.fig.clf()
            # BUGFIX: Figure has no subplot() method; use add_subplot
            self.ax = ax = self.fig.add_subplot(111)
            mask_list = [poly.get_poly_mask(self.img.shape)
                         for poly in six.itervalues(self.polys)]
            if len(mask_list) == 0:
                print('[interact_annot] No polygons to make mask out of')
                return 0
            mask = mask_list[0]
            for mask_ in mask_list:
                mask = np.maximum(mask, mask_)
            #mask = self.get_poly_mask()
            # User must close previous figure
            # Modify the image with the mask
            masked_img = apply_mask(self.img, mask)
            # show the modified image
            ax.imshow(masked_img)
            # BUGFIX: Axes.title is a Text artist, not callable; use set_title
            ax.set_title('Region outside of mask is darkened')
            ax.figure.show()
            return
        print('[interact_annot] Accept Over')
        if do_close:
            df2.close_figure(self.fig)

    # --- Connected Slots and Callbacks
    def next_image(self, event):
        if self.next_callback is not None:
            self.next_callback()

    def prev_image(self, event):
        if self.prev_callback is not None:
            self.prev_callback()

    def start(self):
        # FIXME: conform to abstract_interaction start conventions
        #self._ensure_running()
        #self.show_page()
        self.show()

    def show(self):
        self.draw()
        self.bring_to_front()

    def draw_callback(self, event):
        # Cache the clean background for subsequent blits
        self.background = self.fig.canvas.copy_from_bbox(self.ax.bbox)
        self.draw_artists()

    def _show_poly_context_menu(self, event):
        def _make_options():
            metadata = self._selected_poly.metadata
            options = []
            options += [
                #('Foo: ', ut.partial(print, 'bar')),
                #('Move to back ', self._selected_poly.move_to_back),
                ('PolyInfo: ', self._selected_poly.print_info),
            ]
            if isinstance(metadata, ut.LazyDict):
                options += metadata.nocache_eval('annot_context_options')
            return options
        options = _make_options()
        self.show_popup_menu(options, event)

    def is_poly_pickable(self, artist, event):
        """Picker predicate: hit-test against editable polygons only."""
        if artist.num in self.editable_polys:
            mouse_xy = event.x, event.y
            hit = artist.contains_point(mouse_xy)
        else:
            hit = False
        #import utool
        #utool.embed()
        props = {'dblclick': event.dblclick}
        return hit, props

    def on_pick(self, event):
        """ Makes selected polygon translucent """
        # BUGFIX: removed debug leftover `or True` that forced printing
        if self.debug > 0:
            print('[interact_annot] on_pick')
        if not self._poly_held:
            artist = event.artist
            print('[interact_annot] picked artist = %r' % (artist,))
            self._selected_poly = artist
            self._poly_held = True
        if event.dblclick and not self.in_edit_parts_mode:
            self.edit_poly_parts(self._selected_poly)
            pass
        #x, y = event.mouseevent.xdata, event.mouseevent.xdata

    def on_click(self, event):
        """
        python -m ibeis.viz.interact.interact_annotations2 --test-ishow_image2 --show
        """
        super(AnnotationInteraction, self).on_click(event)
        if self._ind is not None:
            self._ind = None
            return
        if not self.showverts:
            return
        if event.inaxes is None:
            return
        if len(self.editable_polys) == 0:
            print('[interact_annot] No polygons on screen')
            return
        # Right click - context menu
        if event.button == self.RIGHT_BUTTON:
            self._show_poly_context_menu(event)
        # Left click, indicate that a mouse button is down
        if event.button == self.LEFT_BUTTON:
            #if event.dblclick and not self.in_edit_parts_mode:
            #    # On double click enter a single annotation to annotation parts
            #    #print("DOUBLECLICK")
            #    #self.edit_poly_parts(self._selected_poly)
            if event.key == 'shift':
                self._current_rotate_poly = self._selected_poly
            else:
                # Determine if we are clicking the rotation line
                mouse_xy = (event.xdata, event.ydata)
                for poly in six.itervalues(self.editable_polys):
                    if poly.is_near_handle(mouse_xy, self.max_dist):
                        self._current_rotate_poly = poly
                        break
            if event.dblclick:
                # Reset rotation
                if self._current_rotate_poly is not None:
                    self._current_rotate_poly.theta = 0
                    self._current_rotate_poly.update_display_coords()
            polyind, self._ind = self.get_poly_under_cursor(event.x, event.y)
            if self._ind is not None and polyind is not None:
                self._selected_poly = self.editable_polys[polyind]
                if self._selected_poly is None:
                    return
                self.ind_xy = self._selected_poly.xy[self._ind]
                self._poly_held = True
                self._selected_poly.anchor_idx = self._ind
        self.mouseX, self.mouseY = event.xdata, event.ydata
        if self._poly_held is True or self._ind is not None:
            self._selected_poly.set_alpha(.2)
            self._update_poly_colors()
        self._update_poly_colors()
        self._update_poly_lines()
        if self.background is not None:
            self.fig.canvas.restore_region(self.background)
        else:
            print('[interact_annot] error: self.background is none.'
                  ' Trying refresh.')
            # BUGFIX: capture a fresh background before restoring;
            # restore_region(None) raises.
            self.background = self.fig.canvas.copy_from_bbox(self.ax.bbox)
            self.fig.canvas.restore_region(self.background)
        # Redraw blitted objects
        self.draw_artists()
        self.fig.canvas.blit(self.ax.bbox)

    def on_motion(self, event):
        if ut.VERBOSE:
            print('[interact_annot] on_motion')
            print('[interact_annot] Got key: %r' % event.key)
        super(AnnotationInteraction, self).on_motion(event)
        # uses boolean punning for terseness
        # NOTE(review): a previous coordinate of exactly 0 also becomes None
        # here, and deltaX/deltaY degrade to False (== 0) in that case.
        lastX = self.mouseX or None
        lastY = self.mouseY or None
        # Allow for getting coordinates outside the axes
        ax = self.ax
        mousePos = [event.x, event.y]
        self.mouseX, self.mouseY = ax.transData.inverted().transform(mousePos)
        deltaX = lastX is not None and self.mouseX - lastX
        deltaY = lastY is not None and self.mouseY - lastY
        if not self.showverts:
            return
        #if self.in_edit_parts_mode:
        #    return
        quick_resize = (self._poly_held is True and (
            (event.button == self.MIDDLE_BUTTON) or
            (event.button == self.RIGHT_BUTTON) or
            (event.button == self.LEFT_BUTTON and event.key == 'ctrl')
        ))
        if self._poly_held is True and self._ind is not None:
            # Resize by dragging corner
            self._selected_poly.resize_poly(self.mouseX, self.mouseY,
                                            self._ind, self.ax)
            self._selected_poly.anchor_idx = self._ind
        elif quick_resize:
            # Quick resize with special click
            anchor_idx = self._selected_poly.anchor_idx
            idx = (anchor_idx + 2) % 4  # choose opposite anchor point
            self._selected_poly.resize_poly(self.mouseX, self.mouseY, idx,
                                            self.ax)
        elif self._current_rotate_poly:
            # Rotate using handle
            cx, cy = points_center(self._current_rotate_poly.xy)
            theta = np.arctan2(cy - self.mouseY, cx - self.mouseX) - TAU / 4
            dtheta = theta - self._current_rotate_poly.theta
            self._current_rotate_poly.rotate_poly(dtheta, self.ax)
        elif self._ind is None and event.button == self.LEFT_BUTTON:
            # Translate by dragging inside annot
            flag = deltaX is not None and deltaY is not None
            if self._poly_held is True and flag:
                self._selected_poly.move_poly(deltaX, deltaY, self.ax)
            self._ind = None
        else:
            return
        self.update_UI()

    def on_click_release(self, event):
        super(AnnotationInteraction, self).on_click_release(event)
        #if self._poly_held is True:
        self._poly_held = False
        self._current_rotate_poly = None
        if not self.showverts:
            return
        if self._selected_poly is None:
            return
        # NOTE(review): _poly_held was just cleared above, so the second
        # clause makes _flag always true; kept as-is to preserve behavior.
        _flag = (
            self._ind is None or
            self._poly_held is False or
            (self._ind is not None and
             self.is_down['left'] is True and
             self._selected_poly is not None
             )
        )
        if _flag:
            self._selected_poly.set_alpha(0)
            #self._selected_poly.set_facecolor('white')
        self.update_UI()
        if self._ind is None:
            return
        if len(self.editable_polys) == 0:
            print('[interact_annot] No polygons on screen')
            return
        if self._selected_poly is None:
            print('[interact_annot] WARNING: Polygon unknown.'
                  ' Using default. (2)')
            self._selected_poly = self.get_most_recently_added_poly()
        curr_xy = self._selected_poly.xy[self._ind]
        if self.ind_xy is not None:
            # Ignore sub-3-pixel jitter between press and release
            if np.all(np.fabs(self.ind_xy - curr_xy) < 3):
                return
        self._ind = None
        self._poly_held = False
        self.draw()

    def on_figure_leave(self, event):
        if self.debug > 0:
            print('[interact_annot] figure leave')
        #self.print_status()
        #self.on_click_release(event)
        self._poly_held = False
        self._ind = None
        self.reset_mouse_state()
        #self.print_status()

    def on_key_press(self, event):
        # BUGFIX: removed a duplicated unconditional debug print
        if self.debug > 0:
            print('[interact_annot] on_key_press')
            print('[interact_annot] Got key: %r' % event.key)
        if not event.inaxes:
            return
        if event.key == ACCEPT_SAVE_HOTKEY:
            self.save_and_exit(event)
        elif event.key == ADD_RECTANGLE_HOTKEY:
            self.add_new_poly()
        elif event.key == ADD_RECTANGLE_FULL_HOTKEY:
            self.add_new_poly(full=True)
        elif event.key == DEL_RECTANGLE_HOTKEY:
            self.delete_current_poly()
        elif event.key == TOGGLE_LABEL_HOTKEY:
            self.toggle_species_label()
        if re.match('escape', event.key):
            self.edit_poly_parts(None)
        if re.match('^backspace$', event.key):
            self._selected_poly.set_species(DEFAULT_SPECIES_TAG)
        if re.match('^tab$', event.key):
            self._selected_poly.increment_species(amount=1)
        # BUGFIX: raw string; '\+' is an invalid escape in a normal literal
        if re.match(r'^ctrl\+tab$', event.key):
            self._selected_poly.increment_species(amount=-1)
        # NEXT ANND PREV COMMAND
        def _matches_hotkey(key, hotkeys):
            return any([re.match(hk, key) is not None for hk in
                        ut.ensure_iterable(hotkeys)])
        if _matches_hotkey(event.key, PREV_IMAGE_HOTKEYS):
            self.prev_image(event)
        if _matches_hotkey(event.key, NEXT_IMAGE_HOTKEYS):
            self.next_image(event)
        self.draw()

    #def poly_changed(self, poly):
    #    """ this method is called whenever the polygon object is called """
    #    print('poly_changed poly=%r' % (poly,))
    #    # only copy the artist props to the line (except visibility)
    #    #vis = poly.lines.get_visible()
    #    #vis = poly.handle.get_visible()
    #    #poly.lines.set_visible(vis)
    #    #poly.handle.set_visible(vis)
def pretty_hotkey_map(hotkeys):
    """Render a hotkey (or list of hotkeys) as a parenthesized label."""
    if hotkeys is None:
        return ''
    if not isinstance(hotkeys, list):
        hotkeys = [hotkeys]
    # Placeholder for friendlier display names of particular keys
    mapping = {
        #'right': 'right arrow',
        #'left': 'left arrow',
    }
    pretty_keys = [mapping.get(hk, hk) for hk in hotkeys]
    return '(' + ut.conj_phrase(pretty_keys, 'or') + ')'
def apply_mask(img, mask):
    """Return a copy of *img* with pixels outside *mask* darkened by 100."""
    out = img.copy()
    outside = ~mask
    darkened = np.clip(out[outside] - 100., 0, 255)
    out[outside] = np.uint8(darkened)
    return out
def points_center(pts):
    """Mean of the vertices, ignoring the duplicated closing point."""
    # Closed polygons repeat their first vertex at the end; counting it twice
    # would bias the centroid, hence the [:-1] slice.
    open_verts = np.asarray(pts[:-1])
    return open_verts.mean(axis=0)
def rotate_points_around(points, theta, ax, ay):
    """
    Rotate 2D *points* by *theta* radians about the point (ax, ay).

    References:
        http://www.euclideanspace.com/maths/geometry/affine/aroundPoint/matrix2d/
    """
    # TODO: Can use vtool_ibeis for this
    ct = np.cos(theta)
    st = np.sin(theta)
    # Homogeneous 2D rotation about (ax, ay)
    rot_mat = np.array(
        [(ct, -st, ax - ct * ax + st * ay),
         (st, ct, ay - st * ax - ct * ay),
         (0., 0., 1.)]
    )
    augpts = np.array([(x, y, 1.) for (x, y) in points])
    rotated = rot_mat.dot(augpts.T).T
    return [(x, y) for (x, y, _) in rotated]
def calc_display_coords(oldcoords, theta):
    """Rotate base coordinates about their centroid into display space."""
    cx, cy = points_center(oldcoords)
    return rotate_points_around(oldcoords, theta, cx, cy)
def polarDelta(p1, p2):
    """Vector from p1 to p2 in polar form, as a mutable [mag, theta] pair."""
    dy = p2[1] - p1[1]
    dx = p2[0] - p1[0]
    return [vt.L2(p1, p2), np.arctan2(dy, dx)]
def apply_polarDelta(poldelt, cart):
    """Offset the cartesian point *cart* by a polar [mag, theta] delta."""
    mag, theta = poldelt
    return (cart[0] + mag * np.cos(theta), cart[1] + mag * np.sin(theta))
def is_within_distance_from_line(pt, line, max_dist):
    """True if point *pt* is within *max_dist* of segment *line*."""
    pt_arr = np.array(pt)
    seg = np.array(line)
    return vt.distance_to_lineseg(pt_arr, seg[0], seg[1]) <= max_dist
def check_min_wh(coords):
    """
    Reject degenerate rectangles (width or height of 5 or less).

    Depends on hardcoded indices, which is inelegant, but
    we're already depending on those for the FUDGE_FACTORS
    array above
        0----1
        |    |
        3----2
    """
    MIN_W = 5
    MIN_H = 5
    # Each dimension is measured along both parallel edges; if the pairs
    # disagree the vertex ordering assumption above has been violated.
    #w, h = vt.get_pointset_extent_wh(np.array(coords))
    w1 = coords[1][0] - coords[0][0]
    w2 = coords[2][0] - coords[3][0]
    h1 = coords[3][1] - coords[0][1]
    h2 = coords[2][1] - coords[1][1]
    assert np.isclose(w1, w2), ('w1: %r, w2: %r' % (w1, w2))
    assert np.isclose(h1, h2), ('h1: %r, h2: %r' % (h1, h2))
    #print('w, h = (%r, %r)' % (w1, h1))
    return w1 > MIN_W and h1 > MIN_H
def default_vertices(img, polys=None, mouseX=None, mouseY=None):
    """Default to rectangle that has a quarter-width/height border.

    Returns 4 corner tuples ((x1, y1), (x1, y2), (x2, y2), (x2, y1)),
    clipped to stay at least one pixel inside the image.
    """
    img_h, img_w = img.shape[0:2]
    # Center the new rectangle on the mouse position when one is given
    if mouseX is not None and mouseY is not None:
        cx = mouseX
        cy = mouseY
    else:
        cx = img_w // 2
        cy = img_h // 2
    if polys is not None and len(polys) > 0:
        # Mirror the half-extent of the largest existing polygon
        wh_list = np.array([vt.bbox_from_verts(poly.xy)[2:4]
                            for poly in six.itervalues(polys)])
        half_w, half_h = wh_list.max(axis=0) // 2
    else:
        # If no poly exists use 1/4 of the image size
        half_w, half_h = (img_w // 4, img_h // 4)
    # Offset the center by the half extents to get the x/y bounds
    x1, x2 = np.array([cx, cx]) + (half_w * np.array([-1, 1]))
    y1, y2 = np.array([cy, cy]) + (half_h * np.array([-1, 1]))
    # Clip to bounds
    x1 = max(x1, 1)
    y1 = max(y1, 1)
    x2 = min(x2, img_w - 1)
    y2 = min(y2, img_h - 1)
    return ((x1, y1), (x1, y2), (x2, y2), (x2, y1))
def check_valid_coords(ax, coords_list):
    # Every vertex must individually pass ``check_dims`` for the polygon
    # to be considered acceptably placed on the axes.
    return all([check_dims(ax, xy_pt) for xy_pt in coords_list])
def check_dims(ax, xy_pt, margin=0.5):
    """
    checks if bounding box dims are ok

    Allow the bounding box to go off the image
    so orientations can be done correctly
    """
    xlim = ax.get_xlim()
    ylim = ax.get_ylim()
    # Count how many of the four margin-shrunk borders the point violates.
    # ylim is indexed "backwards" because image axes are typically inverted
    # (ylim[0] holds the bottom / larger value).
    num_out = sum([
        xy_pt[0] < xlim[0] + margin,
        xy_pt[0] > xlim[1] - margin,
        xy_pt[1] < ylim[1] + margin,
        xy_pt[1] > ylim[0] - margin,
    ])
    # NOTE(review): with sane axis limits at most two of these conditions
    # can hold at once, so the <= 3 threshold never rejects -- confirm the
    # intended cutoff.
    return num_out <= 3
def enforce_dims(ax, xy_pt, margin=0.5):
    """
    Clamp ``xy_pt`` (mutated in place) to lie ``margin`` inside the axes.

    ONLY USE THIS ON UNROTATED RECTANGLES, as to do otherwise may yield
    arbitrary polygons
    """
    xlim = ax.get_xlim()
    ylim = ax.get_ylim()
    # Clamp x into [xlim[0] + margin, xlim[1] - margin]
    xy_pt[0] = min(max(xy_pt[0], xlim[0] + margin), xlim[1] - margin)
    # y-limits are stored inverted (image coordinates), hence the swapped
    # indices: ylim[1] is the low bound, ylim[0] the high bound.
    xy_pt[1] = min(max(xy_pt[1], ylim[1] + margin), ylim[0] - margin)
    return True
def test_interact_annots():
    r"""
    CommandLine:
        python -m plottool_ibeis.interact_annotations --test-test_interact_annots --show
    Example:
        >>> # ENABLE_DOCTEST
        >>> from plottool_ibeis.interact_annotations import *  # NOQA
        >>> import plottool_ibeis as pt
        >>> # build test data
        >>> # execute function
        >>> self = test_interact_annots()
        >>> # verify results
        >>> print(self)
        >>> pt.show_if_requested()
    """
    print('[interact_annot] *** START DEMO ***')
    # Two axis-aligned boxes; the first vertex is repeated so they draw closed
    verts_list = [
        ((0, 400), (400, 400), (400, 0), (0, 0), (0, 400)),
        ((400, 700), (700, 700), (700, 400), (400, 400), (400, 700))
    ]
    #if img is None:
    try:
        img_url = 'http://i.imgur.com/Vq9CLok.jpg'
        img_fpath = ut.grab_file_url(img_url)
        img = vt.imread(img_fpath)
    except Exception as ex:
        # Offline or download failure: fall back to random noise so the
        # demo still runs.
        print('[interact_annot] cant read zebra: %r' % ex)
        img = np.random.uniform(0, 255, size=(100, 100))
    valid_species = ['species1', 'species2']
    metadata_list = [{'name': 'foo'}, None]
    self = AnnotationInteraction(img, verts_list=verts_list,
                                 valid_species=valid_species,
                                 metadata_list=metadata_list,
                                 fnum=0)  # NOQA
    return self
if __name__ == '__main__':
    """
    CommandLine:
        python -m plottool_ibeis.interact_annotations --exec-test_interact_annots --show
    CommandLine:
        python -m plottool_ibeis.interact_annotations
        python -m plottool_ibeis.interact_annotations --allexamples
        python -m plottool_ibeis.interact_annotations --allexamples --noface --nosrc
    """
    import multiprocessing
    multiprocessing.freeze_support()  # for win32
    import utool as ut  # NOQA
    # Run this module's doctests when executed directly
    ut.doctest_funcs()
| apache-2.0 |
nimbis/django-shop-richcatalog | shop_richcatalog/south_migrations/0001_initial.py | 1 | 5250 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South schema migration for shop_richcatalog: creates the
    MPTT ``Catalog`` model and its M2M join table to ``shop.Product``."""
    # Apply only after the shop and shop_richproduct schemas exist.
    depends_on = (
        ('shop', '0012_auto__add_field_extraorderpricefield_data'),
        ('shop_richproduct', '0001_initial'),
    )
    def forwards(self, orm):
        """Create the catalog table, its M2M join table and uniqueness
        constraint."""
        # Adding model 'Catalog'
        db.create_table(u'shop_richcatalog_catalog', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('slug', self.gf('django.db.models.fields.SlugField')(unique=True, max_length=50)),
            ('parent', self.gf('mptt.fields.TreeForeignKey')(blank=True, related_name='children', null=True, to=orm['shop_richcatalog.Catalog'])),
            ('description', self.gf('django.db.models.fields.CharField')(max_length=255)),
            ('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100)),
            (u'lft', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
            (u'rght', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
            (u'tree_id', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
            (u'level', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
        ))
        db.send_create_signal(u'shop_richcatalog', ['Catalog'])
        # Adding M2M table for field products on 'Catalog'
        m2m_table_name = db.shorten_name(u'shop_richcatalog_catalog_products')
        db.create_table(m2m_table_name, (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('catalog', models.ForeignKey(orm[u'shop_richcatalog.catalog'], null=False)),
            ('product', models.ForeignKey(orm['shop.product'], null=False))
        ))
        db.create_unique(m2m_table_name, ['catalog_id', 'product_id'])
    def backwards(self, orm):
        """Revert the migration: drop both tables created in forwards()."""
        # Deleting model 'Catalog'
        db.delete_table(u'shop_richcatalog_catalog')
        # Removing M2M table for field products on 'Catalog'
        db.delete_table(db.shorten_name(u'shop_richcatalog_catalog_products'))
    # Frozen ORM state South uses to build the ``orm`` argument above.
    models = {
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'shop.product': {
            'Meta': {'object_name': 'Product'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_shop.product_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'unit_price': ('django.db.models.fields.DecimalField', [], {'default': "'0.0'", 'max_digits': '30', 'decimal_places': '2'})
        },
        u'shop_richcatalog.catalog': {
            'Meta': {'object_name': 'Catalog'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['shop_richcatalog.Catalog']"}),
            'products': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'catalogs'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['shop.Product']"}),
            u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        }
    }
    complete_apps = ['shop_richcatalog']
| bsd-3-clause |
proxysh/Safejumper-for-Mac | buildlinux/env64/lib/python2.7/site-packages/zope/interface/_flatten.py | 86 | 1056 | ##############################################################################
#
# Copyright (c) 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Adapter-style interface registry
See Adapter class.
"""
from zope.interface import Declaration
def _flatten(implements, include_None=0):
try:
r = implements.flattened()
except AttributeError:
if implements is None:
r=()
else:
r = Declaration(implements).flattened()
if not include_None:
return r
r = list(r)
r.append(None)
return r
| gpl-2.0 |
Maccimo/intellij-community | python/testData/inspections/PyDataclassInspection/helpersArgument.py | 20 | 2026 | import dataclasses
from typing import Type, Union
class A:
pass
dataclasses.fields(<warning descr="'dataclasses.fields' method should be called on dataclass instances or types">A</warning>)
dataclasses.fields(<warning descr="'dataclasses.fields' method should be called on dataclass instances or types">A()</warning>)
dataclasses.asdict(<warning descr="'dataclasses.asdict' method should be called on dataclass instances">A()</warning>)
dataclasses.astuple(<warning descr="'dataclasses.astuple' method should be called on dataclass instances">A()</warning>)
dataclasses.replace(<warning descr="'dataclasses.replace' method should be called on dataclass instances">A()</warning>)
@dataclasses.dataclass
class B:
pass
dataclasses.fields(B)
dataclasses.fields(B())
dataclasses.asdict(B())
dataclasses.astuple(B())
dataclasses.replace(B())
dataclasses.asdict(<warning descr="'dataclasses.asdict' method should be called on dataclass instances">B</warning>)
dataclasses.astuple(<warning descr="'dataclasses.astuple' method should be called on dataclass instances">B</warning>)
dataclasses.replace(<warning descr="'dataclasses.replace' method should be called on dataclass instances">B</warning>)
def unknown(p):
dataclasses.fields(p)
dataclasses.asdict(p)
dataclasses.astuple(p)
def structural(p):
print(len(p))
dataclasses.fields(p)
dataclasses.asdict(p)
dataclasses.astuple(p)
dataclasses.replace(p)
def union1(p: Union[A, B]):
dataclasses.fields(p)
dataclasses.asdict(p)
dataclasses.astuple(p)
dataclasses.replace(p)
def union2(p: Union[Type[A], Type[B]]):
dataclasses.fields(p)
dataclasses.asdict(<warning descr="'dataclasses.asdict' method should be called on dataclass instances">p</warning>)
dataclasses.astuple(<warning descr="'dataclasses.astuple' method should be called on dataclass instances">p</warning>)
dataclasses.replace(<warning descr="'dataclasses.replace' method should be called on dataclass instances">p</warning>) | apache-2.0 |
kirca/OpenUpgrade | addons/document/report/document_report.py | 341 | 4224 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields,osv
from openerp import tools
class report_document_user(osv.osv):
    # Read-only reporting model backed by the SQL view created in init();
    # aggregates ir_attachment rows per owner / month / directory.
    _name = "report.document.user"
    _description = "Files details by Users"
    _auto = False  # no table is created; init() supplies a view instead
    _columns = {
        'name': fields.char('Year', size=64,readonly=True),
        'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
                          ('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month',readonly=True),
        'user_id': fields.many2one('res.users', 'Owner', readonly=True),
        'user': fields.related('user_id', 'name', type='char', size=64, readonly=True),
        'directory': fields.char('Directory',size=64,readonly=True),
        'datas_fname': fields.char('File Name',size=64,readonly=True),
        'create_date': fields.datetime('Date Created', readonly=True),
        'change_date': fields.datetime('Modified Date', readonly=True),
        'file_size': fields.integer('File Size', readonly=True),
        'nbr':fields.integer('# of Files', readonly=True),
        'type':fields.char('Directory Type',size=64,readonly=True),
    }
    def init(self, cr):
        # (Re)create the backing view; attachments are grouped by
        # year/month of creation, directory and owner.
        tools.drop_view_if_exists(cr, 'report_document_user')
        cr.execute("""
            CREATE OR REPLACE VIEW report_document_user as (
                 SELECT
                     min(f.id) as id,
                     to_char(f.create_date, 'YYYY') as name,
                     to_char(f.create_date, 'MM') as month,
                     f.user_id as user_id,
                     count(*) as nbr,
                     d.name as directory,
                     f.datas_fname as datas_fname,
                     f.create_date as create_date,
                     f.file_size as file_size,
                     min(d.type) as type,
                     f.write_date as change_date
                 FROM ir_attachment f
                     left join document_directory d on (f.parent_id=d.id and d.name<>'')
                 group by to_char(f.create_date, 'YYYY'), to_char(f.create_date, 'MM'),d.name,f.parent_id,d.type,f.create_date,f.user_id,f.file_size,d.type,f.write_date,f.datas_fname
             )
        """)
class report_document_file(osv.osv):
    # Read-only reporting model backed by the SQL view created in init();
    # per-month attachment counts and cumulative file sizes.
    _name = "report.document.file"
    _description = "Files details by Directory"
    _auto = False  # no table is created; init() supplies a view instead
    _columns = {
        'file_size': fields.integer('File Size', readonly=True),
        'nbr':fields.integer('# of Files', readonly=True),
        'month': fields.char('Month', size=24, readonly=True),
    }
    _order = "month"
    def init(self, cr):
        # (Re)create the backing view grouped by creation month.
        tools.drop_view_if_exists(cr, 'report_document_file')
        cr.execute("""
            create or replace view report_document_file as (
                select min(f.id) as id,
                       count(*) as nbr,
                       min(EXTRACT(MONTH FROM f.create_date)||'-'||to_char(f.create_date,'Month')) as month,
                       sum(f.file_size) as file_size
                from ir_attachment f
                group by EXTRACT(MONTH FROM f.create_date)
             )
        """)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
weeksghost/dpxdt | dpxdt/tools/diff_my_urls.py | 7 | 6027 | #!/usr/bin/env python
# Copyright 2014 Brett Slatkin
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility for diffing a set of URL pairs defined in a config file.
Example usage:
./dpxdt/tools/diff_my_urls.py \
--upload_build_id=1234 \
--release_server_prefix=https://my-dpxdt-apiserver.example.com/api \
--release_client_id=<your api key> \
--release_client_secret=<your api secret> \
--upload_release_name="My release name" \
--release_cut_url=http://example.com/path/to/my/release/tool/for/this/cut
--tests_json_path=my_url_tests.json
Example input file "my_url_tests.json". One entry per test:
[
{
"name": "My homepage",
"run_url": "http://localhost:5000/static/dummy/dummy_page1.html",
"run_config": {
"viewportSize": {
"width": 1024,
"height": 768
},
"injectCss": "#foobar { background-color: lime",
"injectJs": "document.getElementById('foobar').innerText = 'bar';",
},
"ref_url": "http://localhost:5000/static/dummy/dummy_page1.html",
"ref_config": {
"viewportSize": {
"width": 1024,
"height": 768
},
"injectCss": "#foobar { background-color: goldenrod; }",
"injectJs": "document.getElementById('foobar').innerText = 'foo';",
}
},
...
]
See README.md for documentation of config parameters.
"""
import datetime
import json
import logging
import sys
# Local Libraries
import gflags
FLAGS = gflags.FLAGS
# Local modules
from dpxdt.client import fetch_worker
from dpxdt.client import release_worker
from dpxdt.client import workers
import flags
class Test(object):
    """Represents the JSON of a single test."""

    def __init__(self, name=None, run_url=None, run_config=None,
                 ref_url=None, ref_config=None):
        # Config dicts are serialized to JSON strings up front because the
        # release API takes raw JSON payloads. Falsy configs (None or an
        # empty dict) are normalized to None, matching the server's
        # "no config" convention.
        self.name = name
        self.run_url = run_url
        self.ref_url = ref_url
        self.run_config_data = None
        if run_config:
            self.run_config_data = json.dumps(run_config)
        self.ref_config_data = None
        if ref_config:
            self.ref_config_data = json.dumps(ref_config)
def load_tests(data):
    """Loads JSON data and returns a list of Test objects it contains."""
    return [Test(**test_json) for test_json in json.loads(data)]
class DiffMyUrls(workers.WorkflowItem):
    """Workflow for diffing a set of URL pairs defined in a config file.
    Args:
        release_url: URL of the newest and best version of the page.
        tests: List of Test objects to test.
        upload_build_id: Optional. Build ID of the site being compared. When
            supplied a new release will be cut for this build comparing it
            to the last good release.
        upload_release_name: Optional. Release name to use for the build. When
            not supplied, a new release based on the current time will be
            created.
        heartbeat: Function to call with progress status.
    """
    # run() is a coroutine-style generator: each ``yield`` hands a
    # sub-workflow (or a list of them, run in parallel) to the coordinator
    # and resumes with its result.
    def run(self,
            release_url,
            tests,
            upload_build_id,
            upload_release_name,
            heartbeat=None):
        if not upload_release_name:
            # Default release name: current UTC timestamp
            upload_release_name = str(datetime.datetime.utcnow())
        yield heartbeat('Creating release %s' % upload_release_name)
        release_number = yield release_worker.CreateReleaseWorkflow(
            upload_build_id, upload_release_name, release_url)
        pending_uploads = []
        for test in tests:
            item = release_worker.RequestRunWorkflow(
                upload_build_id, upload_release_name, release_number,
                test.name, url=test.run_url, config_data=test.run_config_data,
                ref_url=test.ref_url, ref_config_data=test.ref_config_data)
            pending_uploads.append(item)
        yield heartbeat('Requesting %d runs' % len(pending_uploads))
        # Yielding the list runs all upload workflows before resuming
        yield pending_uploads
        yield heartbeat('Marking runs as complete')
        release_url = yield release_worker.RunsDoneWorkflow(
            upload_build_id, upload_release_name, release_number)
        yield heartbeat('Results viewable at: %s' % release_url)
def real_main(release_url=None,
              tests_json_path=None,
              upload_build_id=None,
              upload_release_name=None):
    """Runs diff_my_urls.

    Args:
        release_url: URL of the release-cut tool, shown in the result UI.
        tests_json_path: Unused directly; the tests file is read from
            FLAGS.tests_json_path (parameter kept for call compatibility).
        upload_build_id: Build ID of the site being compared.
        upload_release_name: Optional release name; the workflow generates
            a timestamp-based name when empty.
    """
    coordinator = workers.get_coordinator()
    fetch_worker.register(coordinator)
    coordinator.start()
    # Use a context manager so the file handle is closed promptly instead
    # of leaking until garbage collection.
    with open(FLAGS.tests_json_path) as tests_file:
        data = tests_file.read()
    tests = load_tests(data)
    item = DiffMyUrls(
        release_url,
        tests,
        upload_build_id,
        upload_release_name,
        heartbeat=workers.PrintWorkflow)
    item.root = True
    coordinator.input_queue.put(item)
    # Block until the root workflow completes, then shut everything down.
    coordinator.wait_one()
    coordinator.stop()
    coordinator.join()
def main(argv):
    """Entry point: parse flags, validate required ones, run the diff."""
    # Parse gflags from argv; on bad flags print usage and exit non-zero.
    try:
        argv = FLAGS(argv)
    except gflags.FlagsError, e:
        print '%s\nUsage: %s ARGS\n%s' % (e, sys.argv[0], FLAGS)
        sys.exit(1)
    # These flags are mandatory for a run (see the module docstring).
    assert FLAGS.release_cut_url
    assert FLAGS.release_server_prefix
    assert FLAGS.tests_json_path
    assert FLAGS.upload_build_id
    if FLAGS.verbose:
        logging.getLogger().setLevel(logging.DEBUG)
    real_main(
        release_url=FLAGS.release_cut_url,
        tests_json_path=FLAGS.tests_json_path,
        upload_build_id=FLAGS.upload_build_id,
        upload_release_name=FLAGS.upload_release_name)
| apache-2.0 |
fqul/scrapy | scrapy/utils/log.py | 108 | 6012 | # -*- coding: utf-8 -*-
import sys
import logging
import warnings
from logging.config import dictConfig
from twisted.python.failure import Failure
from twisted.python import log as twisted_log
import scrapy
from scrapy.settings import overridden_settings, Settings
from scrapy.exceptions import ScrapyDeprecationWarning
logger = logging.getLogger(__name__)
def failure_to_exc_info(failure):
    """Extract exc_info from Failure instances.

    Returns a ``(type, value, traceback)`` tuple for twisted ``Failure``
    objects, and ``None`` for anything else (the previous implicit return).
    """
    if not isinstance(failure, Failure):
        return None
    return (failure.type, failure.value, failure.getTracebackObject())
class TopLevelFormatter(logging.Filter):
    """Keep only top level loggers's name (direct children from root) from
    records.

    This filter will replace Scrapy loggers' names with 'scrapy'. This mimics
    the old Scrapy log behaviour and helps shortening long names.

    Since it can't be set for just one logger (it won't propagate for its
    children), it's going to be set in the root handler, with a parametrized
    `loggers` list where it should act.
    """

    def __init__(self, loggers=None):
        self.loggers = loggers or []

    def filter(self, record):
        # Collapse e.g. 'scrapy.core.engine' -> 'scrapy' for configured
        # prefixes; never drop the record itself.
        for top_name in self.loggers:
            if record.name.startswith(top_name + '.'):
                record.name = record.name.partition('.')[0]
                break
        return True
# Baseline logging config applied by configure_logging(): keep Scrapy
# verbose (DEBUG) while silencing Twisted below ERROR. Existing loggers
# are left enabled so user configuration is not clobbered.
DEFAULT_LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'loggers': {
        'scrapy': {
            'level': 'DEBUG',
        },
        'twisted': {
            'level': 'ERROR',
        },
    }
}
def configure_logging(settings=None, install_root_handler=True):
    """
    Initialize logging defaults for Scrapy.
    :param settings: settings used to create and configure a handler for the
        root logger (default: None).
    :type settings: dict, :class:`~scrapy.settings.Settings` object or ``None``
    :param install_root_handler: whether to install root logging handler
        (default: True)
    :type install_root_handler: bool
    This function does:
    - Route warnings and twisted logging through Python standard logging
    - Assign DEBUG and ERROR level to Scrapy and Twisted loggers respectively
    - Route stdout to log if LOG_STDOUT setting is True
    When ``install_root_handler`` is True (default), this function also
    creates a handler for the root logger according to given settings
    (see :ref:`topics-logging-settings`). You can override default options
    using ``settings`` argument. When ``settings`` is empty or None, defaults
    are used.
    """
    if not sys.warnoptions:
        # Route warnings through python logging
        logging.captureWarnings(True)
    # Route twisted's log messages through the stdlib 'twisted' logger
    observer = twisted_log.PythonLoggingObserver('twisted')
    observer.start()
    # Apply the baseline levels (scrapy=DEBUG, twisted=ERROR)
    dictConfig(DEFAULT_LOGGING)
    if isinstance(settings, dict) or settings is None:
        settings = Settings(settings)
    if settings.getbool('LOG_STDOUT'):
        # Redirect print()/stdout writes into the 'stdout' logger
        sys.stdout = StreamLogger(logging.getLogger('stdout'))
    if install_root_handler:
        # Let handlers do the level filtering; root passes everything
        logging.root.setLevel(logging.NOTSET)
        handler = _get_handler(settings)
        logging.root.addHandler(handler)
def _get_handler(settings):
    """Build the root log handler described by ``settings``.

    A ``FileHandler`` when LOG_FILE is set, otherwise a ``StreamHandler``
    when LOG_ENABLED is true, otherwise a ``NullHandler``. The handler is
    configured with LOG_FORMAT/LOG_DATEFORMAT/LOG_LEVEL and the 'scrapy'
    top-level name filter.
    """
    filename = settings.get('LOG_FILE')
    if filename:
        handler = logging.FileHandler(filename,
                                      encoding=settings.get('LOG_ENCODING'))
    elif settings.getbool('LOG_ENABLED'):
        handler = logging.StreamHandler()
    else:
        handler = logging.NullHandler()
    handler.setFormatter(logging.Formatter(
        fmt=settings.get('LOG_FORMAT'),
        datefmt=settings.get('LOG_DATEFORMAT')))
    handler.setLevel(settings.get('LOG_LEVEL'))
    handler.addFilter(TopLevelFormatter(['scrapy']))
    return handler
def log_scrapy_info(settings):
    """Log the startup banner: Scrapy version, bot name, available optional
    features, and any settings overridden from the defaults."""
    logger.info("Scrapy %(version)s started (bot: %(bot)s)",
                {'version': scrapy.__version__, 'bot': settings['BOT_NAME']})
    logger.info("Optional features available: %(features)s",
                {'features': ", ".join(scrapy.optional_features)})
    d = dict(overridden_settings(settings))
    logger.info("Overridden settings: %(settings)r", {'settings': d})
class StreamLogger(object):
    """Fake file-like stream object that redirects writes to a logger instance

    Taken from:
        http://www.electricmonk.nl/log/2011/08/14/redirect-stdout-and-stderr-to-a-logger-in-python/
    """

    def __init__(self, logger, log_level=logging.INFO):
        self.logger = logger
        self.log_level = log_level
        self.linebuf = ''

    def write(self, buf):
        # Emit one log record per non-empty line, stripped of trailing
        # whitespace (so a trailing newline does not produce a blank record)
        lines = buf.rstrip().splitlines()
        for raw_line in lines:
            self.logger.log(self.log_level, raw_line.rstrip())
class LogCounterHandler(logging.Handler):
    """Record log levels count into a crawler stats"""

    def __init__(self, crawler, *args, **kwargs):
        super(LogCounterHandler, self).__init__(*args, **kwargs)
        self.crawler = crawler

    def emit(self, record):
        # Bump e.g. 'log_count/WARNING' in the crawler stats
        self.crawler.stats.inc_value('log_count/{}'.format(record.levelname))
def logformatter_adapter(logkws):
    """
    Helper that takes the dictionary output from the methods in LogFormatter
    and adapts it into a tuple of positional arguments for logger.log calls,
    handling backward compatibility as well.
    """
    required_keys = {'level', 'msg', 'args'}
    if not required_keys <= set(logkws):
        warnings.warn('Missing keys in LogFormatter method',
                      ScrapyDeprecationWarning)

    if 'format' in logkws:
        warnings.warn('`format` key in LogFormatter methods has been '
                      'deprecated, use `msg` instead',
                      ScrapyDeprecationWarning)

    level = logkws.get('level', logging.INFO)
    # The deprecated 'format' key wins over 'msg' when both are present
    message = logkws.get('format', logkws.get('msg'))
    # NOTE: an empty 'args' dict doesn't play well in logger.log calls, so
    # fall back to the whole dict in that case
    args = logkws['args'] if logkws.get('args') else logkws
    return (level, message, args)
| bsd-3-clause |
bd-j/prospector | scripts/prospector_restart.py | 3 | 7080 | #!/usr/local/bin/python
import time, sys, os
import numpy as np
np.errstate(invalid='ignore')
from prospect.models import model_setup
from prospect.io import write_results
from prospect.io import read_results as pr
from prospect import fitting
from prospect.likelihood import lnlike_spec, lnlike_phot, write_log, chi_spec, chi_phot
# --------------
# Read command line arguments
# --------------
sargv = sys.argv
argdict = {'restart_from': '', 'niter': 1024}
clargs = model_setup.parse_args(sargv, argdict=argdict)
# ----------
# Result object and Globals
# ----------
# Reload the previous run: posterior chain, observations and model spec.
result, global_obs, global_model = pr.results_from(clargs["restart_from"])
# An emcee chain has shape (nwalkers, niter, ndim) with nwalkers > 1.
is_emcee = (len(result["chain"].shape) == 3) & (result["chain"].shape[0] > 1)
assert is_emcee, "Result file does not have a chain of the proper shape."
# SPS Model instance (with libraries check)
sps = pr.get_sps(result)
run_params = result["run_params"]
run_params.update(clargs)
# Noise model (this should be doable via read_results)
from prospect.models.model_setup import import_module_from_string
# Re-import the original parameter file from its stored source text so
# that load_gp() can rebuild the spectroscopic/photometric noise models.
param_file = (result['run_params'].get('param_file', ''),
              result.get("paramfile_text", ''))
path, filename = os.path.split(param_file[0])
modname = filename.replace('.py', '')
user_module = import_module_from_string(param_file[1], modname)
spec_noise, phot_noise = user_module.load_gp(**run_params)
# -----------------
# LnP function as global
# ------------------
def lnprobfn(theta, model=None, obs=None, residuals=False,
             verbose=run_params['verbose']):
    """Given a parameter vector and optionally a dictionary of observational
    data and a model object, return the ln of the posterior. This requires that
    an sps object (and if using spectra and gaussian processes, a GP object) be
    instantiated.
    :param theta:
        Input parameter vector, ndarray of shape (ndim,)
    :param model:
        bsfh.sedmodel model object, with attributes including ``params``, a
        dictionary of model parameters. It must also have ``prior_product()``,
        and ``mean_model()`` methods defined.
    :param obs:
        A dictionary of observational data. The keys should be
          *``wavelength``
          *``spectrum``
          *``unc``
          *``maggies``
          *``maggies_unc``
          *``filters``
          * and optional spectroscopic ``mask`` and ``phot_mask``.
    :param residuals:
        If True, return the concatenated (spectroscopic, photometric) chi
        vector instead of the scalar ln-posterior (for least-squares use).
    :returns lnp:
        Ln posterior probability.
    """
    # NOTE: ``verbose`` defaults to the value of run_params['verbose'] at
    # module import time, not call time.
    if model is None:
        model = global_model
    if obs is None:
        obs = global_obs
    # Calculate prior probability and exit if not within prior
    lnp_prior = model.prior_product(theta)
    if not np.isfinite(lnp_prior):
        return -np.infty
    # Generate mean model
    t1 = time.time()
    try:
        spec, phot, x = model.mean_model(theta, obs, sps=sps)
    except(ValueError):
        # Unevaluable parameter combinations get zero posterior probability
        # rather than crashing the sampler.
        return -np.infty
    d1 = time.time() - t1
    # Return chi vectors for least-squares optimization
    if residuals:
        chispec = chi_spec(spec, obs)
        chiphot = chi_phot(phot, obs)
        return np.concatenate([chispec, chiphot])
    # Noise modeling
    if spec_noise is not None:
        spec_noise.update(**model.params)
    if phot_noise is not None:
        phot_noise.update(**model.params)
    vectors = {'spec': spec, 'unc': obs['unc'],
               'sed': model._spec, 'cal': model._speccal,
               'phot': phot, 'maggies_unc': obs['maggies_unc']}
    # Calculate likelihoods
    t2 = time.time()
    lnp_spec = lnlike_spec(spec, obs=obs, spec_noise=spec_noise, **vectors)
    lnp_phot = lnlike_phot(phot, obs=obs, phot_noise=phot_noise, **vectors)
    d2 = time.time() - t2
    if verbose:
        write_log(theta, lnp_prior, lnp_spec, lnp_phot, d1, d2)
    return lnp_prior + lnp_phot + lnp_spec
# -----------------
# MPI pool. This must be done *after* lnprob and
# chi2 are defined since slaves will only see up to
# sys.exit()
# ------------------
try:
    from emcee.utils import MPIPool
    pool = MPIPool(debug=False, loadbalance=True)
    if not pool.is_master():
        # Wait for instructions from the master process.
        pool.wait()
        sys.exit(0)
except(ImportError, ValueError):
    # No emcee MPI support available (or MPI not initialized): run serially.
    pool = None
    print('Not using MPI')
def halt(message):
    """Print ``message``, close the MPI pool (if any), and exit cleanly.

    :param message:
        Status string printed before shutdown.
    """
    print(message)
    try:
        pool.close()
    except Exception:
        # Narrowed from a bare ``except:`` (which also swallowed
        # SystemExit/KeyboardInterrupt). The pool may be None (no MPI) or
        # already closed; shutdown proceeds either way.
        pass
    sys.exit(0)
# --------------
# Master branch
# --------------
if __name__ == "__main__":

    # --------------
    # Setup
    # --------------
    rp = run_params
    rp['sys.argv'] = sys.argv
    try:
        rp['sps_libraries'] = sps.ssp.libraries
    except(AttributeError):
        rp['sps_libraries'] = None

    # Use the globals
    model = global_model
    obsdat = global_obs
    postkwargs = {}

    # make zeros into tiny numbers
    initial_theta = model.rectify_theta(model.initial_theta)
    if rp.get('debug', False):
        halt('stopping for debug')

    # Try to set up an HDF5 file and write basic info to it
    outroot = "{}_restart_{}".format(rp['outfile'], int(time.time()))
    odir = os.path.dirname(os.path.abspath(outroot))
    if (not os.path.exists(odir)):
        halt('Target output directory {} does not exist, please make it.'.format(odir))
    try:
        import h5py
        hfilename = outroot + '_mcmc.h5'
        hfile = h5py.File(hfilename, "a")
        print("Writing to file {}".format(hfilename))
        write_results.write_h5_header(hfile, run_params, model)
        write_results.write_obs_to_h5(hfile, obsdat)
    except(ImportError):
        # NOTE(review): if the h5py import fails, ``hfilename`` is never
        # assigned, so the ``hfile = hfilename`` fallback below would raise
        # NameError -- confirm h5py is a hard requirement here.
        hfile = None

    # -----------------------------------------
    # Initial guesses from end of last chain
    # -----------------------------------------
    # Restart each walker from its final position in the previous run
    initial_positions = result["chain"][:, -1, :]
    guesses = None
    initial_center = initial_positions.mean(axis=0)

    # ---------------------
    # Sampling
    # -----------------------
    if rp['verbose']:
        print('emcee sampling...')
    tstart = time.time()
    out = fitting.restart_emcee_sampler(lnprobfn, initial_positions,
                                        postkwargs=postkwargs,
                                        pool=pool, hdf5=hfile, **rp)
    esampler = out
    edur = time.time() - tstart
    if rp['verbose']:
        print('done emcee in {0}s'.format(edur))

    # -------------------------
    # Output HDF5 (and pickles if asked for)
    # -------------------------
    print("Writing to {}".format(outroot))
    if rp.get("output_pickles", False):
        write_results.write_pickles(rp, model, obsdat, esampler, guesses,
                                    outroot=outroot, toptimize=0, tsample=edur,
                                    sampling_initial_center=initial_center)
    if hfile is None:
        # Fall back to the filename so write_hdf5 opens the file itself
        hfile = hfilename
    write_results.write_hdf5(hfile, rp, model, obsdat, esampler, guesses,
                             toptimize=0, tsample=edur,
                             sampling_initial_center=initial_center)
    try:
        hfile.close()
    except:
        pass
    halt('Finished')
| mit |
Deepakkothandan/ansible | test/units/playbook/role/test_role.py | 89 | 9043 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.playbook.block import Block
from ansible.playbook.task import Task
from units.mock.loader import DictDataLoader
from units.mock.path import mock_unfrackpath_noop
from ansible.playbook.role import Role
from ansible.playbook.role.include import RoleInclude
from ansible.playbook.role import hash_params
class TestHashParams(unittest.TestCase):
    """Tests for ansible.playbook.role.hash_params.

    hash_params must reduce arbitrary (possibly nested or unhashable)
    parameter structures to a hashable frozenset, and raise TypeError
    for inputs it cannot handle.
    """

    def _assert_hashable(self, value):
        # Being usable as a dict key is equivalent to being hashable;
        # convert the TypeError into a readable test failure.
        try:
            {value: value}
        except TypeError as err:
            self.fail('%s is not hashable: %s' % (value, err))

    def _assert_set(self, value):
        # hash_params is expected to return a frozenset.
        self.assertIsInstance(value, frozenset)

    def test(self):
        hashed = hash_params({'foo': 'bar'})
        self._assert_set(hashed)
        self._assert_hashable(hashed)

    def test_dict_tuple(self):
        hashed = hash_params({'foo': (1, 'bar',)})
        self._assert_set(hashed)

    def test_tuple(self):
        hashed = hash_params((1, None, 'foo'))
        self._assert_hashable(hashed)

    def test_tuple_dict(self):
        hashed = hash_params(({'foo': 'bar'}, 37))
        self._assert_hashable(hashed)

    def test_list(self):
        hashed = hash_params(['foo', 'bar', 1, 37, None])
        self._assert_set(hashed)
        self._assert_hashable(hashed)

    def test_dict_with_list_value(self):
        hashed = hash_params({'foo': [1, 4, 'bar']})
        self._assert_set(hashed)
        self._assert_hashable(hashed)

    def test_empty_set(self):
        hashed = hash_params(set([]))
        self._assert_hashable(hashed)
        self._assert_set(hashed)

    def test_generator(self):
        def produce():
            for item in ['a', 1, None, {}]:
                yield item

        hashed = hash_params(produce())
        self._assert_hashable(hashed)

    def test_container_but_not_iterable(self):
        # A Container that is not iterable, which is unlikely but possible.
        class MyContainer(collections.Container):
            def __init__(self, some_thing):
                self.data = [some_thing]

            def __contains__(self, item):
                return item in self.data

            def __hash__(self):
                # hash() of a list raises TypeError when actually invoked,
                # so hashing an instance fails at call time.
                return hash(self.data)

            def __len__(self):
                return len(self.data)

            def __call__(self):
                return False

        self.assertRaises(TypeError, hash_params, MyContainer('foo bar'))
class TestRole(unittest.TestCase):
    """Tests for Role.load covering tasks, handlers, vars and metadata."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
    def test_load_role_with_tasks(self):
        """A role with tasks/main.yml produces exactly one task Block."""
        loader = DictDataLoader({
            "/etc/ansible/roles/foo_tasks/tasks/main.yml": """
            - shell: echo 'hello world'
            """,
        })

        play = MagicMock()
        play.ROLE_CACHE = {}

        include = RoleInclude.load('foo_tasks', play=play, loader=loader)
        role = Role.load(include, play=play)

        self.assertEqual(str(role), 'foo_tasks')
        self.assertEqual(len(role._task_blocks), 1)
        assert isinstance(role._task_blocks[0], Block)

    @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
    def test_load_role_with_handlers(self):
        """A role with handlers/main.yml produces exactly one handler Block."""
        loader = DictDataLoader({
            "/etc/ansible/roles/foo_handlers/handlers/main.yml": """
            - name: test handler
              shell: echo 'hello world'
            """,
        })

        play = MagicMock()
        play.ROLE_CACHE = {}

        include = RoleInclude.load('foo_handlers', play=play, loader=loader)
        role = Role.load(include, play=play)

        self.assertEqual(len(role._handler_blocks), 1)
        assert isinstance(role._handler_blocks[0], Block)

    @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
    def test_load_role_with_vars(self):
        """defaults/ and vars/ files land in separate attributes."""
        loader = DictDataLoader({
            "/etc/ansible/roles/foo_vars/defaults/main.yml": """
            foo: bar
            """,
            "/etc/ansible/roles/foo_vars/vars/main.yml": """
            foo: bam
            """,
        })

        play = MagicMock()
        play.ROLE_CACHE = {}

        include = RoleInclude.load('foo_vars', play=play, loader=loader)
        role = Role.load(include, play=play)

        self.assertEqual(role._default_vars, dict(foo='bar'))
        self.assertEqual(role._role_vars, dict(foo='bam'))

    @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
    def test_load_role_with_metadata(self):
        """meta/main.yml drives dependencies, duplicates and galaxy info."""
        loader = DictDataLoader({
            '/etc/ansible/roles/foo_metadata/meta/main.yml': """
                allow_duplicates: true
                dependencies:
                  - bar_metadata
                galaxy_info:
                  a: 1
                  b: 2
                  c: 3
            """,
            '/etc/ansible/roles/bar_metadata/meta/main.yml': """
                dependencies:
                  - baz_metadata
            """,
            '/etc/ansible/roles/baz_metadata/meta/main.yml': """
                dependencies:
                  - bam_metadata
            """,
            '/etc/ansible/roles/bam_metadata/meta/main.yml': """
                dependencies: []
            """,
            '/etc/ansible/roles/bad1_metadata/meta/main.yml': """
                1
            """,
            '/etc/ansible/roles/bad2_metadata/meta/main.yml': """
                foo: bar
            """,
            '/etc/ansible/roles/recursive1_metadata/meta/main.yml': """
                dependencies: ['recursive2_metadata']
            """,
            '/etc/ansible/roles/recursive2_metadata/meta/main.yml': """
                dependencies: ['recursive1_metadata']
            """,
        })

        play = MagicMock()
        play.ROLE_CACHE = {}

        include = RoleInclude.load('foo_metadata', play=play, loader=loader)
        role = Role.load(include, play=play)

        # Direct dependency: foo -> bar (which in turn pulls baz -> bam).
        direct = role.get_direct_dependencies()
        self.assertEqual(len(direct), 1)
        self.assertEqual(type(direct[0]), Role)
        self.assertEqual(len(direct[0].get_parents()), 1)
        self.assertEqual(direct[0].get_parents()[0], role)
        self.assertEqual(role._metadata.allow_duplicates, True)
        self.assertEqual(role._metadata.galaxy_info, dict(a=1, b=2, c=3))

        # Transitive dependencies are returned deepest-first.
        all_deps = role.get_all_dependencies()
        self.assertEqual(len(all_deps), 3)
        self.assertEqual([dep.get_name() for dep in all_deps],
                         ['bam_metadata', 'baz_metadata', 'bar_metadata'])

        # Malformed metadata must raise a parser error.
        for bad_name in ('bad1_metadata', 'bad2_metadata'):
            include = RoleInclude.load(bad_name, play=play, loader=loader)
            self.assertRaises(AnsibleParserError, Role.load, include, play=play)

        # Mutually-recursive dependencies must be detected.
        include = RoleInclude.load('recursive1_metadata', play=play, loader=loader)
        self.assertRaises(AnsibleError, Role.load, include, play=play)

    @patch('ansible.playbook.role.definition.unfrackpath', mock_unfrackpath_noop)
    def test_load_role_complex(self):
        """Roles may also be included via the dict form, dict(role=...)."""
        # FIXME: add tests for the more complex uses of
        # params and tags/when statements
        loader = DictDataLoader({
            "/etc/ansible/roles/foo_complex/tasks/main.yml": """
            - shell: echo 'hello world'
            """,
        })

        play = MagicMock()
        play.ROLE_CACHE = {}

        include = RoleInclude.load(dict(role='foo_complex'), play=play, loader=loader)
        role = Role.load(include, play=play)

        self.assertEqual(role.get_name(), "foo_complex")
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.